[
  {
    "path": "EagleEye_normal_prune.py",
    "content": "from sys import float_repr_style\nfrom models import *\nfrom utils.utils import *\nfrom utils.prune_utils import *\nfrom utils.datasets import *\nimport os\nimport test\nimport argparse\nfrom thop import profile\n# from thop import profile\n# from distiller.model_summaries import model_performance_summary\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\ndef obtain_filters_mask(model, CBL_idx, prune_idx, idx_mask):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    # CBL_idx存储的是所有带BN的卷积层（YOLO层的前一层卷积层是不带BN的）\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n            mask = idx_mask[idx]\n            # mask = obtain_bn_mask(bn_module, thre).cpu().numpy()\n            remain = int(mask.sum())\n            pruned = pruned + mask.shape[0] - remain\n\n            if remain == 0:\n                print(\"Channels would be all pruned!\")\n                raise Exception\n\n            # print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n            #       f'remaining channel: {remain:>4d}')\n        else:\n            mask = torch.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        filters_mask.append(mask.clone())\n\n    # 因此，这里求出的prune_ratio,需要裁剪的α参数/cbl_idx中所有的α参数\n    # prune_ratio = pruned / total\n    # print(f'Prune channels: {pruned}\\tPrune ratio: {prune_ratio:.3f}')\n\n    return num_filters, filters_mask\n\n\ndef obtain_l1_mask(conv_module, random_rate):\n    w_copy = conv_module.weight.data.abs().clone()\n    w_copy = torch.sum(w_copy, dim=(1, 2, 3))\n    length = w_copy.cpu().numpy().shape[0]\n    num_retain = int(length * (1 - random_rate))\n    if num_retain == 0:\n        num_retain = 1\n    _, y = torch.topk(w_copy, num_retain)\n    mask = torch.zeros(length, dtype=torch.float32).to(w_copy.device)\n    mask[y] = 1\n\n    return mask\n\n#macs = flops / 2\ndef performance_summary(model, opt=None, prefix=\"\"):\n    macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640).to(device),), verbose=False)\n    return macs\n\n\ndef rand_prune_and_eval(model, min_rate, max_rate):\n    while True:\n        model_copy = deepcopy(model)\n        remain_num = 0\n        idx_new = dict()\n        for idx in prune_idx:\n            # bn_module = model_copy.module_list[idx][1]\n            conv_module = model_copy.module_list[idx][0]\n\n            random_rate = (max_rate - min_rate) * (np.random.rand(1)) + min_rate\n            mask = obtain_l1_mask(conv_module, random_rate)\n\n            idx_new[idx] = mask\n            remain_num += int(mask.sum())\n            conv_module.weight.data = conv_module.weight.data.permute(1, 2, 3, 0).mul(mask).float().permute(3, 0, 1, 2)\n            # bn_module.weight.data.mul_(mask)\n\n        # ---------------\n        num_filters, filters_mask = obtain_filters_mask(model_copy, CBL_idx, prune_idx, idx_new)\n        CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n\n        compact_module_defs = deepcopy(model.module_defs)\n        for idx, num in zip(CBL_idx, num_filters):\n            assert compact_module_defs[idx]['type'] == 'convolutional'\n            
compact_module_defs[idx]['filters'] = str(num)\n        compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)\n        current_parameters = obtain_num_parameters(compact_model)\n        # print(current_parameters/origin_nparameters, end='；')\n        current_macs = performance_summary(compact_model)\n        # if current_parameters / origin_nparameters > remain_ratio + delta or current_parameters / origin_nparameters < remain_ratio - delta:\n        # macs = flops/2\n        if current_macs / origin_macs > remain_ratio + delta or current_macs / origin_macs < remain_ratio - delta:\n            # print('missing')\n            model_copy.cpu()\n            compact_model.cpu()\n            torch.cuda.empty_cache()\n            continue\n\n        print(\"yes---\")\n\n        for i in CBLidx2mask:\n            CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()\n        pruned_model = prune_model_keep_size_forEagleEye(model, prune_idx, CBLidx2mask)\n        init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)\n\n        compact_model.train()\n        with torch.no_grad():\n            for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader)):\n                imgs = imgs.cuda().float() / 255.0\n                compact_model(imgs)\n                if batch_i > steps:\n                    break\n        del model_copy\n        torch.cuda.empty_cache()\n        break\n    return compact_module_defs, current_parameters, compact_model\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/pretrain_weights/yolov3.weights',\n                        help='sparse model weights')\n    parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')\n    parser.add_argument('--delta', type=float, default=0.05, help='delta')\n    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    parser.add_argument('--number', type=int, default=200, help='number of subnetwork')\n    opt = parser.parse_args()\n    print(opt)\n\n    t0 = time.time()\n    remain_ratio = 1 - opt.percent\n    number = opt.number\n    img_size = opt.img_size\n    batch_size = opt.batch_size\n    delta = opt.delta\n\n    hyp = {'giou': 3.54,  # giou loss gain\n           'cls': 37.4,  # cls loss gain\n           'cls_pw': 1.0,  # cls BCELoss positive_weight\n           'obj': 64.3,  # obj loss gain (*=img_size/320 if img_size != 320)\n           'obj_pw': 1.0,  # obj BCELoss positive_weight\n           'iou_t': 0.20,  # iou training threshold\n           'lr0': 0.01,  # initial learning rate (SGD=5E-3, Adam=5E-4)\n           'lrf': 0.0005,  # final learning rate (with cos scheduler)\n           'momentum': 0.937,  # SGD momentum\n           'weight_decay': 0.0005,  # optimizer weight decay\n           'fl_gamma': 0.0,  # focal loss gamma (efficientDet default is gamma=1.5)\n           'hsv_h': 0.0138,  # image HSV-Hue augmentation (fraction)\n           'hsv_s': 0.678,  # image HSV-Saturation augmentation (fraction)\n           'hsv_v': 0.36,  # image HSV-Value augmentation (fraction)\n           'degrees': 1.98 * 0,  # image 
rotation (+/- deg)\n           'translate': 0.05 * 0,  # image translation (+/- fraction)\n           'scale': 0.05 * 0,  # image scale (+/- gain)\n           'shear': 0.641 * 0}  # image shear (+/- deg)\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg).to(device)\n\n    if opt.weights:\n        if opt.weights.endswith(\".pt\"):\n            model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n        else:\n            _ = load_darknet_weights(model, opt.weights)\n\n    data_config = parse_data_cfg(opt.data)\n\n    valid_path = data_config[\"valid\"]\n    train_path = data_config[\"train\"]\n    class_names = load_classes(data_config[\"names\"])\n    steps = math.ceil((len(open(train_path).readlines()) / batch_size) * 0.1)\n\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    dataset = LoadImagesAndLabels(train_path,\n                                  img_size,\n                                  batch_size,\n                                  augment=True,\n                                  hyp=hyp,  # augmentation hyperparameters\n                                  rect=False,  # rectangular training\n                                  cache_images=False)\n\n    dataloader = torch.utils.data.DataLoader(dataset,\n                                             batch_size=batch_size,\n                                             num_workers=min([os.cpu_count(), batch_size, 16]),\n                                             shuffle=True,  # Shuffle=True unless rectangular training is used\n                                             pin_memory=True,\n                                             collate_fn=dataset.collate_fn)\n\n    test_dataset = LoadImagesAndLabels(valid_path, img_size, batch_size,\n                                       hyp=hyp,\n                                       rect=True,\n                                       cache_images=False)\n    testloader = torch.utils.data.DataLoader(test_dataset,\n                                             batch_size=batch_size,\n                                             num_workers=min([os.cpu_count(), batch_size, 8]),\n                                             shuffle=False,\n                                             pin_memory=True,\n                                             collate_fn=test_dataset.collate_fn)\n\n    with torch.no_grad():\n        origin_model_metric = test.test(opt.cfg,\n                                        opt.data,\n                                        batch_size=batch_size,\n                                        imgsz=img_size,\n                                        model=model,\n                                        dataloader=testloader,\n                                        rank=-1,\n                                        plot=False)\n    origin_nparameters = obtain_num_parameters(model)\n    origin_macs = performance_summary(model)\n\n    CBL_idx, Conv_idx, prune_idx = parse_module_defs(model.module_defs)\n\n    print(\"-------------------------------------------------------\")\n\n    max_mAP = 0\n    for i in range(number):\n        compact_module_defs, current_parameters, compact_model = rand_prune_and_eval(model, 0, 1)\n        with torch.no_grad():\n            # Raise the NMS conf threshold so that poor randomly generated candidates do not exhaust GPU memory\n            mAP = test.test(opt.cfg,\n                            opt.data,\n                            batch_size=batch_size,\n                          
  imgsz=img_size,\n                            conf_thres=0.1,\n                            model=compact_model,\n                            dataloader=testloader,\n                            rank=-1,\n                            plot=False)[0][2]\n        print('candidate: ' + str(i), end=\" \")\n        print('remain_ratio: ' + str(current_parameters / origin_nparameters))\n        print(f'mAP of the pruned model is {mAP:.4f}')\n        if mAP > max_mAP:\n            max_mAP = mAP\n            compact_model_winner = deepcopy(compact_model)\n            cfg_name = 'cfg_backup/' + str(i) + '.cfg'\n            if not os.path.isdir('cfg_backup/'):\n                os.makedirs('cfg_backup/')\n            pruned_cfg_file = write_cfg(cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n        del compact_model\n        torch.cuda.empty_cache()\n    # Get the winning model's module_defs (the original defs with the conv filter counts updated)\n    compact_module_defs = deepcopy(compact_model_winner.module_defs)\n\n    compact_nparameters = obtain_num_parameters(compact_model_winner)\n\n    compact_macs = performance_summary(compact_model_winner)\n    compact_flops = compact_macs * 2 / 1024**3\n    origin_flops = origin_macs * 2 / 1024**3\n\n    random_input = torch.rand((16, 3, 416, 416)).to(device)\n\n    pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, model)\n    compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model_winner)\n\n    # Evaluate the pruned model on the test set and count its parameters\n    with torch.no_grad():\n        compact_model_metric = test.test(opt.cfg,\n                                         opt.data,\n                                         batch_size=batch_size,\n                                         imgsz=img_size,\n                                         model=compact_model_winner,\n                                         dataloader=testloader,\n                                         rank=-1,\n                                         plot=False)\n\n    # Compare parameter counts and metric performance before and after pruning\n    metric_table = [\n        [\"Metric\", \"Before\", \"After\"],\n        [\"mAP\", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters}\"],\n        [\"GFLOPs\", f\"{origin_flops}\", f\"{compact_flops}\"],\n        [\"Inference\", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    # Generate the pruned cfg file and save the model\n    pruned_cfg_name = 'cfg/rand-normal_' + str(remain_ratio) + '_' + str(number) + '/' + 'rand-normal_' + str(\n        remain_ratio) + '_' + str(number) + '.cfg'\n    # Create the output directory\n    dir_name = 'cfg/rand-normal_' + str(remain_ratio) + '_' + str(number) + '/'\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n\n    # compact_module_defs parsed the anchors from a string into an array, so convert the anchors back into a string here\n    file = open(opt.cfg, 'r')\n    lines = file.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    file.close()\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for 
i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + 'rand-normal_' + str(remain_ratio) + '_' + str(number) + '.weights'\n\n    save_weights(compact_model_winner, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n    print('%g sub-networks completed in %.3f hours.\\n' % (number, (time.time() - t0) / 3600))\n"
  },
  {
    "path": "EagleEye_regular_prune.py",
    "content": "from models import *\nfrom utils.utils import *\nfrom utils.prune_utils import *\nfrom utils.datasets import *\nimport os\nimport test\nimport argparse\nfrom thop import profile\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\ndef obtain_filters_mask(model, CBL_idx, prune_idx, idx_mask):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    # CBL_idx存储的是所有带BN的卷积层（YOLO层的前一层卷积层是不带BN的）\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n            mask = idx_mask[idx]\n            # mask = obtain_bn_mask(bn_module, thre).cpu().numpy()\n            remain = int(mask.sum())\n            pruned = pruned + mask.shape[0] - remain\n\n            if remain == 0:\n                print(\"Channels would be all pruned!\")\n                raise Exception\n\n\n        else:\n            mask = torch.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        filters_mask.append(mask.clone())\n\n    return num_filters, filters_mask\n\n\n\ndef obtain_l1_mask(conv_module, random_rate):\n    w_copy = conv_module.weight.data.abs().clone()\n    w_copy = torch.sum(w_copy, dim=(1, 2, 3))\n    length = w_copy.cpu().numpy().shape[0]\n    num_retain = int(length * (1 - random_rate))\n    num_retain = get_nearest_multiple(num_retain,channel_base)\n    if num_retain > length:\n        num_retain = length\n    if num_retain == 0:\n        num_retain = channel_base\n    _, y = torch.topk(w_copy, num_retain)\n    mask = torch.zeros(length, dtype=torch.float32).to(w_copy.device)\n    mask[y] = 1\n    return mask\n\n#macs = flops / 2\ndef performance_summary(model, opt=None, prefix=\"\"):\n    macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640).to(device),), verbose=False)\n    return macs\n\ndef rand_prune_and_eval(model, min_rate, max_rate):\n    while True:\n        model_copy = deepcopy(model)\n        remain_num = 0\n        idx_new = dict()\n        for idx in prune_idx:\n            # bn_module = model_copy.module_list[idx][1]\n            conv_module = model_copy.module_list[idx][0]\n            random_rate = (max_rate - min_rate) * (np.random.rand(1)) + min_rate\n            mask = obtain_l1_mask(conv_module, random_rate)\n            idx_new[idx] = mask\n            remain_num += int(mask.sum())\n            conv_module.weight.data = conv_module.weight.data.permute(1, 2, 3, 0).mul(mask).float().permute(3, 0, 1, 2)\n            # bn_module.weight.data.mul_(mask)\n\n        # ---------------\n        num_filters, filters_mask = obtain_filters_mask(model_copy, CBL_idx, prune_idx, idx_new)\n        CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n        CBLidx2filters = {idx: filters for idx, filters in zip(CBL_idx, num_filters)}\n        compact_module_defs = deepcopy(model.module_defs)\n        for i in model_copy.module_defs:\n            if i['type'] == 'shortcut':\n                i['is_access'] = False\n        merge_mask(model_copy, CBLidx2mask, CBLidx2filters, base=channel_base)\n\n        for idx, num in CBLidx2filters.items():\n            #num = get_nearest_multiple(num,channel_base)\n            assert compact_module_defs[idx]['type'] == 
'convolutional'\n            compact_module_defs[idx]['filters'] = str(num)\n        compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)\n        current_parameters = obtain_num_parameters(compact_model)\n        # print(current_parameters/origin_nparameters, end='；')\n        current_macs = performance_summary(compact_model)\n        # macs = flops/2\n        if current_macs / origin_macs > remain_ratio + delta or current_macs / origin_macs < remain_ratio - delta:\n            # print('missing')\n            del model_copy\n            del compact_model\n            torch.cuda.empty_cache()\n            continue\n        print(\"yes---\")\n\n        for i in CBLidx2mask:\n            CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()\n        pruned_model = prune_model_keep_size_forEagleEye(model, prune_idx, CBLidx2mask)\n        init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)\n        compact_model.train()\n        with torch.no_grad():\n            for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader)):\n                imgs = imgs.cuda().float() / 255.0\n                compact_model(imgs)\n                if batch_i > steps:\n                    break\n        del model_copy\n        torch.cuda.empty_cache()\n        break\n    return compact_module_defs, current_parameters, compact_model\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/pretrain_weights/yolov3.weights',\n                        help='sparse model weights')\n    parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')\n    parser.add_argument('--delta', type=float, default=0.05, help='delta')\n    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    parser.add_argument('--number', type=int, default=200, help='number of subnetwork')\n    parser.add_argument('--channel-base', type=int, default=8, help='channel of subnetwork is multiple of channel-base')\n    opt = parser.parse_args()\n    print(opt)\n\n    t0 = time.time()\n    remain_ratio = 1 - opt.percent\n    number = opt.number\n    img_size = opt.img_size\n    batch_size = opt.batch_size\n    delta = opt.delta\n    channel_base = opt.channel_base\n    hyp = {'giou': 3.54,  # giou loss gain\n           'cls': 37.4,  # cls loss gain\n           'cls_pw': 1.0,  # cls BCELoss positive_weight\n           'obj': 64.3,  # obj loss gain (*=img_size/320 if img_size != 320)\n           'obj_pw': 1.0,  # obj BCELoss positive_weight\n           'iou_t': 0.20,  # iou training threshold\n           'lr0': 0.01,  # initial learning rate (SGD=5E-3, Adam=5E-4)\n           'lrf': 0.0005,  # final learning rate (with cos scheduler)\n           'momentum': 0.937,  # SGD momentum\n           'weight_decay': 0.0005,  # optimizer weight decay\n           'fl_gamma': 0.0,  # focal loss gamma (efficientDet default is gamma=1.5)\n           'hsv_h': 0.0138,  # image HSV-Hue augmentation (fraction)\n           'hsv_s': 0.678,  # image HSV-Saturation augmentation (fraction)\n           'hsv_v': 0.36,  # image HSV-Value augmentation (fraction)\n           
'degrees': 1.98 * 0,  # image rotation (+/- deg)\n           'translate': 0.05 * 0,  # image translation (+/- fraction)\n           'scale': 0.05 * 0,  # image scale (+/- gain)\n           'shear': 0.641 * 0}  # image shear (+/- deg)\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg).to(device)\n\n    if opt.weights:\n        if opt.weights.endswith(\".pt\"):\n            model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n        else:\n            _ = load_darknet_weights(model, opt.weights)\n\n    data_config = parse_data_cfg(opt.data)\n\n    valid_path = data_config[\"valid\"]\n    train_path = data_config[\"train\"]\n    class_names = load_classes(data_config[\"names\"])\n    steps = math.ceil((len(open(train_path).readlines()) / batch_size) * 0.1)\n\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    dataset = LoadImagesAndLabels(train_path,\n                                  img_size,\n                                  batch_size,\n                                  augment=True,\n                                  hyp=hyp,  # augmentation hyperparameters\n                                  rect=False,  # rectangular training\n                                  cache_images=False)\n\n    dataloader = torch.utils.data.DataLoader(dataset,\n                                             batch_size=batch_size,\n                                             num_workers=min([os.cpu_count(), batch_size, 16]),\n                                             shuffle=True,  # Shuffle=True unless rectangular training is used\n                                             pin_memory=True,\n                                             collate_fn=dataset.collate_fn)\n\n    test_dataset = LoadImagesAndLabels(valid_path, img_size, batch_size,\n                                       hyp=hyp,\n                                       rect=True,\n                                       cache_images=False)\n    testloader = torch.utils.data.DataLoader(test_dataset,\n                                             batch_size=batch_size,\n                                             num_workers=min([os.cpu_count(), batch_size, 8]),\n                                             shuffle=False,\n                                             pin_memory=True,\n                                             collate_fn=test_dataset.collate_fn)\n    with torch.no_grad():\n        origin_model_metric = test.test(opt.cfg,\n                                        opt.data,\n                                        batch_size=batch_size,\n                                        imgsz=img_size,\n                                        model=model,\n                                        dataloader=testloader,\n                                        rank=-1,\n                                        plot=False)\n    origin_nparameters = obtain_num_parameters(model)\n    origin_macs = performance_summary(model)\n\n    CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2(model.module_defs)\n\n    print(\"-------------------------------------------------------\")\n\n    max_mAP = 0\n    for i in range(number):\n        compact_module_defs, current_parameters, compact_model = rand_prune_and_eval(model, 0, 1)\n        with torch.no_grad():\n            # Raise the NMS conf threshold so that poor randomly generated candidates do not exhaust GPU memory\n            mAP = test.test(opt.cfg,\n                            opt.data,\n                            
batch_size=batch_size,\n                            imgsz=img_size,\n                            conf_thres=0.1,\n                            model=compact_model,\n                            dataloader=testloader,\n                            rank=-1,\n                            plot=False)[0][2]\n\n        print('candidate: ' + str(i), end=\" \")\n        print('remain_ratio: ' + str(current_parameters / origin_nparameters))\n        print(f'mAP of the pruned model is {mAP:.4f}')\n        if mAP > max_mAP:\n            max_mAP = mAP\n            compact_model_winner = deepcopy(compact_model)\n            cfg_name = 'cfg_backup/' + str(i) + '.cfg'\n            if not os.path.isdir('cfg_backup/'):\n                os.makedirs('cfg_backup/')\n            pruned_cfg_file = write_cfg(cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n        del compact_model\n        torch.cuda.empty_cache()\n    # Get the winning model's module_defs (the original defs with the conv filter counts updated)\n    compact_module_defs = deepcopy(compact_model_winner.module_defs)\n    compact_nparameters = obtain_num_parameters(compact_model_winner)\n\n    compact_macs = performance_summary(compact_model_winner)\n    compact_flops = compact_macs * 2 / 1024**3\n    origin_flops = origin_macs * 2 / 1024**3\n\n    random_input = torch.rand((16, 3, 416, 416)).to(device)\n\n    pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, model)\n    compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model_winner)\n\n    # Evaluate the pruned model on the test set and count its parameters\n    with torch.no_grad():\n        compact_model_metric = test.test(opt.cfg,\n                                         opt.data,\n                                         batch_size=batch_size,\n                                         imgsz=img_size,\n                                         model=compact_model_winner,\n                                         dataloader=testloader,\n                                         rank=-1,\n                                         plot=False)\n\n    # Compare parameter counts and metric performance before and after pruning\n    metric_table = [\n        [\"Metric\", \"Before\", \"After\"],\n        [\"mAP\", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters}\"],\n        [\"GFLOPs\", f\"{origin_flops}\", f\"{compact_flops}\"],\n        [\"Inference\", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    # Generate the pruned cfg file and save the model\n    pruned_cfg_name = 'cfg/rand-slim_' + str(remain_ratio) + '_' + str(number) + '/' + 'rand-slim_' + str(\n        remain_ratio) + '_' + str(number) + '.cfg'\n    # Create the output directory\n    dir_name = 'cfg/rand-slim_' + str(remain_ratio) + '_' + str(number) + '/'\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n\n    # compact_module_defs parsed the anchors from a string into an array, so convert the anchors back into a string here\n    file = open(opt.cfg, 'r')\n    lines = file.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    file.close()\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n        
    item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + 'rand-slim_' + str(remain_ratio) + '_' + str(number) + '.weights'\n\n    save_weights(compact_model_winnner, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n    print('%g sub networks completed in %.3f hours.\\n' % (number, (time.time() - t0) / 3600))\n"
  },
  {
    "path": "EagleEye_slim_prune.py",
    "content": "from models import *\nfrom utils.utils import *\nfrom utils.prune_utils import *\nfrom utils.datasets import *\nimport os\nimport test\nimport argparse\nfrom thop import profile\n\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\ndef obtain_filters_mask(model, CBL_idx, prune_idx, idx_mask):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    # CBL_idx存储的是所有带BN的卷积层（YOLO层的前一层卷积层是不带BN的）\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n            mask = idx_mask[idx]\n            # mask = obtain_bn_mask(bn_module, thre).cpu().numpy()\n            remain = int(mask.sum())\n            pruned = pruned + mask.shape[0] - remain\n\n            if remain == 0:\n                print(\"Channels would be all pruned!\")\n                raise Exception\n\n        else:\n            mask = torch.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        filters_mask.append(mask.clone())\n\n    return num_filters, filters_mask\n\n\ndef obtain_l1_mask(conv_module, random_rate):\n    w_copy = conv_module.weight.data.abs().clone()\n    w_copy = torch.sum(w_copy, dim=(1, 2, 3))\n    length = w_copy.cpu().numpy().shape[0]\n    num_retain = int(length * (1 - random_rate))\n    if num_retain == 0:\n        num_retain = 1\n    _, y = torch.topk(w_copy, num_retain)\n    mask = torch.zeros(length, dtype=torch.float32).to(w_copy.device)\n    mask[y] = 1\n\n    return mask\n\n#macs = flops / 2\ndef performance_summary(model, opt=None, prefix=\"\"):\n    macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640).to(device),), verbose=False)\n    return macs\n\ndef rand_prune_and_eval(model, min_rate, max_rate):\n    while True:\n        model_copy = deepcopy(model)\n        remain_num = 0\n        idx_new = dict()\n        for idx in prune_idx:\n            # bn_module = model_copy.module_list[idx][1]\n            conv_module = model_copy.module_list[idx][0]\n\n            random_rate = (max_rate - min_rate) * (np.random.rand(1)) + min_rate\n            mask = obtain_l1_mask(conv_module, random_rate)\n\n            idx_new[idx] = mask\n            remain_num += int(mask.sum())\n            conv_module.weight.data = conv_module.weight.data.permute(1, 2, 3, 0).mul(mask).float().permute(3, 0, 1, 2)\n            # bn_module.weight.data.mul_(mask)\n\n        # ---------------\n        num_filters, filters_mask = obtain_filters_mask(model_copy, CBL_idx, prune_idx, idx_new)\n        CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n        CBLidx2filters = {idx: filters for idx, filters in zip(CBL_idx, num_filters)}\n        compact_module_defs = deepcopy(model.module_defs)\n        for i in model_copy.module_defs:\n            if i['type'] == 'shortcut':\n                i['is_access'] = False\n        merge_mask(model_copy, CBLidx2mask, CBLidx2filters)\n\n        for idx, num in CBLidx2filters.items():\n            assert compact_module_defs[idx]['type'] == 'convolutional'\n            compact_module_defs[idx]['filters'] = str(num)\n        compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)\n        current_parameters = 
obtain_num_parameters(compact_model)\n        # print(current_parameters/origin_nparameters, end='；')\n        current_macs = performance_summary(compact_model)\n        # macs = flops/2\n        if current_macs / origin_macs > remain_ratio + delta or current_macs / origin_macs < remain_ratio - delta:\n            # print('missing')\n            del model_copy\n            del compact_model\n            torch.cuda.empty_cache()\n            continue\n        print(\"yes---\")\n\n        for i in CBLidx2mask:\n            CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()\n        pruned_model = prune_model_keep_size_forEagleEye(model, prune_idx, CBLidx2mask)\n        init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)\n        compact_model.train()\n        with torch.no_grad():\n            for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader)):\n                imgs = imgs.cuda().float() / 255.0\n                compact_model(imgs)\n                if batch_i > steps:\n                    break\n        del model_copy\n        torch.cuda.empty_cache()\n        break\n    return compact_module_defs, current_parameters, compact_model\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/pretrain_weights/yolov3.weights',\n                        help='sparse model weights')\n    parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')\n    parser.add_argument('--delta', type=float, default=0.05, help='delta')\n    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    parser.add_argument('--number', type=int, default=200, help='number of subnetwork')\n    opt = parser.parse_args()\n    print(opt)\n\n    t0 = time.time()\n    remain_ratio = 1 - opt.percent\n    number = opt.number\n    img_size = opt.img_size\n    batch_size = opt.batch_size\n    delta = opt.delta\n\n    hyp = {'giou': 3.54,  # giou loss gain\n           'cls': 37.4,  # cls loss gain\n           'cls_pw': 1.0,  # cls BCELoss positive_weight\n           'obj': 64.3,  # obj loss gain (*=img_size/320 if img_size != 320)\n           'obj_pw': 1.0,  # obj BCELoss positive_weight\n           'iou_t': 0.20,  # iou training threshold\n           'lr0': 0.01,  # initial learning rate (SGD=5E-3, Adam=5E-4)\n           'lrf': 0.0005,  # final learning rate (with cos scheduler)\n           'momentum': 0.937,  # SGD momentum\n           'weight_decay': 0.0005,  # optimizer weight decay\n           'fl_gamma': 0.0,  # focal loss gamma (efficientDet default is gamma=1.5)\n           'hsv_h': 0.0138,  # image HSV-Hue augmentation (fraction)\n           'hsv_s': 0.678,  # image HSV-Saturation augmentation (fraction)\n           'hsv_v': 0.36,  # image HSV-Value augmentation (fraction)\n           'degrees': 1.98 * 0,  # image rotation (+/- deg)\n           'translate': 0.05 * 0,  # image translation (+/- fraction)\n           'scale': 0.05 * 0,  # image scale (+/- gain)\n           'shear': 0.641 * 0}  # image shear (+/- deg)\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = 
Darknet(opt.cfg).to(device)\n\n    if opt.weights:\n        if opt.weights.endswith(\".pt\"):\n            model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n        else:\n            _ = load_darknet_weights(model, opt.weights)\n\n    data_config = parse_data_cfg(opt.data)\n\n    valid_path = data_config[\"valid\"]\n    train_path = data_config[\"train\"]\n    class_names = load_classes(data_config[\"names\"])\n    steps = math.ceil((len(open(train_path).readlines()) / batch_size) * 0.1)\n\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    dataset = LoadImagesAndLabels(train_path,\n                                  img_size,\n                                  batch_size,\n                                  augment=True,\n                                  hyp=hyp,  # augmentation hyperparameters\n                                  rect=False,  # rectangular training\n                                  cache_images=False)\n\n    dataloader = torch.utils.data.DataLoader(dataset,\n                                             batch_size=batch_size,\n                                             num_workers=min([os.cpu_count(), batch_size, 16]),\n                                             shuffle=True,  # Shuffle=True unless rectangular training is used\n                                             pin_memory=True,\n                                             collate_fn=dataset.collate_fn)\n\n    test_dataset = LoadImagesAndLabels(valid_path, img_size, batch_size,\n                                       hyp=hyp,\n                                       rect=True,\n                                       cache_images=False)\n    testloader = torch.utils.data.DataLoader(test_dataset,\n                                             batch_size=batch_size,\n                                             num_workers=min([os.cpu_count(), batch_size, 8]),\n                                             shuffle=False,\n                                             pin_memory=True,\n                                             collate_fn=test_dataset.collate_fn)\n    with torch.no_grad():\n        origin_model_metric = test.test(opt.cfg,\n                                        opt.data,\n                                        batch_size=batch_size,\n                                        imgsz=img_size,\n                                        model=model,\n                                        dataloader=testloader,\n                                        rank=-1,\n                                        plot=False)\n    origin_nparameters = obtain_num_parameters(model)\n    origin_macs = performance_summary(model)\n\n    CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2(model.module_defs)\n\n    print(\"-------------------------------------------------------\")\n\n    max_mAP = 0\n    for i in range(number):\n        compact_module_defs, current_parameters, compact_model = rand_prune_and_eval(model, 0, 1)\n        with torch.no_grad():\n            # Raise the NMS conf threshold so that poor randomly generated candidates do not exhaust GPU memory\n            mAP = test.test(opt.cfg,\n                            opt.data,\n                            batch_size=batch_size,\n                            imgsz=img_size,\n                            conf_thres=0.1,\n                            model=compact_model,\n                            dataloader=testloader,\n                            rank=-1,\n                            plot=False)[0][2]\n\n        print('candidate: ' + str(i), 
end=\" \")\n        print('remain_ratio: ' + str(current_parameters / origin_nparameters))\n        print(f'mAP of the pruned model is {mAP:.4f}')\n        if mAP > max_mAP:\n            max_mAP = mAP\n            compact_model_winnner = deepcopy(compact_model)\n            cfg_name = 'cfg_backup/' + str(i) + '.cfg'\n            if not os.path.isdir('cfg_backup/'):\n                os.makedirs('cfg_backup/')\n            pruned_cfg_file = write_cfg(cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n        del compact_model\n        torch.cuda.empty_cache()\n    # 获得原始模型的module_defs，并修改该defs中的卷积核数量\n    compact_module_defs = deepcopy(compact_model_winnner.module_defs)\n    compact_nparameters = obtain_num_parameters(compact_model_winnner)\n\n    compact_macs =  performance_summary(compact_model_winnner)\n    compact_flops =  compact_macs*2 / 1024**3\n    origin_flops = origin_macs*2 / 1024**3\n\n    random_input = torch.rand((16, 3, 416, 416)).to(device)\n\n    pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, model)\n    compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model_winnner)\n\n    # 在测试集上测试剪枝后的模型, 并统计模型的参数数量\n    with torch.no_grad():\n        compact_model_metric = test.test(opt.cfg,\n                                         opt.data,\n                                         batch_size=batch_size,\n                                         imgsz=img_size,\n                                         model=compact_model_winnner,\n                                         dataloader=testloader,\n                                         rank=-1,\n                                         plot=False)\n\n    # 比较剪枝前后参数数量的变化、指标性能的变化\n    metric_table = [\n        [\"Metric\", \"Before\", \"After\"],\n        [\"mAP\", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters}\"],\n        [\"GFLOPs\",f\"{origin_flops}\",f\"{compact_flops}\"],\n        [\"Inference\", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    # 生成剪枝后的cfg文件并保存模型\n    pruned_cfg_name = 'cfg/rand-slim_' + str(remain_ratio) + '_' + str(number) + '/' + 'rand-slim_' + str(\n        remain_ratio) + '_' + str(number) + '.cfg'\n    # 创建存储目录\n    dir_name = 'cfg/rand-slim_' + str(remain_ratio) + '_' + str(number) + '/'\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n\n    # 由于原始的compact_module_defs将anchor从字符串变为了数组，因此这里将anchors重新变为字符串\n    file = open(opt.cfg, 'r')\n    lines = file.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    file.close()\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not 
os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + 'rand-slim_' + str(remain_ratio) + '_' + str(number) + '.weights'\n\n    save_weights(compact_model_winner, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n    print('%g sub-networks completed in %.3f hours.\\n' % (number, (time.time() - t0) / 3600))\n"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  
To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  
A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. 
Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  
Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  
The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. 
Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  
If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<https://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<https://www.gnu.org/licenses/why-not-lgpl.html>.\n"
  },
  {
    "path": "PTQ.py",
    "content": "import argparse\nimport test\nfrom torch.utils.data import DataLoader\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\n\nwdir = 'weights' + os.sep  # weights dir\nPTQ_weights = wdir + 'PTQ.pt'\n\n\ndef PTQ(cfg,\n        data,\n        weights=None,\n        batch_size=64,\n        imgsz=416,\n        augment=False,\n        a_bit=8,\n        w_bit=8, ):\n    # Initialize/load model and set device\n    device = torch_utils.select_device(opt.device, batch_size=batch_size)\n    print('PTQ only support for one gpu!')\n    print('')  # skip a line\n    # Initialize model\n    model = Darknet(cfg, is_gray_scale=opt.gray_scale, maxabsscaler=opt.maxabsscaler)\n    q_model = Darknet(cfg, quantized=3, a_bit=a_bit, w_bit=w_bit, is_gray_scale=opt.gray_scale,\n                      maxabsscaler=opt.maxabsscaler,\n                      shortcut_way=opt.shortcut_way)\n\n    # Load weights\n    attempt_download(weights)\n    if weights.endswith('.pt'):  # pytorch format\n        model.load_state_dict(torch.load(weights, map_location=device)['model'])\n        q_model.load_state_dict(torch.load(weights, map_location=device)['model'])\n    else:  # darknet format\n        load_darknet_weights(model, weights)\n        load_darknet_weights(q_model, weights, quant=True)\n\n    model.to(device)\n    q_model.to(device)\n\n    # Configure run\n    data_dict = parse_data_cfg(data)\n    cali_path = data_dict['train']\n    test_path = data_dict['valid']\n\n    # Dataloader\n    cali_dataset = LoadImagesAndLabels(cali_path, imgsz, batch_size, rect=True,\n                                    is_gray_scale=True if opt.gray_scale else False, subset_len=opt.subset_len)\n    cali_batch_size = min(batch_size, len(cali_dataset))\n    cali_dataloader = DataLoader(cali_dataset,\n                              batch_size=cali_batch_size,\n                              num_workers=min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]),\n                              pin_memory=True,\n                              collate_fn=cali_dataset.collate_fn)\n\n    test_dataset = LoadImagesAndLabels(test_path, imgsz, batch_size, rect=True,\n                                    is_gray_scale=True if opt.gray_scale else False)\n    test_batch_size = min(batch_size, len(test_dataset))\n    test_dataloader = DataLoader(test_dataset,\n                              batch_size=test_batch_size,\n                              num_workers=min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]),\n                              pin_memory=True,\n                              collate_fn=test_dataset.collate_fn)\n    print('')  # skip a line\n    print('<.....................test original model.......................>')\n    test.test(cfg,\n              data=opt.data,\n              batch_size=batch_size,\n              imgsz=imgsz,\n              model=model,\n              dataloader=test_dataloader,\n              rank=-1,\n              maxabsscaler=opt.maxabsscaler)\n\n    q_model.train()\n    print('')  # skip a line\n    print('<.....................Quantize.......................>')\n\n    for batch_i, (imgs, _, _, _) in enumerate(tqdm(cali_dataloader)):\n        if opt.maxabsscaler:\n            imgs = imgs.to(device).float() / 256.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0\n            imgs = imgs * 2 - 1\n        else:\n            imgs = imgs.to(device).float() / 256.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0\n        # Disable gradients\n        with torch.no_grad():\n            
            _, _ = q_model(imgs, augment=augment)  # inference and training outputs\n    print('')  # skip a line\n    print('<.....................test quantized model.......................>')\n    print('')  # skip a line\n    test.test(cfg,\n              data=opt.data,\n              batch_size=batch_size,\n              imgsz=imgsz,\n              model=q_model,\n              dataloader=test_dataloader,\n              quantized=3,\n              a_bit=opt.a_bit,\n              w_bit=opt.w_bit,\n              rank=-1,\n              maxabsscaler=opt.maxabsscaler)\n\n    # Save the calibrated, quantized checkpoint\n    if hasattr(q_model, 'module'):\n        model_temp = q_model.module.state_dict()\n    else:\n        model_temp = q_model.state_dict()\n    chkpt = {'epoch': None,\n             'best_fitness': None,\n             'training_results': None,\n             'model': model_temp,\n             'optimizer': None}\n    torch.save(chkpt, PTQ_weights)\n    del chkpt\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(prog='PTQ.py')\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')\n    parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data path')\n    parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')\n    parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')\n    parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')\n    parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')\n    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')\n    parser.add_argument('--augment', action='store_true', help='augmented inference')\n    parser.add_argument('--a-bit', type=int, default=8, help='activation quantization bit width')\n    parser.add_argument('--w-bit', type=int, default=8, help='weight quantization bit width')\n    parser.add_argument('--subset_len', type=int, default=-1, help='calibration set length')\n    parser.add_argument('--gray_scale', action='store_true', help='gray-scale training')\n    parser.add_argument('--maxabsscaler', '-mas', action='store_true', help='Standardize input to (-1, 1)')\n    parser.add_argument('--shortcut_way', type=int, default=1, help='shortcut quantization way')\n    opt = parser.parse_args()\n    opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0]  # find file\n    opt.data = list(glob.iglob('./**/' + opt.data, recursive=True))[0]  # find file\n\n    print(opt)\n\n    PTQ(opt.cfg,\n        opt.data,\n        opt.weights,\n        opt.batch_size,\n        opt.img_size,\n        opt.augment,\n        a_bit=opt.a_bit,\n        w_bit=opt.w_bit)\n"
  },
  {
    "path": "README.md",
    "content": "# YOLOv3-ModelCompression-MultidatasetTraining\n\nThis project mainly include three parts.\n\n1.Provides training methods for multiple mainstream object detection datasets(coco2017, coco2014, BDD100k, Visdrone,\nHand)\n\n2.Provides a mainstream model compression algorithm including pruning, quantization, and knowledge distillation.\n\n3.Provides multiple backbone for yolov3 including Darknet-YOLOv3，Tiny-YOLOv3，Mobilenetv3-YOLOv3\n\nSource using Pytorch implementation to [ultralytics/yolov3](https://github.com/ultralytics/yolov3) for yolov3 source\ncode. Pruning method based on BN layer\nby [coldlarry/YOLOv3-complete-pruning](https://github.com/coldlarry/YOLOv3-complete-pruning), thanks to both of you.\n\n**If you can't download weights file and datasets from BaiDu, please send e-mail(spurslipu@pku.edu.cn) to me, I will \nrely as soon as I can.**\n\n# Update\n\nJanuary 4, 2020. Provides download links and training methods to the Visdrone dataset.\n\nJanuary 19, 2020. Dior, Bdd100k and Visdrone training will be provided, as well as the converted weights file.\n\nMarch 1, 2020. Provides Mobilenetv3 backbone.\n\nApril 7, 2020. Implement two models based on Mobilenetv3: Yolov3-Mobilenet, and Yolov3tin-Mobilene-small, provide\npre-training weights, extend the normal pruning methods to the two Mobilenet-based models.\n\nApril 27, 2020. Update mobilenetv3 pre-training weights, add a layer pruning method, methods from\nthe [tanluren/yolov3-channel-and-layer-pruning/yolov3](https://github.com/tanluren/yolov3-channel-and-layer-pruning),\nThanks for sharing.\n\nMay 22, 2020. Updated some new optimizations from [ultralytics/yolov3](https://github.com/ultralytics/yolov3), update\ncfg file and weights of YOLOv4.\n\nMay 22, 2020. The 8-bit quantization method was updated and some bugs were fixed.\n\nJuly 12, 2020. The problem of mAP returning to 0 after pruning in yolov3-mobilenet was fixed. See issue#41 for more\ndetails.\n\nSeptember 30, 2020. The BN_Fold training method was updated to reduce the precision loss caused by BN fusion, and the\nPOW (2) quantization method targeted at FPGA was updated. See the quantization section for details.\n\n# Requirements\n\nOur project based on [ultralytics/yolov3](https://github.com/ultralytics/yolov3),\nsee [ultralytics/yolov3](https://github.com/ultralytics/yolov3) for details. Here is a brief explanation:\n\n- `numpy`\n- `torch >= 1.1.0`\n- `opencv-python`\n- `tqdm`\n\n# Current support\n\n|<center>Function</center>|<center></center>|\n| --- |--- |\n|<center>Multi-Backbone training</center>|<center>√</center>  |\n|<center>Multi-Datasets</center>|<center>√</center>  |\n|<center>Pruning</center>|<center>√</center>  |\n|<center>Quantization</center>|<center>√</center>  |\n|<center>Knowledge Distillation</center>|<center>√</center>  |\n\n# Training\n\n`python3 train.py --data ... --cfg ... `For training model command, the -pt command is required when using coco\npre-training model.\n\n`python3 test.py --data ... --cfg ... ` For testing model command\n\n`python3 detect.py --data ... --cfg ... 
--source ...` is the detection command; the default source directory is\ndata/samples, the output is saved in /output, and the source can be images or videos.\n\n# Multi-Datasets\n\nThis project provides preprocessed datasets for YOLOv3, configuration files (.cfg), dataset index files (.data),\ndataset category files (.names), and anchor box sizes (9 anchors for YOLOv3 and 6 for tiny-YOLOv3) that\nare re-clustered using the K-means algorithm.\n\nmAP\n\n|<center>Dataset</center>|<center>YOLOv3-640</center>|<center>YOLOv4-640</center>|<center>YOLOv3-mobilenet-640</center>|\n| --- |--- |--- |--- |\n|<center>Dior</center>|<center>0.749</center>|<center>-</center>|<center>-</center>|\n|<center>bdd100k</center>|<center>0.543</center>|<center>-</center>|<center>-</center>|\n|<center>visdrone</center>|<center>0.311</center>|<center>0.383</center>|<center>0.348</center>|\n\nDatasets: download and unzip to /data.\n\n- [COCO2017](https://pan.baidu.com/s/1KysFL6AmdbCBq4tHDebqlw)\n\n  Extract code: hjln\n\n- [COCO2014](https://pan.baidu.com/s/1EoXOR77yEVokqPCaxg8QGg)\n\n  Extract code: rhqx\n\n- [COCO weights](https://pan.baidu.com/s/1JZylwRQIgAd389oWUu0djg)\n\n  Extract code: k8ms\n\nTraining command\n\n```bash\npython3 train.py --data data/coco2017.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3.cfg --img-size ... --epochs ...\n```\n\n- [Dior](https://pan.baidu.com/s/1z0IQPBN16I-EctjwN9Idyg)\n\n  Extract code: vnuq\n\n- [Dior weights](https://pan.baidu.com/s/12lYOgBAo1R5VkOZqDqCFJQ)\n\n  Extract code: l8wz\n\nTraining command\n\n```bash\npython3 train.py --data data/dior.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3-onDIOR.cfg --img-size ... --epochs ...\n```\n\n- [bdd100k](https://pan.baidu.com/s/157Md2qeFgmcOv5UmnIGI_g)\n\n  Extract code: 8duw\n\n- [bdd100k weights](https://pan.baidu.com/s/1wWiHlLxIaK_WHy_mG2wmAA)\n\n  Extract code: xeqo\n\nTraining command\n\n```bash\npython3 train.py --data data/bdd100k.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3-bdd100k.cfg --img-size ... --epochs ...\n```\n\n- [visdrone](https://pan.baidu.com/s/1CPGmS3tLI7my4_m7qDhB4Q)\n\n  Extract code: dy4c\n\n- [YOLOv3-visdrone weights](https://pan.baidu.com/s/1N4qDP3b0tt8TIWuTFefDEw)\n\n  Extract code: 87lf\n\n- [YOLOv4-visdrone weights](https://pan.baidu.com/s/1zOFyt_AFiNk0fAFa8yE9RQ)\n\n  Extract code: xblu\n\n- [YOLOv3-mobilenet-visdrone weights](https://pan.baidu.com/s/1BHC8b6xHmTuN8h74QJFt1g)\n\n  Extract code: fb6y\n\nTraining command\n\n```bash\npython train.py --data data/visdrone.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3-visdrone.cfg  --img-size ... --epochs ...\n```\n\n- [oxfordhand](https://pan.baidu.com/s/1JL4gFGh-W_gYEEsiIQssZw)\n\n  Extract code: 3du4\n\nTraining command\n\n```bash\npython train.py --data data/oxfordhand.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3-hand.cfg  --img-size ... --epochs ...\n```\n\n## 1.Dior\n\nThe DIOR dataset is one of the largest, most diverse, publicly available object detection datasets in the Earth\nobservation community. The number of instances of ships and vehicles is high, which achieves a good balance\nbetween small instances and large ones. 
The images were collected from Google Earth.\n\n[Introduction](https://cloud.tencent.com/developer/article/1509762)\n\n### Test results\n\n![Test results](https://github.com/SpursLipu/YOLOv3-ModelCompression-MultidatasetTraining/blob/master/image_in_readme/2.jpg)\n![Test results](https://github.com/SpursLipu/YOLOv3-ModelCompression-MultidatasetTraining/blob/master/image_in_readme/3.jpg)\n\n## 2.bdd100k\n\nBDD100K is a large, diverse dataset of 100,000 driving videos. Each video is about 40 seconds long, and the researchers\nannotated bounding boxes for objects that commonly appear on the road in all 100,000 key frames. The dataset covers\ndifferent weather conditions, including sunny, cloudy and rainy days, as well as different times of day and night.\n\n[Website](http://bair.berkeley.edu/blog/2018/05/30/bdd/)\n\n[Download](http://bdd-data.berkeley.edu)\n\n[Paper](https://arxiv.org/abs/1805.04687)\n\n### Test results\n\n![Test results](https://github.com/SpursLipu/YOLOv3-ModelCompression-MultidatasetTraining/blob/master/image_in_readme/1.jpg)\n\n## 3.Visdrone\n\nThe VisDrone2019 dataset was collected by the AISKYEYE team at the Machine Learning and Data Mining Laboratory of\nTianjin University, China. The benchmark contains 288 video clips, consisting of 261,908 frames and 10,209 static\nimages, captured by various drone-mounted cameras and covering a wide range of aspects, including location (14\ndifferent cities across China, thousands of kilometers apart), environment (urban and rural), objects (pedestrians,\nvehicles, bicycles, etc.) and density (sparse and crowded scenes). The data was collected using a variety of drone\nplatforms (i.e., drones of different models) in various scenarios and under various weather and lighting conditions.\nThe frames are manually annotated with more than 2.6 million bounding boxes of frequently occurring targets of\ninterest, such as pedestrians, cars, bicycles and tricycles. 
Some important attributes, including\nscene visibility, object category and occlusion, are also provided to improve data utilization.\n\n[Website](http://www.aiskyeye.com/)\n\n### Test results of YOLOv3\n\n![Test results](https://github.com/SpursLipu/YOLOv3-ModelCompression-MultidatasetTraining/blob/master/image_in_readme/4.jpg)\n\n### Test results of YOLOv4\n\n![Test results](https://github.com/SpursLipu/YOLOv3-ModelCompression-MultidatasetTraining/blob/master/image_in_readme/5.jpg)\n![Test results](https://github.com/SpursLipu/YOLOv3-ModelCompression-MultidatasetTraining/blob/master/image_in_readme/6.png)\n\n# Multi-Backbone\n\nBased on Mobilenetv3, two network structures are designed.\n\n|Structure |<center>backbone</center>|<center>Postprocessing</center> |<center>Parameters</center> |<center>GFLOPS</center> |<center>mAP0.5</center> |<center>mAP0.5:0.95</center> |<center>speed(inference/NMS/total)</center> |<center>FPS</center> |\n| --- | --- | --- | --- | --- | --- | --- | --- | --- |\n|YOLOv3                      |38.74M  |20.39M  |59.13M  |117.3   |0.580  |0.340  |12.3/1.7/14.0 ms|71.4fps  |\n|YOLOv3tiny                  |6.00M   |2.45M   |8.45M   |9.9     |0.347  |0.168  |3.5/1.8/5.3 ms  |188.7fps |\n|YOLOv3-mobilenetv3          |2.84M   |20.25M  |23.09M  |32.2    |0.547  |0.346  |7.9/1.8/9.7 ms  |103.1fps |\n|YOLOv3tiny-mobilenetv3-small|0.92M   |2.00M   |2.92M   |2.9     |0.379  |0.214  |5.2/1.9/7.1 ms  |140.8fps |\n|YOLOv4                      |-       |-       |61.35M  |107.1   |0.650  |0.438  |13.5/1.8/15.3 ms|65.4fps  |\n|YOLOv4-tiny                 |-       |-       |5.78M   |12.3    |0.435  |0.225  |4.1/1.7/5.8 ms  |172.4fps |\n\n1. YOLOv3, YOLOv3tiny and YOLOv4 were trained and tested on coco2014; YOLOv3-mobilenetv3 and\n   YOLOv3tiny-mobilenetv3-small were trained and tested on coco2017.\n\n2. Inference speed was tested on GTX2080Ti*4 with an image size of 608.\n\n3. The training set must match the testing set; a mismatch will produce incorrect mAP values.\n   Read [issue](https://github.com/ultralytics/yolov3/issues/970) for details.\n\n## Train command\n\n1.YOLOv3\n\n```bash\npython3 train.py --data data/... --batch-size ... -pt --weights weights/yolov3-608.weights --cfg cfg/yolov3/yolov3.cfg --img_size ...\n```\n\nWeights Download\n\n- [COCO pretraining weights](https://pan.baidu.com/s/1JZylwRQIgAd389oWUu0djg)\n\n  Extract code: k8ms\n\n2.YOLOv3tiny\n\n```bash\npython3 train.py --data data/... --batch-size ... -pt --weights weights/yolov3tiny.weights --cfg cfg/yolov3tiny/yolov3-tiny.cfg --img_size ...\n```\n\n- [COCO pretraining weights](https://pan.baidu.com/s/1iWGxdjR3TWxEe37__msyRA)\n\n  Extract code: udfe\n\n3.YOLOv3tiny-mobilenet-small\n\n```bash\npython3 train.py --data data/... --batch-size ... -pt --weights weights/yolov3tiny-mobilenet-small.weights --cfg cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-coco.cfg --img_size ...\n```\n\n- [COCO pretraining weights](https://pan.baidu.com/s/1mSFjWLU91H2OhNemsAeiiQ)\n\n  Extract code: pxz4\n\n4.YOLOv3-mobilenet\n\n```bash\npython3 train.py --data data/... --batch-size ... -pt --weights weights/yolov3-mobilenet.weights --cfg cfg/yolov3-mobilenet/yolov3-mobilenet-coco.cfg --img_size ...\n```\n\n- [COCO pretraining weights](https://pan.baidu.com/s/1EI2Xh1i18CRLoZo_P3NVHw)\n\n  Extract code: 3vm8\n\n5.YOLOv4\n\n```bash\npython3 train.py --data data/... --batch-size ... 
-pt --weights weights/yolov4.weights --cfg cfg/yolov4/yolov4.cfg --img_size ...\n```\n\n- [COCO pretraining weights](https://pan.baidu.com/s/1jAGNNC19oQhAIgBfUrkzmQ)\n\n  Extract code: njdg\n\n# Model Compression\n\n## 1. Pruning\n\n### Features\n\n|<center>method</center> |<center>advantage</center>|<center>disadvantage</center> |\n| --- | --- | --- |\n|Normal pruning        |Does not prune shortcut layers. A considerable and stable compression rate, with no fine-tuning required.|The compression rate is limited.  |\n|Shortcut pruning      |Very high compression rate.  |Fine-tuning is necessary.  |\n|Slimming              |A shortcut-fusion method is used to improve pruning precision; the best approach for shortcut pruning.|-|\n|Regular pruning       |Designed for hardware deployment: the number of filters after pruning is a multiple of 2; no fine-tuning; supports tiny-yolov3 and Mobilenet.|Part of the compression ratio is sacrificed for regularity. |\n|Layer pruning         |ResBlock is used as the basic unit for pruning, which is convenient for hardware deployment. |It can only prune the backbone. |\n|Layer-channel pruning |Channel pruning first, then layer pruning; the pruning rate is very high. |Accuracy may be affected. |\n\n### Step\n\n1.Training\n\n```bash\npython3 train.py --data ... -pt --batch-size ... --weights ... --cfg ...\n```\n\n2.Sparse training\n\n`--s` specifies the sparsity factor and `--prune` specifies the sparsity type. Sparse training drives the BN scale\nfactors of prunable channels toward zero; a sketch of the resulting channel selection follows the pruning commands\nbelow.\n\n`--prune 0` is the sparsity for normal pruning and regular pruning.\n\n`--prune 1` is the sparsity for shortcut pruning.\n\n`--prune 2` is the sparsity for layer pruning.\n\ncommand:\n\n```bash\npython3 train.py --data ... -pt --batch-size 32  --weights ... --cfg ... --s 0.001 --prune 0 \n```\n\n3.Pruning\n\n- normal pruning\n\n```bash\npython3 normal_prune.py --cfg ... --data ... --weights ... --percent ...\n```\n\n- regular pruning\n\n```bash\npython3 regular_prune.py --cfg ... --data ... --weights ... --percent ...\n```\n\n- shortcut pruning\n\n```bash\npython3 shortcut_prune.py --cfg ... --data ... --weights ... --percent ...\n```\n\n- slimming\n\n```bash\npython3 slim_prune.py --cfg ... --data ... --weights ... --percent ...\n```\n\n- layer pruning\n\n```bash\npython3 layer_prune.py --cfg ... --data ... --weights ... --shortcut ...\n```\n\n- layer-channel pruning\n\n```bash\npython3 layer_channel_prune.py --cfg ... --data ... --weights ... --shortcut ... --percent ...\n```\n
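\nA minimal sketch of the BN-based channel selection that the channel-pruning scripts above rely on (illustrative only: `bn_channel_masks` is a hypothetical name, and the repo's own threshold logic may differ in detail):\n\n```python\nimport torch\nimport torch.nn as nn\n\n\ndef bn_channel_masks(model, percent):\n    # Collect all BN scale factors (gamma); sparse training (--s) pushes many toward zero.\n    gammas = torch.cat([m.weight.data.abs().flatten()\n                        for m in model.modules() if isinstance(m, nn.BatchNorm2d)])\n    # Global threshold: prune the smallest `percent` fraction of channels.\n    k = min(int(len(gammas) * percent), len(gammas) - 1)\n    thresh = torch.sort(gammas)[0][k]\n    masks = {}\n    for name, m in model.named_modules():\n        if isinstance(m, nn.BatchNorm2d):\n            mask = (m.weight.data.abs() > thresh).float()\n            mask[m.weight.data.abs().argmax()] = 1.0  # always keep at least one channel\n            masks[name] = mask\n    return masks\n```\n\nWith too little sparsity, a high percent pushes the global threshold above most channels' gamma values, which is what triggers the error mentioned below.\n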
\nIt is important to note that the cfg and weights arguments need to point to the cfg and weights files generated by\nstep 2.\n\nIn addition, you can get more compression by increasing the percent value in the code.\n(If the sparsity is not sufficient and the percent value is too high, the program will report an error.)\n\n### Pruning experiment\n\n1.normal pruning, oxfordhand, img_size = 608, tested on GTX2080Ti*4\n\n|<center>model</center> |<center>parameter before pruning</center> |<center>mAP before pruning</center>|<center>inference time before pruning</center>|<center>percent</center> |<center>parameter after pruning</center> |<center>mAP after pruning</center> |<center>inference time after pruning</center>\n| --- | --- | --- | --- | --- | --- | --- | --- |\n|yolov3(without fine tuning)     |58.67M   |0.806   |0.1139s   |0.8    |10.32M |0.802 |0.0844s |\n|yolov3-mobilenet(fine tuning)   |22.75M   |0.812   |0.0345s   |0.97   |2.72M  |0.795 |0.0211s |\n|yolov3tiny(fine tuning)         |8.27M    |0.708   |0.0144s   |0.5    |1.13M  |0.641 |0.0116s |\n\n2.regular pruning, oxfordhand, img_size = 608, tested on GTX2080Ti*4\n\n|<center>model</center> |<center>parameter before pruning</center> |<center>mAP before pruning</center>|<center>inference time before pruning</center>|<center>percent</center> |<center>parameter after pruning</center> |<center>mAP after pruning</center> |<center>inference time after pruning</center>\n| --- | --- | --- | --- | --- | --- | --- | --- |\n|yolov3(without fine tuning)           |58.67M   |0.806   |0.1139s   |0.8    |12.15M |0.805 |0.0874s |\n|yolov3-mobilenet(fine tuning)   |22.75M   |0.812   |0.0345s   |0.97   |2.75M  |0.803 |0.0208s |\n|yolov3tiny(fine tuning)         |8.27M    |0.708   |0.0144s   |0.5    |1.82M  |0.703 |0.0122s |\n\n3.shortcut pruning, oxfordhand, img_size = 608, tested on GTX2080Ti*4\n\n|<center>model</center> |<center>parameter before pruning</center> |<center>mAP before pruning</center>|<center>inference time before pruning</center>|<center>percent</center> |<center>parameter after pruning</center> |<center>mAP after pruning</center> |<center>inference time after pruning</center>\n| --- | --- | --- | --- | --- | --- | --- | --- |\n|yolov3           |58.67M   |0.806   |-   |0.8    |6.35M  |0.816 |- |\n|yolov4           |60.94M   |0.896   |-   |0.6    |13.97M |0.855 |- |\n\n## 2. Quantization\n\n`--quantized 2` selects the DoReFa quantization method.\n\n```bash\npython train.py --data ... --batch-size ... --weights ... --cfg ... --img-size ... --epochs ... --quantized 2\n```\n\n`--quantized 1` selects the Google quantization method.\n\n```bash\npython train.py --data ... --batch-size ... --weights ... --cfg ... --img-size ... --epochs ... --quantized 1\n```\n
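\nFor intuition: the Google-style method simulates integer inference during training by quantizing and immediately dequantizing tensors, so-called fake quantization. Below is a minimal sketch of symmetric uniform fake quantization at a given bit width (illustrative only; the repo's `--quantized` modes implement their own variants):\n\n```python\nimport torch\n\n\ndef fake_quantize(x, bits=8):\n    # Map x onto the integer grid [-2^(b-1), 2^(b-1)-1], then back to floats,\n    # so the network is trained against its own quantization error.\n    qmax = 2 ** (bits - 1) - 1\n    scale = x.abs().max().clamp(min=1e-8) / qmax\n    q = torch.clamp(torch.round(x / scale), -qmax - 1, qmax)\n    return q * scale\n```\n\nFor the Google4bit rows in the experiment below, the same idea applies with bits=4.\n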
\n`--FPGA` enables Pow(2) quantization for FPGAs, whose power-of-two scales let dequantization be implemented as a bit\nshift in hardware.\n\n### Experiment\n\noxfordhand, yolov3, 640 image size\n\n|<center>method</center> |<center>mAP</center> |\n| --- | --- |\n|Baseline                     |0.847    |\n|Google8bit                   |0.851    |\n|Google8bit + BN Fold         |0.851    |\n|Google8bit + BN Fold + FPGA  |0.852    |\n|Google4bit + BN Fold + FPGA  |0.842    |\n\n## 3. Knowledge Distillation\n\n### Knowledge Distillation\n\nThe distillation method is based on the basic distillation method proposed by Hinton in 2015, partially adapted to the\ndetection network.\n\nDistilling the Knowledge in a Neural Network\n[paper](https://arxiv.org/abs/1503.02531)\n\ncommand: `--t_cfg --t_weights --KDstr`\n\n`--t_cfg` cfg file of the teacher model\n\n`--t_weights` weights file of the teacher model\n\n`--KDstr` KD strategy\n\n- `--KDstr 1` KL loss is computed directly between the teacher network's output and the student network's output and added to the overall loss.\n- `--KDstr 2` Box loss and class loss are treated separately, and the student does not learn directly from the teacher. L2 distances to the ground truth are computed for both student and teacher; when the student's distance is greater than the teacher's, an additional student-GT loss is added.\n- `--KDstr 3` Box loss and class loss are treated separately, and the student learns directly from the teacher.\n- `--KDstr 4` The KD loss is divided into three parts: box loss, class loss and feature loss.\n- `--KDstr 5` On the basis of KDstr 4, a fine-grained mask is applied to the feature loss.\n\nexample:\n\n```bash\npython train.py --data ... --batch-size ... --weights ... --cfg ... --img-size ... --epochs ... --t_cfg ... --t_weights ...\n```\n\nUsually, the pre-compression model is used as the teacher and the post-compression model as the student, and\ndistillation training improves the mAP of the student network.\n\n### Experiment\n\noxfordhand; yolov3tiny as the teacher model; normally pruned yolov3tiny as the student model\n\n|<center>teacher model</center> |<center>mAP of teacher model</center> |<center>student model</center>|<center>direct fine-tuning</center>|<center>KDstr 1</center> |<center>KDstr 2</center> |<center>KDstr 3</center>  |<center>KDstr 4(L1)</center> |<center>KDstr 5(L1)</center> |\n| --- | --- | --- | --- | --- | --- | --- |--- |--- |\n|yolov3tiny608   |0.708    |normal pruning yolov3tiny608    |0.658     |0.666    |0.661  |0.672   |0.673   |0.674   |"
  },
  {
    "path": "cfg/yolov2/yolov2-hand.cfg",
    "content": "[net]\n# Testing\nbatch = 1\nsubdivisions = 1\n# Training\n# batch=64\n# subdivisions=8\nwidth = 608\nheight = 608\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nburn_in = 1000\nmax_batches = 500200\npolicy = steps\nsteps = 400000,450000\nscales = .1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 32\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n\n#######\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 30\nactivation = linear\n\n\n[yolo]\nmask = 0,1,2,3,4\nanchors = 9,13, 15,21, 24,29, 38,43, 70,74\nclasses = 1\nnum = 5\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1"
  },
  {
    "path": "cfg/yolov2/yolov2-tiny-hand.cfg",
    "content": "[net]\nbatch = 64\nsubdivisions = 8\nwidth = 416\nheight = 416\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nmax_batches = 40200\npolicy = steps\nsteps = -1,100,20000,30000\nscales = .1,10,.1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 32\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n###########\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 30\nactivation = linear\n\n[yolo]\nmask = 0,1,2,3,4\nanchors = 9,13, 15,21, 24,29, 38,43, 70,74\nclasses = 1\nnum = 5\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n"
  },
  {
    "path": "cfg/yolov2/yolov2-tiny.cfg",
    "content": "[net]\nbatch = 64\nsubdivisions = 8\nwidth = 416\nheight = 416\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nmax_batches = 40200\npolicy = steps\nsteps = -1,100,20000,30000\nscales = .1,10,.1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 32\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n###########\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 425\nactivation = linear\n\n[yolo]\nmask = 0,1,2,3,4\nanchors = 7,12, 19,30, 45,61, 90,141, 240,279\nclasses = 80\nnum = 5\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n"
  },
  {
    "path": "cfg/yolov2/yolov2.cfg",
    "content": "[net]\n# Testing\nbatch = 1\nsubdivisions = 1\n# Training\n# batch=64\n# subdivisions=8\nwidth = 608\nheight = 608\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nburn_in = 1000\nmax_batches = 500200\npolicy = steps\nsteps = 400000,450000\nscales = .1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 32\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[maxpool]\nsize = 2\nstride = 2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n\n#######\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 425\nactivation = linear\n\n\n[yolo]\nmask = 0,1,2,3,4\nanchors = 7,12, 19,30, 45,61, 90,141, 240,279\nclasses = 80\nnum = 5\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1"
  },
  {
    "path": "cfg/yolov3/yolov3-UAV.cfg",
    "content": "\n[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n"
  },
  {
    "path": "cfg/yolov3/yolov3-asff.cfg",
    "content": "# Generated by Glenn Jocher (glenn.jocher@ultralytics.com) for https://github.com/ultralytics/yolov3\n# def kmean_anchors(path='../coco/train2017.txt', n=12, img_size=(320, 640)):  # from utils.utils import *; kmean_anchors()\n# Evolving anchors: 100%|██████████| 1000/1000 [41:15<00:00,  2.48s/it]\n# 0.20 iou_thr: 0.992 best possible recall, 4.25 anchors > thr\n# kmeans anchors (n=12, img_size=(320, 640), IoU=0.005/0.184/0.634-min/mean/best): 6,9,  15,16,  17,35,  37,26,  36,67,  63,42,  57,100,  121,81,  112,169,  241,158,  195,310,  426,359\n\n[net]\n# Testing\n# batch=1\n# subdivisions=1\n# Training\nbatch=64\nsubdivisions=16\nwidth=608\nheight=608\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# SPP --------------------------------------------------------------------------\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n# SPP 
--------------------------------------------------------------------------\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=258\nactivation=linear\n\n# YOLO -------------------------------------------------------------------------\n\n[route]\nlayers = -3\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=258\nactivation=linear\n\n# YOLO -------------------------------------------------------------------------\n\n[route]\nlayers = -3\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 36\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=258\nactivation=linear\n\n[yolo]\nfrom=88,99,110\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\n\n[yolo]\nfrom=88,99,110\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\n\n[yolo]\nfrom=88,99,110\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9"
  },
  {
    "path": "cfg/yolov3/yolov3-bdd100k.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 3,7, 5,18, 6,9, 10,32, 11,14, 17,21, 24,36, 45,59, 93,132\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 3,7, 5,18, 6,9, 10,32, 11,14, 17,21, 24,36, 45,59, 93,132\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 3,7, 5,18, 6,9, 10,32, 11,14, 17,21, 24,36, 45,59, 93,132\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3/yolov3-hand.cfg",
    "content": "\n[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n"
  },
  {
    "path": "cfg/yolov3/yolov3-onDIOR.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=75\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 5,5, 6,13, 10,26, 13,6, 15,15, 27,10, 28,33, 61,74, 167,169\nclasses=20\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=75\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 5,5, 6,13, 10,26, 13,6, 15,15, 27,10, 28,33, 61,74, 167,169\nclasses=20\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=75\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 5,5, 6,13, 10,26, 13,6, 15,15, 27,10, 28,33, 61,74, 167,169\nclasses=20\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3/yolov3-screw.cfg",
    "content": "[net]\n# Testing\n#  batch=1\n#  subdivisions=1\n# Training\nbatch=64\nsubdivisions=16\nwidth=256\nheight=256\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\n# learning_rate=0.0001\n# burn_in=1000\n# max_batches = 50200\n# policy=steps\n# steps=40000,45000\n# scales=.1,.1\nlearning_rate=0.0001\nburn_in=1000\nmax_batches = 40000\npolicy=steps\nsteps=20000,30000\nscales=.1,.1\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=21\nactivation=linear\n\n[yolo]\nmask = 6,7,8\nanchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87\nclasses=2\nnum=9\njitter=.3\nignore_thresh = .5\ntruth_thresh = 1\nrandom=1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=21\nactivation=linear\n\n[yolo]\nmask = 3,4,5\nanchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87\nclasses=2\nnum=9\njitter=.3\nignore_thresh = .5\ntruth_thresh = 1\nrandom=1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=21\nactivation=linear\n\n[yolo]\nmask = 0,1,2\nanchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87\nclasses=2\nnum=9\njitter=.3\nignore_thresh = .5\ntruth_thresh = 1\nrandom=1\n\n"
  },
  {
    "path": "cfg/yolov3/yolov3-ship.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch = 16\nsubdivisions = 1\nwidth = 416\nheight = 416\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nburn_in = 1000\nmax_batches = 500200\npolicy = steps\nsteps = 400000,450000\nscales = .1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 32\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 32\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 
256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n######################\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride 
= 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 30\nactivation = linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352\nclasses = 5\nnum = 9\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[upsample]\nstride = 2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 512\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 512\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 512\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 30\nactivation = linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352\nclasses = 5\nnum = 9\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[upsample]\nstride = 2\n\n[route]\nlayers = -1, 36\n\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 256\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 256\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 256\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 30\nactivation = linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352\nclasses = 5\nnum = 9\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n"
  },
  {
    "path": "cfg/yolov3/yolov3-spp-matrix.cfg",
    "content": "[net]\n# Testing\n# batch=1\n# subdivisions=1\n# Training\nbatch=64\nsubdivisions=16\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500500\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n\n### End SPP 
###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n# 89\n[yolo]\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n# 101\n[yolo]\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n# 113\n[yolo]\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n##################\n\n[route]\nlayers = 110\n\n# 115\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n# 
116\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride_x=1\nstride_y=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 9,10,11\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = 110\n\n# 121\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n# 122\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride_x=2\nstride_y=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 12,13,14\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n##################\n\n[route]\nlayers = 98\n\n[convolutional]\nshare_index=115\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nshare_index=116\nbatch_normalize=1\nfilters=128\nsize=1\nstride_x=1\nstride_y=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 15,16,17\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = 98\n\n[convolutional]\nshare_index=121\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nshare_index=122\nbatch_normalize=1\nfilters=128\nsize=1\nstride_x=2\nstride_y=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 18,19,20\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n##################\n\n[route]\nlayers = 86\n\n[convolutional]\nshare_index=115\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nshare_index=116\nbatch_normalize=1\nfilters=128\nsize=1\nstride_x=1\nstride_y=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 21,22,23\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  
30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = 86\n\n[convolutional]\nshare_index=121\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nshare_index=122\nbatch_normalize=1\nfilters=128\nsize=1\nstride_x=2\nstride_y=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 24,25,26\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326, 10,7,  16,15,  33,12, 5,13,  8,30,  17,23,  30,31,  62,23,  59,60,  15,61,  31,45,  30,119,  116,45,  156,99,  373,163,  58,90,  78,198,  187,326\nclasses=80\nnum=27\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1"
  },
  {
    "path": "cfg/yolov3/yolov3-spp-pan-scale.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=32\nwidth=544\nheight=544\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 10000\n\npolicy=steps\nsteps=8000,9000\nscales=.1,.1\n\n#policy=sgdr\n#sgdr_cycle=1000\n#sgdr_mult=2\n#steps=4000,6000,8000,9000\n#scales=1, 1, 0.1, 0.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n\n### End SPP 
###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n\n\n########### to [yolo-3]\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n\n########### to [yolo-2]\n\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n\n\n########### to [yolo-1]\n\n\n########### features of different layers\n\n\n[route]\nlayers=1\n\n[reorg3d]\nstride=2\n\n[route]\nlayers=5,-1\n\n[reorg3d]\nstride=2\n\n[route]\nlayers=12,-1\n\n[reorg3d]\nstride=2\n\n[route]\nlayers=37,-1\n\n[reorg3d]\nstride=2\n\n[route]\nlayers=62,-1\n\n\n\n########### [yolo-1]\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=4\n\n[route]\nlayers = -1,-12\n\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=340\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2,3\nanchors = 8,8, 10,13, 16,30, 33,23,  32,32, 30,61, 62,45, 64,64,  59,119, 116,90, 156,198, 373,326\nclasses=80\nnum=12\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.05\nrandom=0\n\n\n\n\n########### [yolo-2]\n\n\n[route]\nlayers = -7\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1,-28\n\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=340\nactivation=linear\n\n\n[yolo]\nmask = 4,5,6,7\nanchors = 8,8, 10,13, 16,30, 33,23,  32,32, 30,61, 62,45, 64,64,  59,119, 116,90, 156,198, 373,326\nclasses=80\nnum=12\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.1\nrandom=0\n\n\n\n########### [yolo-3]\n\n[route]\nlayers = -14\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-43\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=340\nactivation=linear\n\n\n[yolo]\nmask = 8,9,10,11\nanchors = 8,8, 10,13, 16,30, 33,23,  32,32, 30,61, 62,45, 59,119,   
80,80, 116,90, 156,198, 373,326\nclasses=80\nnum=12\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.2\nrandom=0\n"
  },
  {
    "path": "cfg/yolov3/yolov3-spp.cfg",
    "content": "[net]\n# Testing\n# batch=1\n# subdivisions=1\n# Training\nbatch=64\nsubdivisions=16\nwidth=608\nheight=608\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n\n### End SPP 
###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3/yolov3-spp3.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\n# batch=64\n# subdivisions=16\nwidth=608\nheight=608\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 120200\npolicy=steps\nsteps=70000,100000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n\n### End SPP 
###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n\n### End SPP ###\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n\n### End SPP ###\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3/yolov3-visdrone.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3/yolov3.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n# 
Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
36\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3-ghostnet/yolov3-ghost-coco.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n# 0\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=2\npad=1\ngroups=1\nactivation=relu\n\n# ghost bottleneck  starts\n\n#GB1-PConv #1\n[convolutional]\nbatch_normalize=1\nfilters=8\nsize=1\nstride=1\npad=0\ngroups=1\nactivation=relu\n\n#GB1-Cheap #2\n[convolutional]\nbatch_normalize=1\nfilters=8\nsize=3\nstride=1\npad=1\ngroups=8\nactivation=relu\n\n# 3\n[route]\nlayers=-1, 1\n\n# 4\n[convolutional]\nbatch_normalize=1\nfilters=8\nsize=1\nstride=1\npad=0\ngroups=1\nactivation=none\n\n# 5\n[convolutional]\nbatch_normalize=1\nfilters=8\nsize=3\nstride=1\npad=1\ngroups=8\nactivation=none\n\n# 6\n[route]\nlayers=-1,4\n\n# 7\n[shortcut]\nfrom=-7\nactivation=none\n\n# GB2-PConv # 8\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=0\ngroups=1\nactivation=relu\n\n# GB2-Cheap # 9\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=3\nstride=1\npad=1\ngroups=24\nactivation=relu\n\n#10\n[route]\nlayers=-1,8\n\n#11\n[convolutional]\nbatch_normalize=1\nfilters=48\nsize=3\nstride=2\ngroups=48\npad=1\nactivation=none\n\n#12\n[convolutional]\nbatch_normalize=1\nfilters=12\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#13\n[convolutional]\nbatch_normalize=1\nfilters=12\nsize=3\nstride=1\ngroups=12\npad=1\nactivation=none\n\n#14\n[route]\nlayers=-1,12\n\n#15\n[route]\nlayers=7\n\n#16\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=2\ngroups=16\npad=1\nactivation=none\n\n#17\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#18\n[shortcut]\nfrom=-4\nactivation=none\n\n# GB3-PConv #19\n[convolutional]\nbatch_normalize=1\nfilters=36\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n# GB3-Cheap #20\n[convolutional]\nbatch_normalize=1\nfilters=36\nsize=3\nstride=1\ngroups=36\npad=1\nactivation=relu\n\n#21\n[route]\nlayers=-1,19\n\n#22\n[convolutional]\nbatch_normalize=1\nfilters=12\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#23\n[convolutional]\nbatch_normalize=1\nfilters=12\nsize=3\nstride=1\ngroups=12\npad=1\nactivation=none\n\n#24\n[route]\nlayers=-1,22\n\n#25\n[shortcut]\nfrom=-7\nactivation=none\n\n#GB4-PConv #26\n[convolutional]\nbatch_normalize=1\nfilters=36\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB4-Cheap #27\n[convolutional]\nbatch_normalize=1\nfilters=36\nsize=3\nstride=1\ngroups=36\npad=1\nactivation=relu\n\n#28\n[route]\nlayers=-1,26\n\n#29\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=5\nstride=2\ngroups=72\npad=2\nactivation=none\n\n#30\n[se]\nreduction=4\n\n#31\n[convolutional]\nbatch_normalize=1\nfilters=20\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#32\n[convolutional]\nbatch_normalize=1\nfilters=20\nsize=3\nstride=1\ngroups=20\npad=1\nactivation=none\n\n#33\n[route]\nlayers=-1,31\n\n#34\n[route]\nlayers=25\n\n#35\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=5\nstride=2\ngroups=24\npad=2\nactivation=none\n\n#36\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#37\n[shortcut]\nfrom=-4\nactivation=none\n\n#GB5-PConv #38\n[convolutional]\nbatch_normalize=1\nfilters=60\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB5-Cheap 
#39\n[convolutional]\nbatch_normalize=1\nfilters=60\nsize=3\nstride=1\ngroups=60\npad=1\nactivation=relu\n\n#40\n[route]\nlayers=-1,38\n\n#41\n[se]\nreduction=4\n\n#42\n[convolutional]\nbatch_normalize=1\nfilters=20\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#43\n[convolutional]\nbatch_normalize=1\nfilters=20\nsize=3\nstride=1\ngroups=20\npad=1\nactivation=none\n\n#44\n[route]\nlayers=-1,42\n\n#45\n[shortcut]\nfrom=-8\n\n#GB6-PConv #46\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB6-Cheap #47\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=3\nstride=1\ngroups=120\npad=1\nactivation=relu\n\n#48\n[route]\nlayers=-1,46\n\n#49\n[convolutional]\nbatch_normalize=1\nfilters=240\nsize=3\nstride=2\ngroups=240\npad=1\nactivation=none\n\n#50\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#51\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=3\nstride=1\ngroups=40\npad=1\nactivation=none\n\n#52\n[route]\nlayers=-1,50\n\n#53\n[route]\nlayers=45\n\n#54\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=3\nstride=2\ngroups=40\npad=1\nactivation=none\n\n#55\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#56\n[shortcut]\nfrom=-4\nactivation=none\n\n#GB7-PConv #57\n[convolutional]\nbatch_normalize=1\nfilters=100\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB7-Cheap #58\n[convolutional]\nbatch_normalize=1\nfilters=100\nsize=3\nstride=1\ngroups=100\npad=1\nactivation=relu\n\n#59\n[route]\nlayers=-1,57\n\n#60\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#61\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=3\nstride=1\ngroups=40\npad=1\nactivation=none\n\n#62\n[route]\nlayers=-1,60\n\n#63\n[shortcut]\nfrom=-7\nactivation=none\n\n#GB8-PConv #64\n[convolutional]\nbatch_normalize=1\nfilters=92\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB8-Cheap #65\n[convolutional]\nbatch_normalize=1\nfilters=92\nsize=3\nstride=1\ngroups=92\npad=1\nactivation=relu\n\n#66\n[route]\nlayers=-1,64\n\n#67\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#68\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=3\nstride=1\ngroups=40\npad=1\nactivation=none\n\n#69\n[route]\nlayers=-1,67\n\n#70\n[shortcut]\nfrom=-7\nactivation=none\n\n#GB9-PConv #71\n[convolutional]\nbatch_normalize=1\nfilters=92\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB9-Cheap #72\n[convolutional]\nbatch_normalize=1\nfilters=92\nsize=3\nstride=1\ngroups=92\npad=1\nactivation=relu\n\n#73\n[route]\nlayers=-1,71\n\n#74\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#75\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=3\nstride=1\ngroups=40\npad=1\nactivation=none\n\n#76\n[route]\nlayers=-1,74\n\n#77\n[shortcut]\nfrom=-7\nactivation=none\n\n#GB10-PConv #78\n[convolutional]\nbatch_normalize=1\nfilters=240\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n\n#GB10-Cheap 
#79\n[convolutional]\nbatch_normalize=1\nfilters=240\nsize=3\nstride=1\ngroups=240\npad=1\nactivation=relu\n\n#80\n[route]\nlayers=-1,78\n\n#81\n[se]\nreduction=4\n\n#82\n[convolutional]\nbatch_normalize=1\nfilters=56\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#83\n[convolutional]\nbatch_normalize=1\nfilters=56\nsize=3\nstride=1\ngroups=56\npad=1\nactivation=none\n\n#84\n[route]\nlayers=-1,82\n\n#85\n[route]\nlayers=77\n\n#86\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=3\nstride=1\ngroups=80\npad=1\nactivation=none\n\n#87\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#88\n[shortcut]\nfrom=-4\nactivation=none\n\n#GB11-PConv #89\n[convolutional]\nbatch_normalize=1\nfilters=336\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB11-Cheap #90\n[convolutional]\nbatch_normalize=1\nfilters=336\nsize=3\nstride=1\ngroups=336\npad=1\nactivation=relu\n\n#91\n[route]\nlayers=-1,89\n\n#92\n[se]\nreduction=4\n\n#93\n[convolutional]\nbatch_normalize=1\nfilters=56\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#94\n[convolutional]\nbatch_normalize=1\nfilters=56\nsize=3\nstride=1\ngroups=56\npad=1\nactivation=none\n\n#95\n[route]\nlayers=-1,93\n\n#96\n[shortcut]\nfrom=-8\nactivation=none\n\n#GB12-PConv #97\n[convolutional]\nbatch_normalize=1\nfilters=336\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB12-Cheap #98\n[convolutional]\nbatch_normalize=1\nfilters=336\nsize=3\nstride=1\ngroups=336\npad=1\nactivation=relu\n\n#99\n[route]\nlayers=-1,97\n\n#100\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=5\nstride=2\ngroups=672\npad=2\nactivation=none\n\n#101\n[se]\nreduction=4\n\n#102\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#103\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=3\nstride=1\ngroups=80\npad=1\nactivation=none\n\n#104\n[route]\nlayers=-1,102\n\n#105\n[route]\nlayers=96\n\n#106\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=5\nstride=2\ngroups=112\npad=2\nactivation=none\n\n#107\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#108\n[shortcut]\nfrom=-4\nactivation=none\n\n#GB13-PConv #109\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB13-Cheap #110\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\ngroups=480\npad=1\nactivation=relu\n\n#111\n[route]\nlayers=-1,109\n\n#112\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#113\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=3\nstride=1\ngroups=80\npad=1\nactivation=none\n\n#114\n[route]\nlayers=-1,112\n\n#115\n[shortcut]\nfrom=-7\nactivation=none\n\n#GB14-PConv #116\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB14-Cheap #117\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\ngroups=480\npad=1\nactivation=relu\n\n#118\n[route]\nlayers=-1,116\n\n#119\n[se]\nreduction=4\n\n#120\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#121\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=3\nstride=1\ngroups=80\npad=1\nactivation=none\n\n#122\n[route]\nlayers=-1,120\n\n#123\n[shortcut]\nfrom=-8\nactivation=none\n\n#GB15-PConv #124\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB15-Cheap 
#125\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\ngroups=480\npad=1\nactivation=relu\n\n#126\n[route]\nlayers=-1,124\n\n#127\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#128\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=3\nstride=1\ngroups=80\npad=1\nactivation=none\n\n#129\n[route]\nlayers=-1,127\n\n#130\n[shortcut]\nfrom=-7\nactivation=none\n\n#GB16-PConv #131\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n#GB16-Cheap #132\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\ngroups=480\npad=1\nactivation=relu\n\n#133\n[route]\nlayers=-1,131\n\n#134\n[se]\nreduction=4\n\n#135\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=none\n\n#136\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=3\nstride=1\ngroups=80\npad=1\nactivation=none\n\n#137\n[route]\nlayers=-1,135\n\n#138\n[shortcut]\nfrom=-8\n\n#139\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=relu\n\n\n\n\n####### End of backbone\n\n#140\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=0\ngroups=1\nactivation=leaky\n\n#141\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\ngroups=1\nfilters=1024\nactivation=leaky\n\n#142\n[convolutional]\nbatch_normalize=1\ngroups=1\nfilters=512\nsize=1\nstride=1\npad=0\nactivation=leaky\n\n#143\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\ngroups=1\nfilters=1024\nactivation=leaky\n\n#144\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=0\ngroups=1\nactivation=leaky\n\n#145\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\ngroups=1\nactivation=leaky\n\n#146\n[convolutional]\nsize=1\nstride=1\npad=0\nfilters=255\ngroups=1\nactivation=linear\n\n#147\n[yolo]\nmask = 6,7,8\nanchors =  10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n#148\n[route]\nlayers = -4\n\n#149\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\ngroups=1\npad=0\nactivation=leaky\n\n#150\n[upsample]\nstride=2\n\n#151\n[route]\nlayers = -1, 96\n\n#152\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\ngroups=1\nstride=1\npad=0\nactivation=leaky\n\n#153\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\ngroups=1\nfilters=512\nactivation=leaky\n\n#154\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\ngroups=1\nstride=1\npad=0\nactivation=leaky\n\n#155\n[convolutional]\nbatch_normalize=1\nsize=3\ngroups=1\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n#156\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=0\ngroups=1\nactivation=leaky\n\n#157\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\ngroups=1\nactivation=leaky\n\n#158\n[convolutional]\nsize=1\nstride=1\npad=0\ngroups=1\nfilters=255\nactivation=linear\n\n#159\n[yolo]\nmask = 3,4,5\nanchors =  10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n#160\n[route]\nlayers = -4\n\n#161\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=0\ngroups=1\nactivation=leaky\n\n#162\n[upsample]\nstride=2\n\n#163\n[route]\nlayers = -1, 
45\n\n#164\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\ngroups=1\nstride=1\npad=0\nactivation=leaky\n\n#165\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\ngroups=1\nfilters=256\nactivation=leaky\n\n#166\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\ngroups=1\nstride=1\npad=0\nactivation=leaky\n\n#167\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\ngroups=1\nfilters=256\nactivation=leaky\n\n#168\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\ngroups=1\nstride=1\npad=0\nactivation=leaky\n\n#169\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\ngroups=1\nfilters=256\nactivation=leaky\n\n#170\n[convolutional]\nsize=1\nstride=1\npad=0\ngroups=1\nfilters=255\nactivation=linear\n\n#171\n[yolo]\nmask = 0,1,2\nanchors =  10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3-mobilenet/yolov3-mobilenet-UAV.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=5\nstride=2\npad=1\nactivation=relu6\n\n[se]\nfilters=72\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize=1\nfilters=240\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=240\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize=1\nfilters=200\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=200\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# 
bneck10\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=480\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck12\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck13\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=5\nstride=2\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck14\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck15\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
49\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 25\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3-mobilenet/yolov3-mobilenet-coco.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=5\nstride=2\npad=1\nactivation=relu6\n\n[se]\nfilters=72\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize=1\nfilters=240\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=240\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize=1\nfilters=200\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=200\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# 
bneck10\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=480\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck12\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck13\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=5\nstride=2\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck14\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck15\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
49\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 25\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3-mobilenet/yolov3-mobilenet-hand.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=5\nstride=2\npad=1\nactivation=relu6\n\n[se]\nfilters=72\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize=1\nfilters=240\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=240\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize=1\nfilters=200\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=200\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# 
bneck10\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=480\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck12\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck13\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=5\nstride=2\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck14\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck15\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
49\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 25\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3-mobilenet/yolov3-mobilenet-screw.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=5\nstride=2\npad=1\nactivation=relu6\n\n[se]\nfilters=72\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize=1\nfilters=240\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=240\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize=1\nfilters=200\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=200\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# 
bneck10\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=480\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck12\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck13\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=5\nstride=2\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck14\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck15\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=21\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87\nclasses=2\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
49\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=21\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87\nclasses=2\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 25\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=21\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87\nclasses=2\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3-mobilenet/yolov3-mobilenet-visdrone.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=3\nstride=1\npad=1\nactivation=relu6\n\n[convolutional]\nbatch_normalize=1\nfilters=24\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize=1\nfilters=72\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=72\nsize=5\nstride=2\npad=1\nactivation=relu6\n\n[se]\nfilters=72\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize=1\nfilters=120\nsize=1\nstride=1\npad=1\nactivation=relu6\n\n[depthwise]\nbatch_normalize=1\nfilters=120\nsize=5\nstride=1\npad=1\nactivation=relu6\n\n[se]\nfilters=120\n\n[convolutional]\nbatch_normalize=1\nfilters=40\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize=1\nfilters=240\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=240\nsize=3\nstride=2\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize=1\nfilters=200\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=200\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# 
bneck10\n\n[convolutional]\nbatch_normalize=1\nfilters=184\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=184\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[convolutional]\nbatch_normalize=1\nfilters=80\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-4\nactivation=linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize=1\nfilters=480\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=480\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=480\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck12\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=3\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=112\nsize=1\nstride=1\npad=1\nactivation=linear\n\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck13\n\n[convolutional]\nbatch_normalize=1\nfilters=672\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=672\nsize=5\nstride=2\npad=1\nactivation=h_swish\n\n[se]\nfilters=672\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n# bneck14\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n# bneck15\n\n[convolutional]\nbatch_normalize=1\nfilters=960\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n[depthwise]\nbatch_normalize=1\nfilters=960\nsize=5\nstride=1\npad=1\nactivation=h_swish\n\n[se]\nfilters=960\n\n[convolutional]\nbatch_normalize=1\nfilters=160\nsize=1\nstride=1\npad=1\nactivation=linear\n\n[shortcut]\nfrom=-5\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n######################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 
49\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 25\n\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3-singlechannel/yolov3-singlechannel.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch = 16\nsubdivisions = 1\nwidth = 416\nheight = 416\nchannels = 1\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nburn_in = 1000\nmax_batches = 500200\npolicy = steps\nsteps = 400000,450000\nscales = .1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 32\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 32\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 64\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 
256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n# Downsample\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 2\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[shortcut]\nfrom = -3\nactivation = linear\n\n######################\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride 
= 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 1024\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 30\nactivation = linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352\nclasses = 5\nnum = 9\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[upsample]\nstride = 2\n\n[route]\nlayers = -1, 61\n\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 512\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 512\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 512\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 30\nactivation = linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352\nclasses = 5\nnum = 9\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[upsample]\nstride = 2\n\n[route]\nlayers = -1, 36\n\n\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 256\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 256\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nsize = 3\nstride = 1\npad = 1\nfilters = 256\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 30\nactivation = linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352\nclasses = 5\nnum = 9\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny-UAV.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=15,25,60,99,150,160,180\nscales=0.5,0.5,0.1,0.5,0.5,0.1,0.1\n\n# 0\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 1\n[maxpool]\nsize=2\nstride=2\n\n# 2\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 3\n[maxpool]\nsize=2\nstride=2\n\n# 4\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 5\n[maxpool]\nsize=2\nstride=2\n\n# 6\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 7\n[maxpool]\nsize=2\nstride=2\n\n# 8\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 9\n[maxpool]\nsize=2\nstride=2\n\n# 10\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 11\n[maxpool]\nsize=2\nstride=1\n\n# 12\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n# 13\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# 14\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 15\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n\n# 16\n[yolo]\nmask = 3,4,5\nanchors = 8,9, 10,12, 13,12, 14,15, 17,20, 23,26\nclasses=1\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n# 17\n[route]\nlayers = -4\n\n# 18\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# 19\n[upsample]\nstride=2\n\n# 20\n[route]\nlayers = -1, 8\n\n# 21\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 22\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n# 23\n[yolo]\nmask = 0,1,2\nanchors = 8,9, 10,12, 13,12, 14,15, 17,20, 23,26\nclasses=1\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny-hand.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\nbatch=16\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=15,25,60,99,150,160,180\nscales=0.5,0.5,0.1,0.5,0.5,0.1,0.1\n\n# 0\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 1\n[maxpool]\nsize=2\nstride=2\n\n# 2\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 3\n[maxpool]\nsize=2\nstride=2\n\n# 4\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 5\n[maxpool]\nsize=2\nstride=2\n\n# 6\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 7\n[maxpool]\nsize=2\nstride=2\n\n# 8\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 9\n[maxpool]\nsize=2\nstride=2\n\n# 10\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 11\n[maxpool]\nsize=2\nstride=1\n\n# 12\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n# 13\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# 14\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 15\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n\n# 16\n[yolo]\nmask = 3,4,5\nanchors = 9,13, 16,22, 27,38, 28,27, 44,49, 79,83\nclasses=1\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n# 17\n[route]\nlayers = -4\n\n# 18\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# 19\n[upsample]\nstride=2\n\n# 20\n[route]\nlayers = -1, 8\n\n# 21\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 22\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n# 23\n[yolo]\nmask = 0,1,2\nanchors = 9,13, 16,22, 27,38, 28,27, 44,49, 79,83\nclasses=1\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny-ship-one.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\n# batch=64\n# subdivisions=2\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=1\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=30\nactivation=linear\n\n\n\n[yolo]\nmask = 0,1,2\nanchors = 209,277, 315,160, 358,321\nclasses=5\nnum=3\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny-ship.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\n# batch=64\n# subdivisions=2\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=1\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=30\nactivation=linear\n\n\n\n[yolo]\nmask = 3,4,5\nanchors = 140,147, 209,309, 293,136, 328,260, 358,194, 365,351\nclasses=5\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 8\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=30\nactivation=linear\n\n[yolo]\nmask = 0,1,2\nanchors = 140,147, 209,309, 293,136, 328,260, 358,194, 365,351\nclasses=5\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\n# batch=64\n# subdivisions=2\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=1\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses=80\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 8\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 0,1,2\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses=80\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny3.cfg",
    "content": "[net]\n# Testing\n# batch=1\n# subdivisions=1\n# Training\nbatch=64\nsubdivisions=16\nwidth=608\nheight=608\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 200000\npolicy=steps\nsteps=180000,190000\nscales=.1,.1\n\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=1\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n\n[yolo]\nmask = 6,7,8\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 8\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 3,4,5\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n\n\n[route]\nlayers = -3\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 6\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 0,1,2\nanchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny_bdd100k.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\n# batch=64\n# subdivisions=2\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.001\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.0001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n# 0\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 1\n[maxpool]\nsize=2\nstride=2\n\n# 2\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 3\n[maxpool]\nsize=2\nstride=2\n\n# 4\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 5\n[maxpool]\nsize=2\nstride=2\n\n# 6\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 7\n[maxpool]\nsize=2\nstride=2\n\n# 8\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 9\n[maxpool]\nsize=2\nstride=2\n\n# 10\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 11\n[maxpool]\nsize=2\nstride=1\n\n# 12\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n# 13\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# 14\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 15\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n\n# 16\n[yolo]\nmask = 3,4,5\nanchors = 3,7, 5,18, 7,10, 12,20, 26,38, 70,96\nclasses=10\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n# 17\n[route]\nlayers = -4\n\n# 18\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# 19\n[upsample]\nstride=2\n\n# 20\n[route]\nlayers = -1, 8\n\n# 21\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 22\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n# 23\n[yolo]\nmask = 0,1,2\nanchors = 3,7, 5,18, 7,10, 12,20, 26,38, 70,96\nclasses=10\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny_onDIOR.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\n# batch=64\n# subdivisions=2\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.001\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.0001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n# 0\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 1\n[maxpool]\nsize=2\nstride=2\n\n# 2\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 3\n[maxpool]\nsize=2\nstride=2\n\n# 4\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 5\n[maxpool]\nsize=2\nstride=2\n\n# 6\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 7\n[maxpool]\nsize=2\nstride=2\n\n# 8\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 9\n[maxpool]\nsize=2\nstride=2\n\n# 10\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 11\n[maxpool]\nsize=2\nstride=1\n\n# 12\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n# 13\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# 14\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 15\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=75\nactivation=linear\n\n\n\n# 16\n[yolo]\nmask = 3,4,5\nanchors = 5,5,  8,15,  17,9,  22,27,  54,65,  152,155\nclasses=20\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n# 17\n[route]\nlayers = -4\n\n# 18\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# 19\n[upsample]\nstride=2\n\n# 20\n[route]\nlayers = -1, 8\n\n# 21\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# 22\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=75\nactivation=linear\n\n# 23\n[yolo]\nmask = 0,1,2\nanchors = 5,5,  8,15,  17,9,  22,27,  54,65,  152,155\nclasses=20\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny/yolov3-tiny_visdrone.cfg",
    "content": "[net]\n# Testing\nbatch=1\nsubdivisions=1\n# Training\n# batch=64\n# subdivisions=2\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[maxpool]\nsize=2\nstride=1\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n###########\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n\n[yolo]\nmask = 3,4,5\nanchors = 0,3, 2,5, 3,11, 5,6, 9,14, 22,30\nclasses=10\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 8\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n[yolo]\nmask = 0,1,2\nanchors = 0,3, 2,5, 3,11, 5,6, 9,14, 22,30\nclasses=10\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\n"
  },
  {
    "path": "cfg/yolov3tiny-efficientnetB0/yolov3tiny-efficientnetB0.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=8\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n### CONV1 - 1 (1)\n# conv1\n[convolutional]\nfilters=32\nsize=3\npad=1\nstride=2\nbatch_normalize=1\nactivation=swish\n\n\n### CONV2 - MBConv1 - 1 (1)\n# conv2_1_expand\n[convolutional]\nfilters=32\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv2_1_dwise\n[convolutional]\ngroups=32\nfilters=32\nsize=3\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=4 (recommended r=16)\n[convolutional]\nfilters=8\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=32\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv2_1_linear\n[convolutional]\nfilters=16\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n\n### CONV3 - MBConv6 - 1 (2)\n# conv2_2_expand\n[convolutional]\nfilters=96\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv2_2_dwise\n[convolutional]\ngroups=96\nfilters=96\nsize=3\npad=1\nstride=2\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=8 (recommended r=16)\n[convolutional]\nfilters=16\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=96\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv2_2_linear\n[convolutional]\nfilters=24\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV3 - MBConv6 - 2 (2)\n# conv3_1_expand\n[convolutional]\nfilters=144\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv3_1_dwise\n[convolutional]\ngroups=144\nfilters=144\nsize=3\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=8\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=144\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv3_1_linear\n[convolutional]\nfilters=24\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n\n### CONV4 - MBConv6 - 1 (2)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_3_1\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_3_2_expand\n[convolutional]\nfilters=144\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_3_2_dwise\n[convolutional]\ngroups=144\nfilters=144\nsize=5\npad=1\nstride=2\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=8\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=144\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_3_2_linear\n[convolutional]\nfilters=40\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV4 - MBConv6 - 2 (2)\n# conv_4_1_expand\n[convolutional]\nfilters=192\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_4_1_dwise\n[convolutional]\ngroups=192\nfilters=192\nsize=5\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended 
r=16)\n[convolutional]\nfilters=16\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=192\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_4_1_linear\n[convolutional]\nfilters=40\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n\n\n### CONV5 - MBConv6 - 1 (3)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_4_2\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_4_3_expand\n[convolutional]\nfilters=192\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_4_3_dwise\n[convolutional]\ngroups=192\nfilters=192\nsize=3\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=16\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=192\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_4_3_linear\n[convolutional]\nfilters=80\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV5 - MBConv6 - 2 (3)\n# conv_4_4_expand\n[convolutional]\nfilters=384\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_4_4_dwise\n[convolutional]\ngroups=384\nfilters=384\nsize=3\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=24\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=384\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_4_4_linear\n[convolutional]\nfilters=80\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV5 - MBConv6 - 3 (3)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_4_4\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_4_5_expand\n[convolutional]\nfilters=384\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_4_5_dwise\n[convolutional]\ngroups=384\nfilters=384\nsize=3\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=24\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=384\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_4_5_linear\n[convolutional]\nfilters=80\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n\n### CONV6 - MBConv6 - 1 (3)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_4_6\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_4_7_expand\n[convolutional]\nfilters=384\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_4_7_dwise\n[convolutional]\ngroups=384\nfilters=384\nsize=5\npad=1\nstride=2\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=24\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=384\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_4_7_linear\n[convolutional]\nfilters=112\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV6 - MBConv6 - 2 (3)\n# conv_5_1_expand\n[convolutional]\nfilters=576\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# 
conv_5_1_dwise\n[convolutional]\ngroups=576\nfilters=576\nsize=5\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=32\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=576\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_5_1_linear\n[convolutional]\nfilters=112\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV6 - MBConv6 - 3 (3)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_5_1\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_5_2_expand\n[convolutional]\nfilters=576\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_5_2_dwise\n[convolutional]\ngroups=576\nfilters=576\nsize=5\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=32\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=576\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_5_2_linear\n[convolutional]\nfilters=112\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV7 - MBConv6 - 1 (4)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_5_2\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_5_3_expand\n[convolutional]\nfilters=576\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_5_3_dwise\n[convolutional]\ngroups=576\nfilters=576\nsize=5\npad=1\nstride=2\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=32\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=576\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_5_3_linear\n[convolutional]\nfilters=192\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV7 - MBConv6 - 2 (4)\n# conv_6_1_expand\n[convolutional]\nfilters=960\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_6_1_dwise\n[convolutional]\ngroups=960\nfilters=960\nsize=5\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=64\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=960\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_6_1_linear\n[convolutional]\nfilters=192\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV7 - MBConv6 - 3 (4)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_6_1\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_6_2_expand\n[convolutional]\nfilters=960\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_6_2_dwise\n[convolutional]\ngroups=960\nfilters=960\nsize=5\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=64\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=960\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# 
conv_6_2_linear\n[convolutional]\nfilters=192\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV7 - MBConv6 - 4 (4)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_6_1\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_6_2_expand\n[convolutional]\nfilters=960\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_6_2_dwise\n[convolutional]\ngroups=960\nfilters=960\nsize=5\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=64\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=960\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_6_2_linear\n[convolutional]\nfilters=192\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n\n### CONV8 - MBConv6 - 1 (1)\n# dropout only before residual connection\n[dropout]\nprobability=.0\n\n# block_6_2\n[shortcut]\nfrom=-9\nactivation=linear\n\n# conv_6_3_expand\n[convolutional]\nfilters=960\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n# conv_6_3_dwise\n[convolutional]\ngroups=960\nfilters=960\nsize=3\nstride=1\npad=1\nbatch_normalize=1\nactivation=swish\n\n\n#squeeze-n-excitation\n[avgpool]\n\n# squeeze ratio r=16 (recommended r=16)\n[convolutional]\nfilters=64\nsize=1\nstride=1\nactivation=swish\n\n# excitation\n[convolutional]\nfilters=960\nsize=1\nstride=1\nactivation=logistic\n\n# multiply channels\n[scale_channels]\nfrom=-4\n\n\n# conv_6_3_linear\n[convolutional]\nfilters=320\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=linear\n\n\n### CONV9 - Conv2d 1x1\n# conv_6_4\n[convolutional]\nfilters=1280\nsize=1\nstride=1\npad=0\nbatch_normalize=1\nactivation=swish\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nactivation=leaky\nfrom=-2\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses=80\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=0\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[shortcut]\nactivation=leaky\nfrom=90\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nactivation=leaky\nfrom=-3\n\n[shortcut]\nactivation=leaky\nfrom=90\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 0,1,2\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses=80\nnum=6\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=0\n"
  },
  {
    "path": "cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-UAV.cfg",
    "content": "[net]\n# Testing\nbatch = 1\nsubdivisions = 1\n# Training\n# batch=64\n# subdivisions=2\nwidth = 416\nheight = 416\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nburn_in = 1000\nmax_batches = 500200\npolicy = steps\nsteps = 400000,450000\nscales = .1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 2\npad = 1\nactivation = h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 2\npad = 1\nactivation = relu6\n\n[se]\nfilters = 16\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 72\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 72\nsize = 3\nstride = 2\npad = 1\nactivation = relu6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 24\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize = 1\nfilters = 88\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 88\nsize = 3\nstride = 1\npad = 1\nactivation = relu6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 24\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -4\nactivation = linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 96\nsize = 5\nstride = 2\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 96\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize = 1\nfilters = 240\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 240\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 240\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 240\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 240\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 240\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize = 1\nfilters = 120\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 120\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 120\n\n[convolutional]\nbatch_normalize = 1\nfilters = 48\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize = 1\nfilters = 144\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 144\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 144\n\n[convolutional]\nbatch_normalize = 1\nfilters = 48\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize = 1\nfilters = 288\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 
1\nfilters = 288\nsize = 5\nstride = 2\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 288\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck10\n\n[convolutional]\nbatch_normalize = 1\nfilters = 576\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 576\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 576\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize = 1\nfilters = 576\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 576\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 576\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=h_swish\n\n###########\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 18\nactivation = linear\n\n\n\n[yolo]\nmask = 3,4,5\nanchors = 8,9, 10,12, 13,12, 14,15, 17,20, 23,26\nclasses = 1\nnum = 6\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[upsample]\nstride = 2\n\n[route]\nlayers = -1, 34\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 18\nactivation = linear\n\n[yolo]\nmask = 0,1,2\nanchors = 8,9, 10,12, 13,12, 14,15, 17,20, 23,26\nclasses = 1\nnum = 6\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n"
  },
  {
    "path": "cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-coco.cfg",
    "content": "[net]\n# Testing\nbatch = 1\nsubdivisions = 1\n# Training\n# batch=64\n# subdivisions=2\nwidth = 416\nheight = 416\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nburn_in = 1000\nmax_batches = 500200\npolicy = steps\nsteps = 400000,450000\nscales = .1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 2\npad = 1\nactivation = h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 2\npad = 1\nactivation = relu6\n\n[se]\nfilters = 16\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 72\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 72\nsize = 3\nstride = 2\npad = 1\nactivation = relu6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 24\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize = 1\nfilters = 88\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 88\nsize = 3\nstride = 1\npad = 1\nactivation = relu6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 24\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -4\nactivation = linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 96\nsize = 5\nstride = 2\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 96\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize = 1\nfilters = 240\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 240\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 240\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 240\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 240\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 240\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize = 1\nfilters = 120\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 120\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 120\n\n[convolutional]\nbatch_normalize = 1\nfilters = 48\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize = 1\nfilters = 144\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 144\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 144\n\n[convolutional]\nbatch_normalize = 1\nfilters = 48\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize = 1\nfilters = 288\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 
1\nfilters = 288\nsize = 5\nstride = 2\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 288\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck10\n\n[convolutional]\nbatch_normalize = 1\nfilters = 576\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 576\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 576\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize = 1\nfilters = 576\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 576\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 576\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n###########\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 255\nactivation = linear\n\n[yolo]\nmask = 3,4,5\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses = 80\nnum = 6\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[upsample]\nstride = 2\n\n[route]\nlayers = -1, 34\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 255\nactivation = linear\n\n[yolo]\nmask = 0,1,2\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses = 80\nnum = 6\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n"
  },
  {
    "path": "cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-screw.cfg",
    "content": "[net]\n# Testing\nbatch = 1\nsubdivisions = 1\n# Training\n# batch=64\n# subdivisions=2\nwidth = 416\nheight = 416\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nburn_in = 1000\nmax_batches = 500200\npolicy = steps\nsteps = 400000,450000\nscales = .1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 2\npad = 1\nactivation = h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 2\npad = 1\nactivation = relu6\n\n[se]\nfilters = 16\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 72\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 72\nsize = 3\nstride = 2\npad = 1\nactivation = relu6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 24\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize = 1\nfilters = 88\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 88\nsize = 3\nstride = 1\npad = 1\nactivation = relu6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 24\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -4\nactivation = linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 96\nsize = 5\nstride = 2\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 96\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize = 1\nfilters = 240\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 240\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 240\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 240\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 240\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 240\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize = 1\nfilters = 120\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 120\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 120\n\n[convolutional]\nbatch_normalize = 1\nfilters = 48\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize = 1\nfilters = 144\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 144\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 144\n\n[convolutional]\nbatch_normalize = 1\nfilters = 48\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize = 1\nfilters = 288\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 
1\nfilters = 288\nsize = 5\nstride = 2\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 288\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck10\n\n[convolutional]\nbatch_normalize = 1\nfilters = 576\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 576\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 576\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize = 1\nfilters = 576\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 576\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 576\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n###########\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 21\nactivation = linear\n\n[yolo]\nmask = 3,4,5\nanchors = 25,34, 33,45, 37,50, 43,57, 70,85, 76,88\nclasses = 2\nnum = 6\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[upsample]\nstride = 2\n\n[route]\nlayers = -1, 34\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 21\nactivation = linear\n\n[yolo]\nmask = 0,1,2\nanchors = 25,34, 33,45, 37,50, 43,57, 70,85, 76,88\nclasses = 2\nnum = 6\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n"
  },
  {
    "path": "cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-visdrone.cfg",
    "content": "[net]\n# Testing\nbatch = 1\nsubdivisions = 1\n# Training\n# batch=64\n# subdivisions=2\nwidth = 416\nheight = 416\nchannels = 3\nmomentum = 0.9\ndecay = 0.0005\nangle = 0\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\nlearning_rate = 0.001\nburn_in = 1000\nmax_batches = 500200\npolicy = steps\nsteps = 400000,450000\nscales = .1,.1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 2\npad = 1\nactivation = h_swish\n\n# bneck1\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 16\nsize = 3\nstride = 2\npad = 1\nactivation = relu6\n\n[se]\nfilters = 16\n\n[convolutional]\nbatch_normalize = 1\nfilters = 16\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck2\n\n[convolutional]\nbatch_normalize = 1\nfilters = 72\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 72\nsize = 3\nstride = 2\npad = 1\nactivation = relu6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 24\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck3\n\n[convolutional]\nbatch_normalize = 1\nfilters = 88\nsize = 1\nstride = 1\npad = 1\nactivation = relu6\n\n[depthwise]\nbatch_normalize = 1\nfilters = 88\nsize = 3\nstride = 1\npad = 1\nactivation = relu6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 24\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -4\nactivation = linear\n\n# bneck4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 96\nsize = 5\nstride = 2\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 96\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck5\n\n[convolutional]\nbatch_normalize = 1\nfilters = 240\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 240\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 240\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck6\n\n[convolutional]\nbatch_normalize = 1\nfilters = 240\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 240\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 240\n\n[convolutional]\nbatch_normalize = 1\nfilters = 40\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck7\n\n[convolutional]\nbatch_normalize = 1\nfilters = 120\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 120\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 120\n\n[convolutional]\nbatch_normalize = 1\nfilters = 48\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck8\n\n[convolutional]\nbatch_normalize = 1\nfilters = 144\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 144\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 144\n\n[convolutional]\nbatch_normalize = 1\nfilters = 48\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck9\n\n[convolutional]\nbatch_normalize = 1\nfilters = 288\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 
1\nfilters = 288\nsize = 5\nstride = 2\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 288\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n# bneck10\n\n[convolutional]\nbatch_normalize = 1\nfilters = 576\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 576\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 576\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n# bneck11\n\n[convolutional]\nbatch_normalize = 1\nfilters = 576\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n[depthwise]\nbatch_normalize = 1\nfilters = 576\nsize = 5\nstride = 1\npad = 1\nactivation = h_swish\n\n[se]\nfilters = 576\n\n[convolutional]\nbatch_normalize = 1\nfilters = 96\nsize = 1\nstride = 1\npad = 1\nactivation = linear\n\n[shortcut]\nfrom = -5\nactivation = linear\n\n[convolutional]\nbatch_normalize = 1\nfilters = 1024\nsize = 1\nstride = 1\npad = 1\nactivation = h_swish\n\n###########\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nbatch_normalize = 1\nfilters = 512\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 45\nactivation = linear\n\n[yolo]\nmask = 3,4,5\nanchors = 0,3, 2,5, 3,11, 5,6, 9,14, 22,30\nclasses = 10\nnum = 6\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize = 1\nfilters = 128\nsize = 1\nstride = 1\npad = 1\nactivation = leaky\n\n[upsample]\nstride = 2\n\n[route]\nlayers = -1, 34\n\n[convolutional]\nbatch_normalize = 1\nfilters = 256\nsize = 3\nstride = 1\npad = 1\nactivation = leaky\n\n[convolutional]\nsize = 1\nstride = 1\npad = 1\nfilters = 45\nactivation = linear\n\n[yolo]\nmask = 0,1,2\nanchors = 0,3, 2,5, 3,11, 5,6, 9,14, 22,30\nclasses = 10\nnum = 6\njitter = .3\nignore_thresh = .7\ntruth_thresh = 1\nrandom = 1\n"
  },
  {
    "path": "cfg/yolov4/yolov4-hand.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=8\nwidth=608\nheight=608\nchannels=3\nmomentum=0.949\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.00261\nburn_in=1000\nmax_batches = 500500\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n#cutmix=1\nmosaic=1\n\n#:104x104 54:52x52 85:26x26 104:13x13 for 416\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-7\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-10\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-1,-16\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=mish\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n### End SPP ###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 85\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 54\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.2\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=256\nactivation=leaky\n\n[route]\nlayers = -1, 
-16\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.1\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=512\nactivation=leaky\n\n[route]\nlayers = -1, -37\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=18\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103\nclasses=1\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\nscale_x_y = 1.05\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n"
  },
  {
    "path": "cfg/yolov4/yolov4-relu.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=8\nwidth=608\nheight=608\nchannels=3\nmomentum=0.949\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.00261\nburn_in=1000\nmax_batches = 500500\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n#cutmix=1\nmosaic=1\n\n#:104x104 54:52x52 85:26x26 104:13x13 for 416\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-7\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-10\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = 
-1,-16\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n### End SPP ###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 85\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 54\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.2\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=256\nactivation=leaky\n\n[route]\nlayers = -1, 
-16\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.1\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=512\nactivation=leaky\n\n[route]\nlayers = -1, -37\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\nscale_x_y = 1.05\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n"
  },
  {
    "path": "cfg/yolov4/yolov4-visdrone.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=8\nwidth=608\nheight=608\nchannels=3\nmomentum=0.949\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.00261\nburn_in=1000\nmax_batches = 500500\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n#cutmix=1\nmosaic=1\n\n#:104x104 54:52x52 85:26x26 104:13x13 for 416\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-7\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-10\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-1,-16\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=mish\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n### End SPP ###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 85\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 54\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.2\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=256\nactivation=leaky\n\n[route]\nlayers = -1, 
-16\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.1\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=512\nactivation=leaky\n\n[route]\nlayers = -1, -37\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=45\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=10\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\nscale_x_y = 1.05\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n"
  },
  {
    "path": "cfg/yolov4/yolov4.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=8\nwidth=608\nheight=608\nchannels=3\nmomentum=0.949\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.00261\nburn_in=1000\nmax_batches = 500500\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n#cutmix=1\nmosaic=1\n\n#:104x104 54:52x52 85:26x26 104:13x13 for 416\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-7\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-10\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-1,-16\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=mish\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n### End SPP ###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 85\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 54\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.2\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=256\nactivation=leaky\n\n[route]\nlayers = -1, 
-16\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.1\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=512\nactivation=leaky\n\n[route]\nlayers = -1, -37\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=80\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\nscale_x_y = 1.05\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\n\n"
  },
  {
    "path": "cfg/yolov4tiny/yolov4-tiny.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=1\nwidth=416\nheight=416\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.00261\nburn_in=1000\nmax_batches = 500200\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers=-1\ngroups=2\ngroup_id=1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -6,-1\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers=-1\ngroups=2\ngroup_id=1\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -6,-1\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers=-1\ngroups=2\ngroup_id=1\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -6,-1\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n##################################\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses=80\nnum=6\njitter=.3\nscale_x_y = 1.05\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nignore_thresh = .7\ntruth_thresh = 1\nrandom=0\nresize=1.5\nnms_kind=greedynms\nbeta_nms=0.6\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 23\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=255\nactivation=linear\n\n[yolo]\nmask = 0,1,2\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses=80\nnum=6\njitter=.3\nscale_x_y = 1.05\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nignore_thresh = .7\ntruth_thresh = 1\nrandom=0\nresize=1.5\nnms_kind=greedynms\nbeta_nms=0.6"
  },
  {
    "path": "convert.py",
    "content": "# Author:LiPu\nimport argparse\nfrom sys import platform\n\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\n\n\ndef convert():\n    img_size = opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)\n    weights = opt.weights\n    # Initialize\n    device = torch_utils.select_device(opt.device)\n\n    # Initialize model\n    model = Darknet(opt.cfg, img_size, is_gray_scale=opt.gray_scale)\n\n    # Load weights\n    attempt_download(weights)\n    if weights.endswith('.pt'):  # pytorch format\n        model.load_state_dict(torch.load(weights, map_location=device)['model'])\n    else:  # darknet format\n        _ = load_darknet_weights(model, weights)\n\n    # Eval mode\n    model.to(device).eval()\n\n    save_weights(model, path='weights/best.weights')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--weights', type=str, default='weights/yolov3.weights', help='path to weights file')\n    parser.add_argument('--output', type=str, default='output', help='output folder')  # output folder\n    parser.add_argument('--img_size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')\n    parser.add_argument('--gray-scale', action='store_true', help='gray scale trainning')\n    opt = parser.parse_args()\n    print(opt)\n\n    with torch.no_grad():\n        convert()\n"
  },
  {
    "path": "convert_FPGA.py",
    "content": "import argparse\nimport struct\n\nfrom models import *  # set ONNX_EXPORT in models.py\n\nfrom utils.utils import *\n\n\ndef convert():\n    img_size = (320, 192) if ONNX_EXPORT else opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)\n    weights = opt.weights\n\n    # Initialize\n    device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)\n\n    # Initialize model\n    model = Darknet(opt.cfg, img_size, quantized=opt.quantized, a_bit=opt.a_bit, w_bit=opt.w_bit,\n                    FPGA=opt.FPGA, is_gray_scale=opt.gray_scale)\n\n    # Load weights\n    attempt_download(weights)\n    if weights.endswith('.pt'):  # pytorch format\n        model.load_state_dict(torch.load(weights, map_location=device)['model'])\n    else:  # darknet format\n        _ = load_darknet_weights(model, weights, FPGA=opt.FPGA)\n    if opt.quantized == 0:\n        save_weights(model, path='weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '-best.weights')\n    else:\n        w_file = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_weights.bin', 'wb')\n        b_file = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_bias.bin', 'wb')\n        if opt.quantized == 1:\n            w_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_w_scale.bin', 'wb')\n            a_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_a_scale.bin', 'wb')\n            b_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_b_scale.bin', 'wb')\n            s_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_s_scale.bin', 'wb')\n            if opt.w_bit == 16:\n                a = struct.pack('<i', 14)\n            if opt.w_bit == 8:\n                a = struct.pack('<i', 7)\n            a_scale.write(a)\n        for _, (mdef, module) in enumerate(zip(model.module_defs, model.module_list)):\n            print(mdef)\n            if mdef['type'] == 'convolutional':\n                conv_layer = module[0]\n                # 使用BN训练中量化，融合BN参数\n                weight, bias = conv_layer.BN_fuse()\n                if opt.quantized == 1:\n                    # 得到缩放因子\n                    activate_scale = -math.log(conv_layer.activation_quantizer.scale.cpu().data.numpy()[0], 2)\n                    weight_scale = -math.log(conv_layer.weight_quantizer.scale.cpu().data.numpy()[0], 2)\n                    a = struct.pack('<i', int(activate_scale))\n                    a_scale.write(a)\n                    a = struct.pack('<i', int(weight_scale))\n                    w_scale.write(a)\n                # 处理weights\n                para = conv_layer.weight_quantizer.get_quantize_value(weight)\n\n                if opt.reorder:\n                    # 重排序参数\n                    print(\"use reorder!\")\n                    shape_output = para.shape[0]\n                    shape_input = para.shape[1]\n                    num_TN = int(shape_input / opt.TN)\n                    remainder_TN = shape_input % opt.TN\n                    num_TM = int(shape_output / opt.TM)\n                    remainder_TM = shape_output % opt.TM\n                    first = True\n                    for j in range(num_TM):\n                        for k in range(num_TN):\n                            temp = para[j * opt.TM:(j + 1) * opt.TM, k * opt.TN:(k + 1) * opt.TN, :, :]\n                            temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                 
                    for j in range(num_TM):\n                        for k in range(num_TN):\n                            temp = para[j * opt.TM:(j + 1) * opt.TM, k * opt.TN:(k + 1) * opt.TN, :, :]\n                            temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                            temp = temp.permute(2, 0, 1).contiguous().view(-1)\n                            if first:\n                                reorder_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_para = np.append(reorder_para, temp.cpu().data.numpy())\n                        temp = para[j * opt.TM:(j + 1) * opt.TM, num_TN * opt.TN:num_TN * opt.TN + remainder_TN, :, :]\n                        temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                        temp = temp.permute(2, 0, 1).contiguous().view(-1)\n                        if first:\n                            reorder_para = temp.clone().cpu().data.numpy()\n                            first = False\n                        else:\n                            reorder_para = np.append(reorder_para, temp.cpu().data.numpy())\n\n                    for k in range(num_TN):\n                        temp = para[num_TM * opt.TM:num_TM * opt.TM + remainder_TM, k * opt.TN:(k + 1) * opt.TN, :, :]\n                        temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                        temp = temp.permute(2, 0, 1).contiguous().view(-1)\n                        if first:\n                            reorder_para = temp.clone().cpu().data.numpy()\n                            first = False\n                        else:\n                            reorder_para = np.append(reorder_para, temp.cpu().data.numpy())\n                    temp = para[num_TM * opt.TM:num_TM * opt.TM + remainder_TM,\n                           num_TN * opt.TN:num_TN * opt.TN + remainder_TN, :, :]\n                    temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                    temp = temp.permute(2, 0, 1).contiguous().view(-1)\n                    if first:\n                        reorder_para = temp.clone().cpu().data.numpy()\n                        first = False\n                    else:\n                        reorder_para = np.append(reorder_para, temp.cpu().data.numpy())\n\n                    para_flatten = reorder_para\n                else:\n                    para_flatten = para.cpu().data.numpy().flatten()  # flatten\n\n                # store the weights\n                for i in para_flatten:\n                    if opt.w_bit == 16:\n                        # DoReFa quantization is asymmetric, Google quantization is symmetric\n                        if opt.quantized == 1:\n                            a = struct.pack('<h', int(i))\n                        if opt.quantized == 2:\n                            a = struct.pack('<H', int(i))\n                    elif opt.w_bit == 8:\n                        # DoReFa quantization is asymmetric, Google quantization is symmetric\n                        if opt.quantized == 1:\n                            a = struct.pack('b', int(i))\n                        if opt.quantized == 2:\n                            a = struct.pack('B', int(i))\n                    else:\n                        a = struct.pack('<f', i)\n                    w_file.write(a)\n\n                # process the bias\n                if bias is not None:\n                    # generate the quantized parameters\n                    para = conv_layer.bias_quantizer.get_quantize_value(bias)\n                    if opt.quantized == 1:\n                        bias_scale = -math.log(conv_layer.bias_quantizer.scale.cpu().data.numpy()[0], 2)\n                        a = struct.pack('<i', int(bias_scale))\n                        b_scale.write(a)\n                    # print(para.shape)\n                    para_flatten = para.cpu().data.numpy().flatten()  # flatten\n                    # store the bias\n                    for i in para_flatten:\n                        if opt.w_bit == 16:\n                            # DoReFa quantization is asymmetric, Google quantization is symmetric\n                            if opt.quantized == 1:\n                                a = struct.pack('<h', int(i))\n                            if opt.quantized == 2:\n                                a = struct.pack('<H', int(i))\n                        elif opt.w_bit == 8:\n                            # DoReFa quantization is asymmetric, Google quantization is symmetric\n                            if opt.quantized == 1:\n                                a = struct.pack('b', int(i))\n                            if opt.quantized == 2:\n                                a = struct.pack('B', int(i))\n                        else:\n                            a = struct.pack('<f', i)\n                        b_file.write(a)\n            if mdef['type'] == 'shortcut':\n                shortcut_scale = -math.log(module.scale.cpu().data.numpy()[0], 2)\n                a = struct.pack('<i', int(shortcut_scale))\n                s_scale.write(a)\n        if opt.quantized == 1:\n            w_scale.close()\n            a_scale.close()\n            b_scale.close()\n            s_scale.close()\n        w_file.close()\n        b_file.close()\n    # Eval mode\n    model.to(device).eval()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco2017.data', help='coco.data file path')\n    parser.add_argument('--weights', type=str, default='weights/yolov3.weights', help='path to weights file')\n    parser.add_argument('--source', type=str, default='data/samples', help='source')  # input file/folder, 0 for webcam\n    parser.add_argument('--output', type=str, default='output', help='output folder')  # output folder\n    parser.add_argument('--img_size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--conf-thres', type=float, default=0.6, help='object confidence threshold')\n    parser.add_argument('--nms-thres', type=float, default=0.8, help='iou threshold for non-maximum suppression')\n    parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')\n    parser.add_argument('--quantized', type=int, default=-1, help='quantization way')\n    parser.add_argument('--a-bit', type=int, default=8, help='a-bit')\n    parser.add_argument('--w-bit', type=int, default=8, help='w-bit')\n    parser.add_argument('--FPGA', action='store_true', help='FPGA')\n    parser.add_argument('--reorder', action='store_true', help='reorder')\n    parser.add_argument('--TN', type=int, default=8, help='TN')\n    parser.add_argument('--TM', type=int, default=64, help='TM')\n    parser.add_argument('--gray-scale', action='store_true', help='gray scale training')\n    opt = parser.parse_args()\n    print(opt)\n\n    with torch.no_grad():\n        convert()\n"
  },
  {
    "path": "convert_FPGA_2.py",
    "content": "import argparse\nimport struct\n\nfrom models import *  # set ONNX_EXPORT in models.py\n\nfrom utils.utils import *\n\n\ndef convert():\n    img_size = opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)\n    weights = opt.weights\n\n    # Initialize\n    device = torch_utils.select_device(opt.device)\n\n    # Initialize model\n    model = Darknet(opt.cfg, img_size, quantized=opt.quantized, a_bit=opt.a_bit, w_bit=opt.w_bit,\n                    FPGA=opt.FPGA, is_gray_scale=opt.gray_scale, shortcut_way=opt.shortcut_way)\n\n    # Load weights\n    attempt_download(weights)\n    if weights.endswith('.pt'):  # pytorch format\n        model.load_state_dict(torch.load(weights, map_location=device)['model'])\n    else:  # darknet format\n        _ = load_darknet_weights(model, weights, FPGA=opt.FPGA)\n    if opt.quantized == 0:\n        save_weights(model, path='weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '-best.weights')\n    else:\n        w_file = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_weights.bin', 'wb')\n        w_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_w_scale.bin', 'wb')\n        a_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_a_scale.bin', 'wb')\n        b_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_b_scale.bin', 'wb')\n        s_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_s_scale.bin', 'wb')\n        a = struct.pack('<i', 7)\n        a_scale.write(a)\n        for _, (mdef, module) in enumerate(zip(model.module_defs, model.module_list)):\n            print(mdef)\n            if mdef['type'] == 'convolutional':\n                conv_layer = module[0]\n                # 使用BN训练中量化，融合BN参数\n                weight, bias = conv_layer.BN_fuse()\n                activate_scale = -math.log(conv_layer.activation_quantizer.scale.cpu().data.numpy()[0], 2)\n                weight_scale = -math.log(conv_layer.weight_quantizer.scale.cpu().data.numpy()[0], 2)\n                bias_scale = -math.log(conv_layer.bias_quantizer.scale.cpu().data.numpy()[0], 2)\n                a = struct.pack('<i', int(activate_scale))\n                a_scale.write(a)\n                a = struct.pack('<i', int(weight_scale))\n                w_scale.write(a)\n                a = struct.pack('<i', int(bias_scale))\n                b_scale.write(a)\n\n                # 处理bias\n                para = conv_layer.bias_quantizer.get_quantize_value(bias)\n                # print(para.shape)\n                para_flatten = para.cpu().data.numpy().flatten()  # 展开\n                # 存储bias\n                count = 0\n                for i in para_flatten:\n                    if opt.w_bit == 16:\n                        a = struct.pack('<h', int(i))\n                    elif opt.w_bit == 8:\n                        a = struct.pack('b', int(i))\n                    else:\n                        a = struct.pack('<f', i)\n                    w_file.write(a)\n                    count += 1\n                for i in range(count, 2048):\n                    if opt.w_bit == 16:\n                        a = struct.pack('<h', int(0))\n                    elif opt.w_bit == 8:\n                        a = struct.pack('b', int(0))\n                    else:\n                        a = struct.pack('<f', 0)\n                    w_file.write(a)\n\n                # 处理weights\n                para = conv_layer.weight_quantizer.get_quantize_value(weight)\n   
                if opt.reorder:\n                    # reorder the parameters\n                    print(\"use reorder!\")\n                    shape_output = para.shape[0]\n                    shape_input = para.shape[1]\n                    num_TN = int(shape_input / opt.TN)\n                    remainder_TN = shape_input % opt.TN\n                    num_TM = int(shape_output / opt.TM)\n                    remainder_TM = shape_output % opt.TM\n                    first = True\n                    for j in range(num_TM):\n                        for k in range(num_TN):\n                            temp = para[j * opt.TM:(j + 1) * opt.TM, k * opt.TN:(k + 1) * opt.TN, :, :]\n                            temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                            temp = temp.permute(2, 0, 1).contiguous().view(-1)\n                            if first:\n                                reorder_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_para = np.append(reorder_para, temp.cpu().data.numpy())\n                        if shape_input == 3 or (opt.gray_scale and shape_input == 1):\n                            temp = para[j * opt.TM:(j + 1) * opt.TM, num_TN * opt.TN:num_TN * opt.TN + remainder_TN, :,\n                                   :]\n                            temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                            fill = torch.zeros(opt.TM, opt.TN, temp.shape[2]).to(temp.device)\n                            fill[:, 0:remainder_TN, :] = temp\n                            temp = fill.permute(2, 0, 1).contiguous().view(-1)\n                            if first:\n                                reorder_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_para = np.append(reorder_para, temp.cpu().data.numpy())\n                    if mdef['activation'] == 'linear':\n                        for k in range(num_TN):\n                            temp = para[0:remainder_TM, k * opt.TN:(k + 1) * opt.TN, :, :]\n                            temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                            fill = torch.zeros(opt.TM, opt.TN, temp.shape[2]).to(temp.device)\n                            fill[0:remainder_TM, :, :] = temp\n                            temp = fill.permute(2, 0, 1).contiguous().view(-1)\n                            if first:\n                                reorder_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_para = np.append(reorder_para, temp.cpu().data.numpy())\n\n                    para_flatten = reorder_para\n                    if shape_input == 3 or (opt.gray_scale and shape_input == 1):\n                        if para_flatten.size == para.shape[0] * 32 * para.shape[2] * para.shape[3]:\n                            print(\"converted correctly!\")\n                        else:\n                            print(\"conversion size mismatch!\")\n                    elif mdef['activation'] == 'linear':\n                        if para_flatten.size == ((para.shape[0]) // 32 + 1) * 32 * para.shape[1] * para.shape[2] * \\\n                                para.shape[3]:\n                            print(\"converted correctly!\")\n                        else:\n                            print(\"conversion size mismatch!\")\n                    else:\n                        if para_flatten.size == para.shape[0] * para.shape[1] * para.shape[2] * para.shape[3]:\n                            print(\"converted correctly!\")\n                        else:\n                            print(\"conversion size mismatch!\")\n                else:\n                    para_flatten = para.cpu().data.numpy().flatten()  # flatten\n                # store the weights\n                for i in para_flatten:\n                    if opt.w_bit == 16:\n                        a = struct.pack('<h', int(i))\n                    elif opt.w_bit == 8:\n                        a = struct.pack('b', int(i))\n                    else:\n                        a = struct.pack('<f', i)\n                    w_file.write(a)\n            if mdef['type'] == 'shortcut':\n                shortcut_scale = -math.log(module.scale.cpu().data.numpy()[0], 2)\n                a = struct.pack('<i', int(shortcut_scale))\n                s_scale.write(a)\n        w_scale.close()\n        a_scale.close()\n        b_scale.close()\n        s_scale.close()\n        w_file.close()\n    # Eval mode\n    model.to(device).eval()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco2017.data', help='coco.data file path')\n    parser.add_argument('--weights', type=str, default='weights/yolov3.weights', help='path to weights file')\n    parser.add_argument('--source', type=str, default='data/samples', help='source')  # input file/folder, 0 for webcam\n    parser.add_argument('--output', type=str, default='output', help='output folder')  # output folder\n    parser.add_argument('--img_size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--conf-thres', type=float, default=0.6, help='object confidence threshold')\n    parser.add_argument('--nms-thres', type=float, default=0.8, help='iou threshold for non-maximum suppression')\n    parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')\n    parser.add_argument('--quantized', type=int, default=-1, help='quantization way')\n    parser.add_argument('--shortcut_way', type=int, default=-1, help='shortcut quantization way')\n    parser.add_argument('--a-bit', type=int, default=8, help='a-bit')\n    parser.add_argument('--w-bit', type=int, default=8, help='w-bit')\n    parser.add_argument('--FPGA', action='store_true', help='FPGA')\n    parser.add_argument('--reorder', action='store_true', help='reorder')\n    parser.add_argument('--TN', type=int, default=32, help='TN')\n    parser.add_argument('--TM', type=int, default=32, help='TM')\n    parser.add_argument('--gray-scale', action='store_true', help='gray scale training')\n    opt = parser.parse_args()\n    print(opt)\n\n    with torch.no_grad():\n        convert()\n"
  },
  {
    "path": "data/UAV_Samples_label.data",
    "content": "classes= 1\ntrain=data/UAV_Samples_label/train.txt\nvalid=data/UAV_Samples_label/test.txt\nnames=data/UAV_Samples_label.names\n"
  },
  {
    "path": "data/UAV_Samples_label.names",
    "content": "UAV\n"
  },
  {
    "path": "data/bdd100k.data",
    "content": "classes= 10\ntrain=data/bdd100k/train.txt\nvalid=data/bdd100k/test.txt\nnames=data/bdd100k.names\n"
  },
  {
    "path": "data/bdd100k.names",
    "content": "car\nbus\nperson\nbike\ntruck\nmotor\ntrain\nrider\ntraffic sign\ntraffic light\n"
  },
  {
    "path": "data/coco.names",
    "content": "person\nbicycle\ncar\nmotorcycle\nairplane\nbus\ntrain\ntruck\nboat\ntraffic light\nfire hydrant\nstop sign\nparking meter\nbench\nbird\ncat\ndog\nhorse\nsheep\ncow\nelephant\nbear\nzebra\ngiraffe\nbackpack\numbrella\nhandbag\ntie\nsuitcase\nfrisbee\nskis\nsnowboard\nsports ball\nkite\nbaseball bat\nbaseball glove\nskateboard\nsurfboard\ntennis racket\nbottle\nwine glass\ncup\nfork\nknife\nspoon\nbowl\nbanana\napple\nsandwich\norange\nbroccoli\ncarrot\nhot dog\npizza\ndonut\ncake\nchair\ncouch\npotted plant\nbed\ndining table\ntoilet\ntv\nlaptop\nmouse\nremote\nkeyboard\ncell phone\nmicrowave\noven\ntoaster\nsink\nrefrigerator\nbook\nclock\nvase\nscissors\nteddy bear\nhair drier\ntoothbrush\n"
  },
  {
    "path": "data/coco2014.data",
    "content": "classes=80\ntrain=data/coco2014/train2014.txt\nvalid=data/coco2014/val2014.txt\nnames=data/coco.names\n"
  },
  {
    "path": "data/coco2017.data",
    "content": "classes=80\ntrain=data/coco2017/train2017.txt\nvalid=data/coco2017/val2017.txt\nnames=data/coco.names\n"
  },
  {
    "path": "data/dior.data",
    "content": "classes= 20\ntrain=data/DIOR-full/train.txt\nvalid=data/DIOR-full/test.txt\nnames=data/dior.names\n"
  },
  {
    "path": "data/dior.names",
    "content": "airplane\nairport\nbaseballfield\nbasketballcourt\nbridge\nchimney\ndam\nExpressway-Service-area\nExpressway-toll-station\ngolffield\ngroundtrackfield\nharbor\nship\nstadium\nstoragetank\ntenniscourt\ntrainstation\nvehicle\nwindmill\noverpass\n"
  },
  {
    "path": "data/get_coco2014.sh",
    "content": "#!/bin/bash\n# Zip coco folder\n# zip -r coco.zip coco\n# tar -czvf coco.tar.gz coco\n\n# Download labels from Google Drive, accepting presented query\nfilename=\"coco2014labels.zip\"\nfileid=\"1s6-CmF5_SElM28r52P1OUrCcuXZN-SFo\"\ncurl -c ./cookie -s -L \"https://drive.google.com/uc?export=download&id=${fileid}\" > /dev/null\ncurl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=${fileid}\" -o ${filename}\nrm ./cookie\n\n# Unzip labels\nunzip -q ${filename}  # for coco.zip\n# tar -xzf ${filename}  # for coco.tar.gz\nrm ${filename}\n\n# Download and unzip images\ncd coco/images\nf=\"train2014.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f\nf=\"val2014.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f\n\n# cd out\ncd ../..\n"
  },
  {
    "path": "data/get_coco2017.sh",
    "content": "#!/bin/bash\n# Zip coco folder\n# zip -r coco.zip coco\n# tar -czvf coco.tar.gz coco\n\n# Download labels from Google Drive, accepting presented query\nfilename=\"coco2017labels.zip\"\nfileid=\"1cXZR_ckHki6nddOmcysCuuJFM--T-Q6L\"\ncurl -c ./cookie -s -L \"https://drive.google.com/uc?export=download&id=${fileid}\" > /dev/null\ncurl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=${fileid}\" -o ${filename}\nrm ./cookie\n\n# Unzip labels\nunzip -q ${filename}  # for coco.zip\n# tar -xzf ${filename}  # for coco.tar.gz\nrm ${filename}\n\n# Download and unzip images\ncd coco/images\nf=\"train2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f\nf=\"val2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f\n\n# cd out\ncd ../..\n"
  },
  {
    "path": "data/oxfordhand.data",
    "content": "classes= 1\ntrain=data/hand/train.txt\nvalid=data/hand/valid.txt\nnames=data/oxfordhand.names\n"
  },
  {
    "path": "data/oxfordhand.names",
    "content": "hand\n"
  },
  {
    "path": "data/screw.data",
    "content": "classes= 2\ntrain  = data/screw/train.txt\nvalid  = data/screw/valid.txt\nnames = data/screw.names\nbackup = backup\n\n"
  },
  {
    "path": "data/screw.names",
    "content": "noscrew\nscrew"
  },
  {
    "path": "data/trainset.data",
    "content": "classes=5\ntrain=data/trainset/train.txt\nvalid=data/trainset/test.txt\nnames=data/trainset.names\n"
  },
  {
    "path": "data/trainset.names",
    "content": "Freedom\nBurke\nNimitz\nWasp\nTiconderoga\n"
  },
  {
    "path": "data/visdrone.data",
    "content": "classes= 10\ntrain=data/visdrone/train.txt\nvalid=data/visdrone/test.txt\nnames=data/visdrone.names\n"
  },
  {
    "path": "data/visdrone.names",
    "content": "pedestrian\npeople\nbicycle\ncar\nvan\ntruck\ntricycle\nawning-tricycle\nbus\nmotor\n"
  },
  {
    "path": "detect.py",
    "content": "import argparse\nfrom utils import output_upsample\n\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\n\n\ndef detect(save_img=False):\n    if opt.quantizer_output == True:\n        tmp_dir = 'quantizer_output'\n        subprocess.Popen(\"rm -rf %s\" % tmp_dir, shell=True)\n    imgsz = opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)\n    out, source, weights, view_img, save_txt = opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt\n    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')\n\n    # Initialize\n    device = torch_utils.select_device(opt.device)\n    if os.path.exists(out):\n        shutil.rmtree(out)  # delete output folder\n    os.makedirs(out)  # make new output folder\n\n    # Initialize model\n    model = Darknet(opt.cfg, imgsz, quantized=opt.quantized, quantizer_output=opt.quantizer_output,\n                    layer_idx=opt.layer_idx,\n                    reorder=opt.reorder, TN=opt.TN, TM=opt.TM, a_bit=opt.a_bit, w_bit=opt.w_bit, FPGA=opt.FPGA,\n                    is_gray_scale=opt.gray_scale, maxabsscaler=opt.maxabsscaler, shortcut_way=opt.shortcut_way)\n\n    # Load weights\n    attempt_download(weights)\n    if weights.endswith('.pt'):  # pytorch format\n        model.load_state_dict(torch.load(weights, map_location=device)['model'], strict=False)\n    else:  # darknet format\n        load_darknet_weights(model, weights)\n    #################打印model_list\n    '''AWEIGHT = torch.load(weights, map_location=device)['model']\n    for k,v in AWEIGHT.items():\n        print(k)'''\n\n    # Eval mode\n    model.to(device).eval()\n\n    # Set Dataloader\n    vid_path, vid_writer = None, None\n    if webcam:\n        view_img = True\n        torch.backends.cudnn.benchmark = True  # set True to speed up constant image size inference\n        dataset = LoadStreams(source, img_size=imgsz)\n    else:\n        save_img = True\n        dataset = LoadImages(source, img_size=imgsz, is_gray_scale=opt.gray_scale, rect=opt.rect)\n\n    # Get names and colors\n    names = load_classes(opt.names)\n    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]\n\n    # Run inference\n    t0 = time.time()\n    # img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img\n    # _ = model(img.float()) if device.type != 'cpu' else None  # run once\n    for path, img, im0s, vid_cap in dataset:\n        img = torch.from_numpy(img).to(device)\n        img = img.float()  # uint8 to fp16/32\n        if opt.maxabsscaler:\n            # 输出原始图片\n            if opt.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/'):\n                    os.makedirs('./quantizer_output/')\n                ori_img = copy.deepcopy(img)\n                ori_img_input = np.array(ori_img.cpu()).reshape(1, -1)\n                np.savetxt('./quantizer_output/img_input.txt', ori_img_input, delimiter='\\n')\n                ori_img_input = ori_img_input.astype(np.int8)\n                writer = open('./quantizer_output/img_bin', \"wb\")\n                writer.write(ori_img_input)\n                writer.close()\n\n                val_img = copy.deepcopy(img)\n                val_img = val_img - 128\n            img /= 256\n            img = img * 2 - 1\n            # 输出第一层的要送入卷积的量化数据\n            if opt.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/'):\n                    
            img /= 256\n            img = img * 2 - 1\n            # dump the quantized data fed to the first conv layer\n            if opt.quantizer_output:\n                if not os.path.isdir('./quantizer_output/'):\n                    os.makedirs('./quantizer_output/')\n                q_img_input = copy.deepcopy(img)\n                q_img_input = q_img_input * (2 ** (opt.a_bit - 1))\n\n                # compare the software and hardware processing paths\n                delt = val_img - q_img_input\n                delt = np.array(delt.cpu()).reshape(1, -1)\n                delt_count = [np.sum(abs(delt) > 0)]\n                np.savetxt(('./quantizer_output/not0_count.txt'), delt_count)\n\n                q_img_input = np.array(q_img_input.cpu()).reshape(1, -1)\n                np.savetxt('./quantizer_output/q_img_input.txt', q_img_input, delimiter='\\n')\n                q_img_input = q_img_input.astype(np.int8)\n                writer = open('./quantizer_output/q_img_bin', \"wb\")\n                writer.write(q_img_input)\n                writer.close()\n        else:\n            img /= 256.0  # 0 - 255 to 0.0 - 1.0 (256 keeps the scale a power of two)\n        if opt.quantized != -1:\n            if opt.a_bit == 16:\n                img = img * (2 ** 14)\n                sign = torch.sign(img)\n                img = sign * torch.floor(torch.abs(img) + 0.5)\n                img = img / (2 ** 14)\n        if img.ndimension() == 3:\n            img = img.unsqueeze(0)\n        # Inference\n        t1 = torch_utils.time_synchronized()\n        pred = model(img, augment=opt.augment)[0]\n        t2 = torch_utils.time_synchronized()\n\n        # Apply NMS\n        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,\n                                   multi_label=False, classes=opt.classes, agnostic=opt.agnostic_nms)\n\n        # Process detections\n        for i, det in enumerate(pred):  # detections for image i\n            if webcam:  # batch_size >= 1\n                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()\n            else:\n                p, s, im0 = path, '', im0s\n\n            save_path = str(Path(out) / Path(p).name)\n            s += '%gx%g ' % img.shape[2:]  # print string\n            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  #  normalization gain whwh\n            if det is not None and len(det):\n                # Rescale boxes from imgsz to im0 size\n                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\n\n                # Print results\n                for c in det[:, -1].unique():\n                    n = (det[:, -1] == c).sum()  # detections per class\n                    s += '%g %ss, ' % (n, names[int(c)])  # add to string\n\n                # Write results\n                for *xyxy, conf, cls in det:\n                    if save_txt:  # Write to file\n                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh\n                        with open(save_path[:save_path.rfind('.')] + '.txt', 'a') as file:\n                            file.write(('%g ' * 5 + '\\n') % (cls, *xywh))  # label format\n\n                    if save_img or view_img:  # Add bbox to image\n                        label = '%s %.2f' % (names[int(cls)], conf)\n                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])\n\n            # Print time (inference + NMS)\n            print('%sDone. (%.3fs)' % (s, t2 - t1))\n\n            # Stream results\n            if view_img:\n                cv2.imshow(p, im0)\n                if cv2.waitKey(1) == ord('q'):  # q to quit\n                    raise StopIteration\n\n            # Save results (image with detections)\n            if save_img:\n                if dataset.mode == 'images':\n                    cv2.imwrite(save_path, im0)\n                else:\n                    if vid_path != save_path:  # new video\n                        vid_path = save_path\n                        if isinstance(vid_writer, cv2.VideoWriter):\n                            vid_writer.release()  # release previous video writer\n\n                        fps = vid_cap.get(cv2.CAP_PROP_FPS)\n                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))\n                    vid_writer.write(im0)\n\n    if save_txt or save_img:\n        print('Results saved to %s' % os.getcwd() + os.sep + out)\n        if platform == 'darwin':  # MacOS\n            os.system('open ' + save_path)\n\n    print('Done. (%.3fs)' % (time.time() - t0))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')\n    parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')\n    parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')\n    parser.add_argument('--source', type=str, default='data/samples', help='source')  # input file/folder, 0 for webcam\n    parser.add_argument('--output', type=str, default='output', help='output folder')  # output folder\n    parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')\n    parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')\n    parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')\n    parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')\n    parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')\n    parser.add_argument('--rect', action='store_true', help='rectangular detecting')\n    parser.add_argument('--view-img', action='store_true', help='display results')\n    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\n    parser.add_argument('--classes', nargs='+', type=int, help='filter by class')\n    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')\n    parser.add_argument('--augment', action='store_true', help='augmented inference')\n    parser.add_argument('--quantized', type=int, default=-1, help='quantization way')\n    parser.add_argument('--shortcut_way', type=int, default=1, help='shortcut quantization way')\n    parser.add_argument('--a_bit', type=int, default=8, help='a-bit')\n    parser.add_argument('--w_bit', type=int, default=8, help='w-bit')\n    parser.add_argument('--FPGA', action='store_true', help='FPGA')\n    parser.add_argument('--quantizer_output', action='store_true', help='quantizer output')\n    parser.add_argument('--layer_idx', type=int, default=-1, help='layer index to output (-1 for all)')\n    parser.add_argument('--reorder', action='store_true', help='reorder')\n    parser.add_argument('--TN', type=int, default=32, help='TN')\n    parser.add_argument('--TM', type=int, default=32, help='TM')\n    parser.add_argument('--gray-scale', action='store_true', help='gray scale training')\n    parser.add_argument('--maxabsscaler', '-mas', action='store_true', help='Standardize input to (-1,1)')\n    opt = parser.parse_args()\n    opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0]  # find file\n    opt.names = list(glob.iglob('./**/' + opt.names, recursive=True))[0]  # find file\n    print(opt)\n\n    with torch.no_grad():\n        detect()\n\n        if opt.quantizer_output and opt.layer_idx == -1:\n            output_upsample.Val_upsample(opt.cfg, opt.TN)\n"
  },
  {
    "path": "info.py",
    "content": "# Author:LiPu\nimport argparse\nfrom models import *\nfrom torchsummary import summary\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='*.cfg path')\nparser.add_argument('--img_size', type=int, default=416, help='img_size')\nparser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')\nopt = parser.parse_args()\n\ndevice = torch_utils.select_device(opt.device)\nmodel = Darknet(opt.cfg)\n# model.fuse()\nmodel.to(device)\nsummary(model, input_size=(3, opt.img_size, opt.img_size))\n"
  },
  {
    "path": "layer_channel_prune.py",
    "content": "from models import *\nfrom utils.utils import *\nimport numpy as np\nfrom copy import deepcopy\nfrom test import test\nfrom terminaltables import AsciiTable\nimport time\nfrom utils.prune_utils import *\nimport argparse\n\n\n# %%\ndef obtain_filters_mask(model, thre, CBL_idx, prune_idx):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n\n            weight_copy = bn_module.weight.data.abs().clone()\n\n            channels = weight_copy.shape[0]  #\n            min_channel_num = int(channels * opt.layer_keep) if int(channels * opt.layer_keep) > 0 else 1\n            mask = weight_copy.gt(thresh).float()\n\n            if int(torch.sum(mask)) < min_channel_num:\n                _, sorted_index_weights = torch.sort(weight_copy, descending=True)\n                mask[sorted_index_weights[:min_channel_num]] = 1.\n            remain = int(mask.sum())\n            pruned = pruned + mask.shape[0] - remain\n\n            print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n                  f'remaining channel: {remain:>4d}')\n        else:\n            mask = torch.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        filters_mask.append(mask.clone())\n\n    prune_ratio = pruned / total\n    print(f'Prune channels: {pruned}\\tPrune ratio: {prune_ratio:.3f}')\n\n    return num_filters, filters_mask\n\n\ndef prune_and_eval(model, CBL_idx, CBLidx2mask):\n    model_copy = deepcopy(model)\n\n    for idx in CBL_idx:\n        bn_module = model_copy.module_list[idx][1]\n        mask = CBLidx2mask[idx].cuda()\n        bn_module.weight.data.mul_(mask)\n\n    with torch.no_grad():\n        mAP = eval_model(model_copy)[0][2]\n\n    print(f'mask the gamma as zero, mAP of the model is {mAP:.4f}')\n\n\ndef prune_and_eval2(model, prune_shortcuts=[]):\n    model_copy = deepcopy(model)\n    for idx in prune_shortcuts:\n        for i in [idx, idx - 1]:\n            bn_module = model_copy.module_list[i][1]\n\n            mask = torch.zeros(bn_module.weight.data.shape[0]).cuda()\n            bn_module.weight.data.mul_(mask)\n\n    with torch.no_grad():\n        mAP = eval_model(model_copy)[0][2]\n\n    print(f'simply mask the BN Gama of to_be_pruned CBL as zero, now the mAP is {mAP:.4f}')\n\n\n# %%\ndef obtain_filters_mask2(model, CBL_idx, prune_shortcuts):\n    filters_mask = []\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        mask = np.ones(bn_module.weight.data.shape[0], dtype='float32')\n        filters_mask.append(mask.copy())\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n    for idx in prune_shortcuts:\n        for i in [idx, idx - 1]:\n            bn_module = model.module_list[i][1]\n            mask = np.zeros(bn_module.weight.data.shape[0], dtype='float32')\n            CBLidx2mask[i] = mask.copy()\n    return CBLidx2mask\n\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n    
parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')\n    parser.add_argument('--shortcuts', type=int, default=8, help='how many shortcut layers will be pruned;\\\n        pruning one shortcut also prunes two CBLs; yolov3 has 23 shortcuts')\n    parser.add_argument('--percent', type=float, default=0.6, help='global channel prune percent')\n    parser.add_argument('--layer_keep', type=float, default=0.01, help='channel keep percent per layer')\n    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    parser.add_argument('--gray-scale', action='store_true', help='gray scale training')\n    opt = parser.parse_args()\n    print(opt)\n\n    assert opt.cfg.find(\"mobilenet\") == -1, \"Mobilenet doesn't support layer pruning!\"\n    img_size = opt.img_size\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg, (img_size, img_size), is_gray_scale=opt.gray_scale).to(device)\n\n    if opt.weights.endswith(\".pt\"):\n        model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n    else:\n        _ = load_darknet_weights(model, opt.weights)\n    print('\\nloaded weights from ', opt.weights)\n\n    eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size, imgsz=img_size,\n                                    rank=-1, is_gray_scale=True if opt.gray_scale else False)\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    print(\"\\nlet's test the original model first:\")\n    with torch.no_grad():\n        origin_model_metric = eval_model(model)\n    origin_nparameters = obtain_num_parameters(model)\n\n    ##############################################################\n    # prune channels first\n    print(\"we will prune the channels first\")\n\n    CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2(model.module_defs)\n\n    bn_weights = gather_bn_weights(model.module_list, prune_idx)\n\n    sorted_bn, sorted_index = torch.sort(bn_weights)\n    thresh_index = int(len(bn_weights) * opt.percent)\n    thresh = sorted_bn[thresh_index].cuda()\n\n    print(f'Channels with gamma below the global threshold {thresh:.4f} will be pruned.')\n\n    num_filters, filters_mask = obtain_filters_mask(model, thresh, CBL_idx, prune_idx)\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n    CBLidx2filters = {idx: filters for idx, filters in zip(CBL_idx, num_filters)}\n\n    for i in model.module_defs:\n        if i['type'] == 'shortcut':\n            i['is_access'] = False\n\n    print('merging the masks of layers connected by shortcuts!')\n    merge_mask(model, CBLidx2mask, CBLidx2filters)\n\n    prune_and_eval(model, CBL_idx, CBLidx2mask)\n\n    for i in CBLidx2mask:\n        CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()\n\n    pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)\n    print(\n        \"\\nnow prune the model but keep its size (actually fold the BN beta offset into the following layers); let's see how the mAP goes\")\n\n    with torch.no_grad():\n        eval_model(pruned_model)\n\n    for i in model.module_defs:\n        if i['type'] == 'shortcut':\n            i.pop('is_access')\n\n    compact_module_defs = 
deepcopy(model.module_defs)\n    for idx in CBL_idx:\n        assert compact_module_defs[idx]['type'] == 'convolutional'\n        compact_module_defs[idx]['filters'] = str(CBLidx2filters[idx])\n\n    compact_model1 = Darknet([model.hyperparams.copy()] + compact_module_defs, (img_size, img_size),\n                             is_gray_scale=opt.gray_scale).to(device)\n    compact_nparameters1 = obtain_num_parameters(compact_model1)\n\n    init_weights_from_loose_model(compact_model1, pruned_model, CBL_idx, Conv_idx, CBLidx2mask,\n                                  is_gray_scale=opt.gray_scale)\n\n    print('testing the channel pruned model...')\n    with torch.no_grad():\n        compact_model_metric1 = eval_model(compact_model1)\n\n    #########################################################\n    # then prune layers\n    print('\\nnow we prune shortcut layers and corresponding CBLs')\n\n    CBL_idx, Conv_idx, shortcut_idx = parse_module_defs4(compact_model1.module_defs)\n    print('all shortcut_idx:', [i + 1 for i in shortcut_idx])\n\n    # highest_thre = torch.zeros(len(shortcut_idx))\n    # for i, idx in enumerate(shortcut_idx):\n    #     highest_thre[i] = compact_model1.module_list[idx][1].weight.data.abs().max().clone()\n    # _, sorted_index_thre = torch.sort(highest_thre)\n\n    # The layer-selection strategy was changed from sorting by the per-layer max gamma to sorting by the mean;\n    # the mean usually performs slightly better, but not always, so feel free to switch back.\n    # The four commented lines above are the original strategy.\n    bn_mean = torch.zeros(len(shortcut_idx))\n    for i, idx in enumerate(shortcut_idx):\n        bn_mean[i] = compact_model1.module_list[idx][1].weight.data.abs().mean().clone()\n    _, sorted_index_thre = torch.sort(bn_mean)\n\n    prune_shortcuts = torch.tensor(shortcut_idx)[sorted_index_thre[:opt.shortcuts]]\n    prune_shortcuts = [int(x) for x in prune_shortcuts]\n\n    index_all = list(range(len(compact_model1.module_defs)))\n    index_prune = []\n    for idx in prune_shortcuts:\n        index_prune.extend([idx - 1, idx, idx + 1])\n    index_remain = [idx for idx in index_all if idx not in index_prune]\n\n    print('These shortcut layers and corresponding CBLs will be pruned:', index_prune)\n\n    prune_and_eval2(compact_model1, prune_shortcuts)\n\n    CBLidx2mask = obtain_filters_mask2(compact_model1, CBL_idx, prune_shortcuts)\n\n    pruned_model = prune_model_keep_size(compact_model1, CBL_idx, CBL_idx, CBLidx2mask)\n\n    with torch.no_grad():\n        mAP = eval_model(pruned_model)[0][2]\n    print(\"after transferring the offset of the pruned CBLs' activations, mAP is {}\".format(mAP))\n\n    compact_module_defs = deepcopy(compact_model1.module_defs)\n\n    for module_def in compact_module_defs:\n        if module_def['type'] == 'route':\n            from_layers = [int(s) for s in module_def['layers']]\n            if len(from_layers) == 2:\n                count = 0\n                for i in index_prune:\n                    if i <= from_layers[1]:\n                        count += 1\n                from_layers[1] = from_layers[1] - count\n                # from_layers = ', '.join([str(s) for s in from_layers])\n                module_def['layers'] = from_layers\n\n    compact_module_defs = [compact_module_defs[i] for i in index_remain]\n    compact_model2 = Darknet([compact_model1.hyperparams.copy()] + compact_module_defs, (img_size, img_size),\n                             is_gray_scale=opt.gray_scale).to(device)\n\n    compact_nparameters2 = obtain_num_parameters(compact_model2)\n\n    print('testing the final model')\n    with torch.no_grad():\n        compact_model_metric2 = eval_model(compact_model2)\n\n    
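# compact_model2 now holds the final channel- and layer-pruned network; the section\n    # below compares mAP, parameter count, and latency across the three stages\n\n    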
################################################################\n    # pruning finished; measure inference speed\n\n    if opt.gray_scale:\n        random_input = torch.rand((1, 1, img_size, img_size)).to(device)\n    else:\n        random_input = torch.rand((1, 3, img_size, img_size)).to(device)\n\n    print('testing inference time...')\n    origin_forward_time, output = obtain_avg_forward_time(random_input, model)\n    compact_forward_time1, compact_output1 = obtain_avg_forward_time(random_input, compact_model1)\n    compact_forward_time2, compact_output2 = obtain_avg_forward_time(random_input, compact_model2)\n\n    metric_table = [\n        [\"Metric\", \"Before\", \"After prune channels\", \"After prune layers(final)\"],\n        [\"mAP\", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric1[0][2]:.6f}',\n         f'{compact_model_metric2[0][2]:.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters1}\", f\"{compact_nparameters2}\"],\n        [\"Inference\", f'{origin_forward_time:.4f}', f'{compact_forward_time1:.4f}', f'{compact_forward_time2:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    pruned_cfg_name = opt.cfg.replace('/',\n                                      f'/layer_channel_prune_{opt.percent}_{opt.shortcuts}_shortcut_')\n    # create the output directory\n    dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n    # compact_module_defs parsed the anchors from a string into a list, so convert them back to a string here\n    with open(opt.cfg, 'r') as f:\n        lines = f.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + f'/layer_channel_prune_{str(opt.shortcuts)}_shortcuts_{str(opt.percent)}_percent.weights'\n\n    save_weights(compact_model2, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n"
  },
  {
    "path": "layer_channel_regular_prune.py",
    "content": "from models import *\nfrom utils.utils import *\nimport numpy as np\nfrom copy import deepcopy\nfrom test import test\nfrom terminaltables import AsciiTable\nimport time\nfrom utils.prune_utils import *\nimport argparse\n\nfilter_switch = [each for each in range(2048) if (each % 32 == 0)]\n\n\n# %%\ndef obtain_filters_mask(model, thre, CBL_idx, shortcut_idx, prune_idx):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    idx_new = dict()\n    # CBL_idx存储的是所有带BN的卷积层（YOLO层的前一层卷积层是不带BN的）\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n            if idx not in shortcut_idx:\n\n                mask = obtain_bn_mask(bn_module, thre).cpu().numpy()\n\n                # 保证通道数为的倍数\n                mask_cnt = int(mask.sum())\n                if mask_cnt == 0:\n                    this_layer_sort_bn = bn_module.weight.data.abs().clone()\n                    sort_bn_values = torch.sort(this_layer_sort_bn)[0]\n                    bn_cnt = bn_module.weight.shape[0]\n                    this_layer_thre = sort_bn_values[bn_cnt - 8]\n                    mask = obtain_bn_mask(bn_module, this_layer_thre).cpu().numpy()\n                else:\n                    for i in range(len(filter_switch)):\n                        if mask_cnt <= filter_switch[i]:\n                            mask_cnt = filter_switch[i]\n                            break\n                    this_layer_sort_bn = bn_module.weight.data.abs().clone()\n                    sort_bn_values = torch.sort(this_layer_sort_bn)[0]\n                    bn_cnt = bn_module.weight.shape[0]\n                    this_layer_thre = sort_bn_values[bn_cnt - mask_cnt]\n                    mask = obtain_bn_mask(bn_module, this_layer_thre).cpu().numpy()\n\n                idx_new[idx] = mask\n                remain = int(mask.sum())\n                pruned = pruned + mask.shape[0] - remain\n\n                # if remain == 0:\n                #     print(\"Channels would be all pruned!\")\n                #     raise Exception\n\n                # print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n                #     f'remaining channel: {remain:>4d}')\n            else:\n                # 如果idx在shortcut_idx之中，则试跳连层的两层的mask相等\n                mask = idx_new[shortcut_idx[idx]]\n                idx_new[idx] = mask\n                remain = int(mask.sum())\n                pruned = pruned + mask.shape[0] - remain\n\n            if remain == 0:\n                print(\"Channels would be all pruned!\")\n                raise Exception\n\n            print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n                  f'remaining channel: {remain:>4d}')\n        else:\n            mask = np.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        filters_mask.append(mask.copy())\n\n    # 因此，这里求出的prune_ratio,需要裁剪的α参数/cbl_idx中所有的α参数\n    prune_ratio = pruned / total\n    print(f'Prune channels: {pruned}\\tPrune ratio: {prune_ratio:.3f}')\n\n    return num_filters, filters_mask\n\n\ndef prune_and_eval(model, sorted_bn, shortcut_idx, percent=.0):\n    model_copy = deepcopy(model)\n    thre_index = int(len(sorted_bn) * percent)\n    # 获得α参数的阈值，小于该值的α参数对应的通道，全部裁剪掉\n    thre1 = sorted_bn[thre_index]\n\n    print(f'Channels with Gamma value less than {thre1:.8f} are pruned!')\n\n    remain_num = 0\n    idx_new = dict()\n    for idx 
in prune_idx:\n\n        if idx not in shortcut_idx:\n\n            bn_module = model_copy.module_list[idx][1]\n\n            mask = obtain_bn_mask(bn_module, thre1)\n            # record the post-pruning mask of each conv layer\n            # idx_new[idx]=mask.cpu().numpy()\n            idx_new[idx] = mask\n            remain_num += int(mask.sum())\n            bn_module.weight.data.mul_(mask)\n            # bn_module.bias.data.mul_(mask*0.0001)\n        else:\n\n            bn_module = model_copy.module_list[idx][1]\n\n            mask = idx_new[shortcut_idx[idx]]\n            idx_new[idx] = mask\n\n            remain_num += int(mask.sum())\n            bn_module.weight.data.mul_(mask)\n\n        # print(int(mask.sum()))\n\n    # with torch.no_grad():\n    #     mAP = eval_model(model_copy)[0][2]\n\n    print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')\n    print(f'Prune ratio: {1 - remain_num / len(sorted_bn):.3f}')\n    # print(f'mAP of the pruned model is {mAP:.4f}')\n\n    return thre1\n\n\ndef prune_and_eval2(model, prune_shortcuts=[]):\n    model_copy = deepcopy(model)\n    for idx in prune_shortcuts:\n        for i in [idx, idx - 1]:\n            bn_module = model_copy.module_list[i][1]\n\n            mask = torch.zeros(bn_module.weight.data.shape[0]).cuda()\n            bn_module.weight.data.mul_(mask)\n\n    with torch.no_grad():\n        mAP = eval_model(model_copy)[0][2]\n\n    print(f'after simply masking the BN gamma of the to-be-pruned CBLs as zero, the mAP is {mAP:.4f}')\n\n\n# %%\ndef obtain_filters_mask2(model, CBL_idx, prune_shortcuts):\n    filters_mask = []\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        mask = np.ones(bn_module.weight.data.shape[0], dtype='float32')\n        filters_mask.append(mask.copy())\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n    for idx in prune_shortcuts:\n        for i in [idx, idx - 1]:\n            bn_module = model.module_list[i][1]\n            mask = np.zeros(bn_module.weight.data.shape[0], dtype='float32')\n            CBLidx2mask[i] = mask.copy()\n    return CBLidx2mask\n\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')\n    parser.add_argument('--shortcuts', type=int, default=8, help='how many shortcut layers will be pruned;\\\n        pruning one shortcut also prunes two CBLs; yolov3 has 23 shortcuts')\n    parser.add_argument('--percent', type=float, default=0.6, help='global channel prune percent')\n    parser.add_argument('--layer_keep', type=float, default=0.01, help='channel keep percent per layer')\n    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    parser.add_argument('--gray-scale', action='store_true', help='gray scale training')\n    opt = parser.parse_args()\n    print(opt)\n\n    assert opt.cfg.find(\"mobilenet\") == -1, \"Mobilenet doesn't 
support layer pruning!\"\n    img_size = opt.img_size\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg, (img_size, img_size), is_gray_scale=opt.gray_scale).to(device)\n\n    if opt.weights.endswith(\".pt\"):\n        model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n    else:\n        _ = load_darknet_weights(model, opt.weights)\n    print('\\nloaded weights from ', opt.weights)\n\n    eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size, imgsz=img_size,\n                                    rank=-1, is_gray_scale=True if opt.gray_scale else False)\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    print(\"\\nlet's test the original model first:\")\n    with torch.no_grad():\n        origin_model_metric = eval_model(model)\n    origin_nparameters = obtain_num_parameters(model)\n\n    ##############################################################\n    # prune channels first\n    # unlike normal_prune, here we also need shortcut_idx and shortcut_all\n    # shortcut_idx stores the correspondence: shortcut_idx[x] is the index of the layer that is added to conv layer x-1\n    # shortcut_all stores all layers involved in shortcut additions\n    CBL_idx, Conv_idx, prune_idx, shortcut_idx, shortcut_all = parse_module_defs2(model.module_defs)\n\n    # copy the gamma parameters of all prunable BN layers into the bn_weights list\n    bn_weights = gather_bn_weights(model.module_list, prune_idx)\n    # sort the BN gammas\n    # torch.sort returns a (sorted values, sorted indices) tuple; we keep only the values\n    sorted_bn = torch.sort(bn_weights)[0]\n\n    # upper bound on the threshold so that no layer loses all of its channels\n    # (the smallest per-layer maximum gamma across all prunable layers)\n    highest_thre = []\n    for idx in prune_idx:\n        # .item() extracts the scalar value from the tensor\n        highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())\n    highest_thre = min(highest_thre)\n\n    # the prune ratio corresponding to highest_thre's position in the sorted gammas\n    percent_limit = (sorted_bn == highest_thre).nonzero().item() / len(bn_weights)\n\n    print(f'Threshold should be less than {highest_thre:.8f}.')\n    print(f'The corresponding prune ratio is {percent_limit:.3f}.')\n\n    percent = opt.percent\n    threshold = prune_and_eval(model, sorted_bn, shortcut_idx, percent)\n\n    num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, shortcut_idx, prune_idx)\n\n    # CBLidx2mask maps each BN layer in CBL_idx to its mask\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n\n    pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)\n\n    with torch.no_grad():\n        mAP = eval_model(pruned_model)[0][2]\n    print('after prune_model_keep_size, mAP is {}'.format(mAP))\n\n    # copy the original model's module_defs and update the filter counts\n    compact_module_defs = deepcopy(model.module_defs)\n    for idx, num in zip(CBL_idx, num_filters):\n        assert compact_module_defs[idx]['type'] == 'convolutional'\n        compact_module_defs[idx]['filters'] = str(num)\n\n    # for item_def in compact_module_defs:\n    #     print(item_def)\n\n    compact_model1 = Darknet([model.hyperparams.copy()] + compact_module_defs, (img_size, img_size),\n                             is_gray_scale=opt.gray_scale).to(device)\n    compact_nparameters1 = obtain_num_parameters(compact_model1)\n\n    init_weights_from_loose_model(compact_model1, pruned_model, CBL_idx, Conv_idx, CBLidx2mask,\n                                  is_gray_scale=opt.gray_scale)\n\n    print('testing the channel pruned model...')\n    with torch.no_grad():\n        compact_model_metric1 = eval_model(compact_model1)\n\n    #########################################################\n    # then prune layers\n    print('\\nnow we prune shortcut layers 
and corresponding CBLs')\n\n    CBL_idx, Conv_idx, shortcut_idx = parse_module_defs4(compact_model1.module_defs)\n    print('all shortcut_idx:', [i + 1 for i in shortcut_idx])\n\n    bn_weights = gather_bn_weights(compact_model1.module_list, shortcut_idx)\n\n    sorted_bn = torch.sort(bn_weights)[0]\n\n    # highest_thre = torch.zeros(len(shortcut_idx))\n    # for i, idx in enumerate(shortcut_idx):\n    #     highest_thre[i] = compact_model1.module_list[idx][1].weight.data.abs().max().clone()\n    # _, sorted_index_thre = torch.sort(highest_thre)\n\n    # The layer-selection strategy was changed from sorting by the per-layer max gamma to sorting by the mean;\n    # the mean usually performs slightly better, but not always, so feel free to switch back.\n    # The four commented lines above are the original strategy.\n    bn_mean = torch.zeros(len(shortcut_idx))\n    for i, idx in enumerate(shortcut_idx):\n        bn_mean[i] = compact_model1.module_list[idx][1].weight.data.abs().mean().clone()\n    _, sorted_index_thre = torch.sort(bn_mean)\n\n    prune_shortcuts = torch.tensor(shortcut_idx)[sorted_index_thre[:opt.shortcuts]]\n    prune_shortcuts = [int(x) for x in prune_shortcuts]\n\n    index_all = list(range(len(compact_model1.module_defs)))\n    index_prune = []\n    for idx in prune_shortcuts:\n        index_prune.extend([idx - 1, idx, idx + 1])\n    index_remain = [idx for idx in index_all if idx not in index_prune]\n\n    print('These shortcut layers and corresponding CBLs will be pruned:', index_prune)\n\n    prune_and_eval2(compact_model1, prune_shortcuts)\n\n    CBLidx2mask = obtain_filters_mask2(compact_model1, CBL_idx, prune_shortcuts)\n\n    pruned_model = prune_model_keep_size(compact_model1, CBL_idx, CBL_idx, CBLidx2mask)\n\n    with torch.no_grad():\n        mAP = eval_model(pruned_model)[0][2]\n    print(\"after transferring the offset of the pruned CBLs' activations, mAP is {}\".format(mAP))\n\n    compact_module_defs = deepcopy(compact_model1.module_defs)\n\n    for module_def in compact_module_defs:\n        if module_def['type'] == 'route':\n            from_layers = [int(s) for s in module_def['layers']]\n            if len(from_layers) == 2:\n                count = 0\n                for i in index_prune:\n                    if i <= from_layers[1]:\n                        count += 1\n                from_layers[1] = from_layers[1] - count\n                # from_layers = ', '.join([str(s) for s in from_layers])\n                module_def['layers'] = from_layers\n\n    compact_module_defs = [compact_module_defs[i] for i in index_remain]\n    compact_model2 = Darknet([compact_model1.hyperparams.copy()] + compact_module_defs, (img_size, img_size),\n                             is_gray_scale=opt.gray_scale).to(device)\n\n    compact_nparameters2 = obtain_num_parameters(compact_model2)\n\n    # init_weights_from_loose_model(compact_model2, compact_model1, CBL_idx, Conv_idx, CBLidx2mask,\n    #                               is_gray_scale=opt.gray_scale)\n\n    print('testing the final model')\n    torch.cuda.empty_cache()\n    with torch.no_grad():\n        compact_model_metric2 = eval_model(compact_model2)\n\n    ################################################################\n    # pruning finished; measure inference speed\n    if opt.gray_scale:\n        random_input = torch.rand((1, 1, img_size, img_size)).to(device)\n    else:\n        random_input = torch.rand((1, 3, img_size, img_size)).to(device)\n\n    print('testing inference time...')\n    origin_forward_time, output = obtain_avg_forward_time(random_input, model)\n    compact_forward_time1, compact_output1 = obtain_avg_forward_time(random_input, compact_model1)\n    compact_forward_time2, compact_output2 = obtain_avg_forward_time(random_input, 
compact_model2)\n\n    metric_table = [\n        [\"Metric\", \"Before\", \"After prune channels\", \"After prune layers(final)\"],\n        [\"mAP\", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric1[0][2]:.6f}',\n         f'{compact_model_metric2[0][2]:.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters1}\", f\"{compact_nparameters2}\"],\n        [\"Inference\", f'{origin_forward_time:.4f}', f'{compact_forward_time1:.4f}', f'{compact_forward_time2:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    pruned_cfg_name = opt.cfg.replace('/',\n                                      f'/layer_channel_regular_prune_{opt.percent}_{opt.shortcuts}_shortcut_')\n    # create the output directory\n    dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n    # compact_module_defs parsed the anchors from a string into a list, so convert them back to a string here\n    with open(opt.cfg, 'r') as f:\n        lines = f.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + f'/layer_channel_regular_prune_{str(opt.shortcuts)}_shortcuts_{str(opt.percent)}_percent.weights'\n\n    save_weights(compact_model2, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n"
  },
  {
    "path": "layer_prune.py",
    "content": "from models import *\nfrom utils.utils import *\nimport torch\nimport numpy as np\nfrom copy import deepcopy\nfrom test import test\nfrom terminaltables import AsciiTable\nimport time\nfrom utils.utils import *\nfrom utils.prune_utils import *\nimport argparse\n\n\ndef prune_and_eval(model, prune_shortcuts=[]):\n    model_copy = deepcopy(model)\n    for idx in prune_shortcuts:\n        for i in [idx, idx - 1]:\n            bn_module = model_copy.module_list[i][1]\n\n            mask = torch.zeros(bn_module.weight.data.shape[0]).cuda()\n            bn_module.weight.data.mul_(mask)\n\n    with torch.no_grad():\n        mAP = eval_model(model_copy)[0][2]\n\n    print(f'simply mask the BN Gama of to_be_pruned CBL as zero, now the mAP is {mAP:.4f}')\n\n\n# %%\ndef obtain_filters_mask(model, CBL_idx, prune_shortcuts):\n    filters_mask = []\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        mask = np.ones(bn_module.weight.data.shape[0], dtype='float32')\n        filters_mask.append(mask.copy())\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n    for idx in prune_shortcuts:\n        for i in [idx, idx - 1]:\n            bn_module = model.module_list[i][1]\n            mask = np.zeros(bn_module.weight.data.shape[0], dtype='float32')\n            CBLidx2mask[i] = mask.copy()\n    return CBLidx2mask\n\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3-hand.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/oxfordhand.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')\n    parser.add_argument('--shortcuts', type=int, default=8, help='how many shortcut layers will be pruned,\\\n        pruning one shortcut will also prune two CBL,yolov3 has 23 shortcuts')\n    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    opt = parser.parse_args()\n    print(opt)\n\n    assert opt.cfg.find(\"mobilenet\") == -1, \"Mobilenet doesn't support layer pruning!\"\n    img_size = opt.img_size\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg, (img_size, img_size)).to(device)\n\n    if opt.weights.endswith(\".pt\"):\n        model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n    else:\n        load_darknet_weights(model, opt.weights)\n    print('\\nloaded weights from ', opt.weights)\n\n    eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size, imgsz=img_size,\n                                    rank=-1)\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    with torch.no_grad():\n        print(\"\\nlet's test the original model first:\")\n        origin_model_metric = eval_model(model)\n    origin_nparameters = obtain_num_parameters(model)\n\n    CBL_idx, Conv_idx, shortcut_idx = parse_module_defs4(model.module_defs)\n    print('all shortcut_idx:', [i + 1 for i in 
shortcut_idx])\n\n    # highest_thre = torch.zeros(len(shortcut_idx))\n    # for i, idx in enumerate(shortcut_idx):\n    #     highest_thre[i] = model.module_list[idx][1].weight.data.abs().max().clone()\n    # _, sorted_index_thre = torch.sort(highest_thre)\n\n    # The layer-selection strategy was changed from sorting by the per-layer max gamma to sorting by the mean;\n    # the mean usually performs slightly better, but not always, so feel free to switch back.\n    # The four commented lines above are the original strategy.\n    bn_mean = torch.zeros(len(shortcut_idx))\n    for i, idx in enumerate(shortcut_idx):\n        bn_mean[i] = model.module_list[idx][1].weight.data.abs().mean().clone()\n    _, sorted_index_thre = torch.sort(bn_mean)\n\n    prune_shortcuts = torch.tensor(shortcut_idx)[sorted_index_thre[:opt.shortcuts]]\n    prune_shortcuts = [int(x) for x in prune_shortcuts]\n\n    index_all = list(range(len(model.module_defs)))\n    index_prune = []\n    for idx in prune_shortcuts:\n        index_prune.extend([idx - 1, idx, idx + 1])\n    index_remain = [idx for idx in index_all if idx not in index_prune]\n\n    print('These shortcut layers and corresponding CBLs will be pruned:', index_prune)\n\n    prune_and_eval(model, prune_shortcuts)\n\n    CBLidx2mask = obtain_filters_mask(model, CBL_idx, prune_shortcuts)\n\n    pruned_model = prune_model_keep_size(model, CBL_idx, CBL_idx, CBLidx2mask)\n\n    with torch.no_grad():\n        mAP = eval_model(pruned_model)[0][2]\n    print(\"after transferring the offset of the pruned CBLs' activations, mAP is {}\".format(mAP))\n\n    compact_module_defs = deepcopy(model.module_defs)\n\n    for j, module_def in enumerate(compact_module_defs):\n        if module_def['type'] == 'route':\n            from_layers = [int(s) for s in module_def['layers']]\n            if len(from_layers) == 1 and from_layers[0] > 0:\n                count = 0\n                for i in index_prune:\n                    if i <= from_layers[0]:\n                        count += 1\n                from_layers[0] = from_layers[0] - count\n                # from_layers = str(from_layers[0])\n                module_def['layers'] = from_layers\n\n            elif len(from_layers) == 2:\n                count = 0\n                if from_layers[1] > 0:\n                    for i in index_prune:\n                        if i <= from_layers[1]:\n                            count += 1\n                    from_layers[1] = from_layers[1] - count\n                else:\n                    for i in index_prune:\n                        if i > j + from_layers[1] and i < j:\n                            count += 1\n                    from_layers[1] = from_layers[1] + count\n\n                # from_layers = ', '.join([str(s) for s in from_layers])\n                module_def['layers'] = from_layers\n\n    compact_module_defs = [compact_module_defs[i] for i in index_remain]\n    compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs, (img_size, img_size)).to(device)\n    compact_nparameters = obtain_num_parameters(compact_model)\n\n    # init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)\n\n    random_input = torch.rand((1, 3, img_size, img_size)).to(device)\n\n    pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)\n    compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)\n\n    # evaluate the pruned model on the test set and count its parameters\n    with torch.no_grad():\n        compact_model_metric = eval_model(compact_model)\n\n    # compare parameter counts and metrics before and after pruning\n    metric_table = [\n        [\"Metric\", \"Before\", \"After\"],\n        [\"mAP\", f'{origin_model_metric[0][2]:.6f}', 
f'{compact_model_metric[0][2]:.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters}\"],\n        [\"Inference\", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    # write the pruned cfg file and save the model\n    pruned_cfg_name = opt.cfg.replace('/', f'/layer_prune_{opt.shortcuts}_shortcut_')\n    # create the output directory\n    dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n\n    # compact_module_defs parsed the anchors from a string into a list, so convert them back to a string here\n    with open(opt.cfg, 'r') as f:\n        lines = f.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + f'/layer_prune_{str(opt.shortcuts)}_shortcuts.weights'\n\n    save_weights(compact_model, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n"
  },
  {
    "path": "models.py",
    "content": "from utils.google_utils import *\nfrom utils.parse_config import *\nfrom utils.quantized.quantized_google import *\nfrom utils.quantized.quantized_ptq_cos import *\nfrom utils.quantized.quantized_TPSQ import *\nfrom utils.layers import *\nimport copy\n\n\n# YOLO\ndef create_modules(module_defs, img_size, cfg, quantized, quantizer_output, layer_idx, reorder, TM, TN, a_bit=8,\n                   w_bit=8, steps=0, is_gray_scale=False, maxabsscaler=False, shortcut_way=-1):\n    # Constructs module list of layer blocks from module configuration in module_defs\n\n    img_size = [img_size] * 2 if isinstance(img_size, int) else img_size  # expand if necessary\n    _ = module_defs.pop(0)  # cfg training hyperparams (unused)\n    if is_gray_scale:\n        output_filters = [1]  # input channels\n    else:\n        output_filters = [3]\n\n    module_list = nn.ModuleList()\n    routs = []  # list of layers which rout to deeper layers\n    yolo_index = -1\n\n    for i, mdef in enumerate(module_defs):\n        modules = nn.Sequential()\n\n        if mdef['type'] == 'convolutional':\n            bn = int(mdef['batch_normalize'])\n            filters = int(mdef['filters'])\n            kernel_size = int(mdef['size'])\n            pad = (kernel_size - 1) // 2 if int(mdef['pad']) else 0\n            if quantized == 1:\n                modules.add_module('Conv2d', BNFold_QuantizedConv2d_For_FPGA(in_channels=output_filters[-1],\n                                                                             out_channels=filters,\n                                                                             kernel_size=kernel_size,\n                                                                             stride=int(mdef['stride']),\n                                                                             padding=pad,\n                                                                             groups=mdef[\n                                                                                 'groups'] if 'groups' in mdef else 1,\n                                                                             bias=not bn,\n                                                                             a_bits=a_bit,\n                                                                             w_bits=w_bit,\n                                                                             bn=bn,\n                                                                             activate=mdef['activation'],\n                                                                             steps=steps,\n                                                                             quantizer_output=quantizer_output,\n                                                                             reorder=reorder, TM=TM, TN=TN,\n                                                                             name=\"{:04d}\".format(i) + \"_\" + mdef[\n                                                                                                                 'type'][\n                                                                                                             :4],\n                                                                             layer_idx=layer_idx,\n                                                                             maxabsscaler=maxabsscaler))\n            elif quantized == 2:\n                modules.add_module('Conv2d', TPSQ_BNFold_QuantizedConv2d_For_FPGA(in_channels=output_filters[-1],\n   
                                                                               out_channels=filters,\n                                                                                  kernel_size=kernel_size,\n                                                                                  stride=int(mdef['stride']),\n                                                                                  padding=pad,\n                                                                                  groups=mdef[\n                                                                                      'groups'] if 'groups' in mdef else 1,\n                                                                                  bias=not bn,\n                                                                                  a_bits=a_bit,\n                                                                                  w_bits=w_bit,\n                                                                                  bn=bn,\n                                                                                  activate=mdef['activation'],\n                                                                                  steps=steps,\n                                                                                  quantizer_output=quantizer_output,\n                                                                                  maxabsscaler=maxabsscaler))\n            elif quantized == 3:\n                modules.add_module('Conv2d', BNFold_COSPTQuantizedConv2d_For_FPGA(in_channels=output_filters[-1],\n                                                                                  out_channels=filters,\n                                                                                  kernel_size=kernel_size,\n                                                                                  stride=int(mdef['stride']),\n                                                                                  padding=pad,\n                                                                                  groups=mdef[\n                                                                                      'groups'] if 'groups' in mdef else 1,\n                                                                                  bias=not bn,\n                                                                                  a_bits=a_bit,\n                                                                                  w_bits=w_bit,\n                                                                                  bn=bn,\n                                                                                  activate=mdef['activation'],\n                                                                                  quantizer_output=quantizer_output,\n                                                                                  reorder=reorder, TM=TM, TN=TN,\n                                                                                  name=\"{:04d}\".format(i) + \"_\" + mdef[\n                                                                                                                      'type'][\n                                                                                                                  :4],\n                                                                                  layer_idx=layer_idx,\n                                                                                  
maxabsscaler=maxabsscaler))\n            else:\n                modules.add_module('Conv2d', nn.Conv2d(in_channels=output_filters[-1],\n                                                       out_channels=filters,\n                                                       kernel_size=kernel_size,\n                                                       stride=int(mdef['stride']),\n                                                       padding=pad,\n                                                       groups=mdef['groups'] if 'groups' in mdef else 1,\n                                                       bias=not bn))\n                if bn:\n                    modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.1))\n\n                if mdef['activation'] == 'leaky':\n                    modules.add_module('activation', nn.LeakyReLU(0.1 if not maxabsscaler else 0.25, inplace=True))\n                    # modules.add_module('activation', nn.PReLU(num_parameters=1, init=0.10))\n                    # modules.add_module('activation', Swish())\n                if mdef['activation'] == 'relu6':\n                    modules.add_module('activation', ReLU6())\n                if mdef['activation'] == 'h_swish':\n                    modules.add_module('activation', HardSwish())\n                if mdef['activation'] == 'relu':\n                    modules.add_module('activation', nn.ReLU())\n                if mdef['activation'] == 'mish':\n                    modules.add_module('activation', Mish())\n\n        elif mdef['type'] == 'depthwise':\n            bn = int(mdef['batch_normalize'])\n            filters = int(mdef['filters'])\n            kernel_size = int(mdef['size'])\n            pad = (kernel_size - 1) // 2 if int(mdef['pad']) else 0\n            if quantized == 1:\n                modules.add_module('DepthWise2d',\n                                   BNFold_QuantizedConv2d_For_FPGA(in_channels=output_filters[-1],\n                                                                   out_channels=filters,\n                                                                   kernel_size=kernel_size,\n                                                                   stride=int(mdef['stride']),\n                                                                   padding=pad,\n                                                                   groups=output_filters[-1],\n                                                                   bias=not bn,\n                                                                   a_bits=a_bit,\n                                                                   w_bits=w_bit,\n                                                                   bn=bn,\n                                                                   activate=mdef['activation'],\n                                                                   steps=steps,\n                                                                   quantizer_output=quantizer_output,\n                                                                   reorder=reorder, TM=TM, TN=TN,\n                                                                   name=\"{:04d}\".format(i) + \"_\" + mdef['type'][:4],\n                                                                   layer_idx=layer_idx,\n                                                                   maxabsscaler=maxabsscaler))\n\n            if quantized == 2:\n                modules.add_module('DepthWise2d',\n                                   
TPSQ_BNFold_QuantizedConv2d_For_FPGA(in_channels=output_filters[-1],\n                                                                        out_channels=filters,\n                                                                        kernel_size=kernel_size,\n                                                                        stride=int(mdef['stride']),\n                                                                        padding=pad,\n                                                                        groups=output_filters[-1],\n                                                                        bias=not bn,\n                                                                        a_bits=a_bit,\n                                                                        w_bits=w_bit,\n                                                                        bn=bn,\n                                                                        activate=mdef['activation'],\n                                                                        steps=steps,\n                                                                        quantizer_output=quantizer_output,\n                                                                        maxabsscaler=maxabsscaler))\n\n            elif quantized == 3:\n                modules.add_module('DepthWise2d', BNFold_COSPTQuantizedConv2d_For_FPGA(in_channels=output_filters[-1],\n                                                                                       out_channels=filters,\n                                                                                       kernel_size=kernel_size,\n                                                                                       stride=int(mdef['stride']),\n                                                                                       padding=pad,\n                                                                                       groups=output_filters[-1],\n                                                                                       bias=not bn,\n                                                                                       a_bits=a_bit,\n                                                                                       w_bits=w_bit,\n                                                                                       bn=bn,\n                                                                                       activate=mdef['activation'],\n                                                                                       quantizer_output=quantizer_output,\n                                                                                       reorder=reorder, TM=TM, TN=TN,\n                                                                                       name=\"{:04d}\".format(i) + \"_\" +\n                                                                                            mdef['type'][:4],\n                                                                                       layer_idx=layer_idx,\n                                                                                       maxabsscaler=maxabsscaler))\n            else:\n                modules.add_module('DepthWise2d', nn.Conv2d(in_channels=output_filters[-1],\n                                                            out_channels=filters,\n                                                            kernel_size=kernel_size,\n                                                         
   stride=int(mdef['stride']),\n                                                            padding=pad,\n                                                            groups=output_filters[-1],\n                                                            bias=not bn), )\n                if bn:\n                    modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.1))\n\n                if mdef['activation'] == 'leaky':\n                    modules.add_module('activation', nn.LeakyReLU(0.1 if not maxabsscaler else 0.25, inplace=True))\n                    # modules.add_module('activation', nn.PReLU(num_parameters=1, init=0.10))\n                    # modules.add_module('activation', Swish())\n                if mdef['activation'] == 'relu6':\n                    modules.add_module('activation', ReLU6())\n                if mdef['activation'] == 'h_swish':\n                    modules.add_module('activation', HardSwish())\n                if mdef['activation'] == 'relu':\n                    modules.add_module('activation', nn.ReLU())\n                if mdef['activation'] == 'mish':\n                    modules.add_module('activation', Mish())\n\n        elif mdef['type'] == 'BatchNorm2d':\n            filters = output_filters[-1]\n            modules = nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4)\n            if i == 0 and filters == 3:  # normalize RGB image\n                # imagenet mean and var https://pytorch.org/docs/stable/torchvision/models.html#classification\n                modules.running_mean = torch.tensor([0.485, 0.456, 0.406])\n                modules.running_var = torch.tensor([0.0524, 0.0502, 0.0506])\n\n        elif mdef['type'] == 'maxpool':\n            k = mdef['size']  # kernel size\n            stride = mdef['stride']\n            maxpool = nn.MaxPool2d(kernel_size=k, stride=stride, padding=(k - 1) // 2)\n            if k == 2 and stride == 1:  # yolov3-tiny\n                modules.add_module('ZeroPad2d', nn.ZeroPad2d((0, 1, 0, 1)))\n                modules.add_module('MaxPool2d', maxpool)\n            else:\n                modules = maxpool\n\n        elif mdef['type'] == 'se':\n            if 'filters' in mdef:\n                filters = int(mdef['filters'])\n            modules.add_module('se', SE(channel=filters))\n            if 'reduction' in mdef:\n                modules.add_module('se', SE(output_filters[-1], reduction=int(mdef['reduction'])))\n\n        elif mdef['type'] == 'upsample':\n            modules = nn.Upsample(scale_factor=mdef['stride'])\n\n        elif mdef['type'] == 'route':  # nn.Sequential() placeholder for 'route' layer\n            layers = mdef['layers']\n            filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers])\n            if 'groups' in mdef:\n                filters = filters // 2\n            routs.extend([i + l if l < 0 else l for l in layers])\n            if quantized == -1:\n                if 'groups' in mdef:\n                    modules = FeatureConcat(layers=layers, groups=True)\n                else:\n                    modules = FeatureConcat(layers=layers, groups=False)\n            elif quantized == 3:\n                if 'groups' in mdef:\n                    modules = COSPTQuantizedFeatureConcat(layers=layers, groups=True, bits=a_bit,\n                                                          quantizer_output=quantizer_output,\n                                                          reorder=reorder, TM=TM, TN=TN,\n                                                      
    name=\"{:04d}\".format(i) + \"_\" +\n                                                               mdef['type'][:4],\n                                                          layer_idx=layer_idx, )\n                else:\n                    modules = COSPTQuantizedFeatureConcat(layers=layers, groups=False, bits=a_bit,\n                                                          quantizer_output=quantizer_output,\n                                                          reorder=reorder, TM=TM, TN=TN,\n                                                          name=\"{:04d}\".format(i) + \"_\" +\n                                                               mdef['type'][:4],\n                                                          layer_idx=layer_idx, )\n            else:\n                if 'groups' in mdef:\n                    modules = QuantizedFeatureConcat(layers=layers, groups=True, bits=a_bit,\n                                                     quantizer_output=quantizer_output,\n                                                     reorder=reorder, TM=TM, TN=TN,\n                                                     name=\"{:04d}\".format(i) + \"_\" +\n                                                          mdef['type'][:4],\n                                                     layer_idx=layer_idx, )\n                else:\n                    modules = QuantizedFeatureConcat(layers=layers, groups=False, bits=a_bit,\n                                                     quantizer_output=quantizer_output,\n                                                     reorder=reorder, TM=TM, TN=TN,\n                                                     name=\"{:04d}\".format(i) + \"_\" +\n                                                          mdef['type'][:4],\n                                                     layer_idx=layer_idx, )\n\n        elif mdef['type'] == 'shortcut':  # nn.Sequential() placeholder for 'shortcut' layer\n            layers = mdef['from']\n            filters = output_filters[-1]\n            routs.extend([i + l if l < 0 else l for l in layers])\n            if quantized == -1 or quantized == 2:\n                modules = Shortcut(layers=layers, weight='weights_type' in mdef)\n            else:\n                if quantized == 3:\n                    if shortcut_way == 1:\n                        modules = COSPTQuantizedShortcut_min(layers=layers, weight='weights_type' in mdef, bits=a_bit,\n                                                             quantizer_output=quantizer_output,\n                                                             reorder=reorder, TM=TM, TN=TN,\n                                                             name=\"{:04d}\".format(i) + \"_\" +\n                                                                  mdef['type'][:4],\n                                                             layer_idx=layer_idx, )\n                    elif shortcut_way == 2:\n                        modules = COSPTQuantizedShortcut_max(layers=layers, weight='weights_type' in mdef, bits=a_bit,\n                                                             quantizer_output=quantizer_output,\n                                                             reorder=reorder, TM=TM, TN=TN,\n                                                             name=\"{:04d}\".format(i) + \"_\" +\n                                                                  mdef['type'][:4],\n                                                             layer_idx=layer_idx, )\n       
         else:\n                    if shortcut_way == 1:\n                        modules = QuantizedShortcut_min(layers=layers, weight='weights_type' in mdef, bits=a_bit,\n                                                        quantizer_output=quantizer_output,\n                                                        reorder=reorder, TM=TM, TN=TN,\n                                                        name=\"{:04d}\".format(i) + \"_\" +\n                                                             mdef['type'][:4],\n                                                        layer_idx=layer_idx, )\n                    elif shortcut_way == 2:\n                        modules = QuantizedShortcut_max(layers=layers, weight='weights_type' in mdef, bits=a_bit,\n                                                        quantizer_output=quantizer_output,\n                                                        reorder=reorder, TM=TM, TN=TN,\n                                                        name=\"{:04d}\".format(i) + \"_\" +\n                                                             mdef['type'][:4],\n                                                        layer_idx=layer_idx, )\n\n        elif mdef['type'] == 'reorg3d':  # yolov3-spp-pan-scale\n            pass\n\n        elif mdef['type'] == 'yolo':\n            yolo_index += 1\n            stride = [32, 16, 8]  # P5, P4, P3 strides\n            if any(x in cfg for x in ['panet', 'yolov4', 'cd53']):  # stride order reversed\n                if 'yolov4-tiny' not in cfg:\n                    stride = list(reversed(stride))\n            layers = mdef['from'] if 'from' in mdef else []\n            modules = YOLOLayer(anchors=mdef['anchors'][mdef['mask']],  # anchor list\n                                nc=mdef['classes'],  # number of classes\n                                img_size=img_size,  # (416, 416)\n                                yolo_index=yolo_index,  # 0, 1, 2...\n                                layers=layers,  # output layers\n                                stride=stride[yolo_index],\n                                quantizer_output=quantizer_output)\n\n            # Initialize preceding Conv2d() bias (https://arxiv.org/pdf/1708.02002.pdf section 3.3)\n            try:\n                with torch.no_grad():\n                    j = layers[yolo_index] if 'from' in mdef else -1\n                    bias_ = module_list[j][0].bias  # shape(255,)\n                    bias = bias_[:modules.no * modules.na].view(modules.na, -1)  # shape(3,85)\n                    bias[:, 4] = bias[:, 4] - 4.5  # obj: ln((1-0.01)/0.01) is approximately 4.5\n                    bias[:, 5:] = bias[:, 5:] + math.log(0.6 / (modules.nc - 0.99))  # cls (sigmoid(p) = 1/nc)\n                    module_list[j][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad)\n            except Exception:\n                print('WARNING: smart bias initialization failure.')\n\n        else:\n            print('Warning: Unrecognized Layer Type: ' + mdef['type'])\n\n        # Register module list and number of output filters\n        module_list.append(modules)\n        output_filters.append(filters)\n\n    routs_binary = [False] * (i + 1)\n    for i in routs:\n        routs_binary[i] = True\n    return module_list, routs_binary\n\n\nclass YOLOLayer(nn.Module):\n    def __init__(self, anchors, nc, img_size, yolo_index, layers, stride, quantizer_output):\n        super(YOLOLayer, self).__init__()\n        self.anchors = torch.Tensor(anchors)\n        self.index = yolo_index  # index 
of this layer in layers\n        self.layers = layers  # model output layer indices\n        self.stride = stride  # layer stride\n        self.nl = len(layers)  # number of output layers (3)\n        self.na = len(anchors)  # number of anchors (3)\n        self.nc = nc  # number of classes (80)\n        self.no = nc + 5  # number of outputs (85)\n        self.nx, self.ny, self.ng = 0, 0, 0  # initialize number of x, y gridpoints\n        self.anchor_vec = self.anchors / self.stride\n        self.anchor_wh = self.anchor_vec.view(1, self.na, 1, 1, 2)\n\n        self.quantizer_output = quantizer_output\n\n    def create_grids(self, ng=(13, 13), device='cpu'):\n        self.nx, self.ny = ng  # x and y grid size\n        self.ng = torch.tensor(ng, dtype=torch.float)\n\n        # build xy offsets\n        if not self.training:\n            yv, xv = torch.meshgrid([torch.arange(self.ny, device=device), torch.arange(self.nx, device=device)])\n            self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()\n\n        if self.anchor_vec.device != device:\n            self.anchor_vec = self.anchor_vec.to(device)\n            self.anchor_wh = self.anchor_wh.to(device)\n\n    def forward(self, p, out):\n        ASFF = False  # https://arxiv.org/abs/1911.09516\n        if ASFF:\n            i, n = self.index, self.nl  # index in layers, number of layers\n            p = out[self.layers[i]]\n            bs, _, ny, nx = p.shape  # bs, 255, 13, 13\n            if (self.nx, self.ny) != (nx, ny):\n                self.create_grids((nx, ny), p.device)\n\n            # outputs and weights\n            # w = F.softmax(p[:, -n:], 1)  # normalized weights\n            w = torch.sigmoid(p[:, -n:]) * (2 / n)  # sigmoid weights (faster)\n            # w = w / w.sum(1).unsqueeze(1)  # normalize across layer dimension\n\n            # weighted ASFF sum\n            p = out[self.layers[i]][:, :-n] * w[:, i:i + 1]\n            for j in range(n):\n                if j != i:\n                    p += w[:, j:j + 1] * \\\n                         F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False)\n        else:\n            bs, _, ny, nx = p.shape  # bs, 255, 13, 13\n            # if (self.nx, self.ny) != (nx, ny):\n            self.create_grids((nx, ny), p.device)\n\n        # p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85)  # (bs, anchors, grid, grid, classes + xywh)\n        p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous()  # prediction\n\n        if self.training:\n            return p\n\n        else:  # inference\n            io = p.clone()  # inference output\n            if self.quantizer_output == True:\n                sigmoid_output = p.clone()\n            io[..., :2] = torch.sigmoid(io[..., :2]) + self.grid  # xy\n            io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh  # wh yolo method\n            io[..., :4] *= self.stride\n            torch.sigmoid_(io[..., 4:])\n            ##输出\n            if self.quantizer_output == True:\n                xy_sigmoid_output = torch.sigmoid(sigmoid_output[..., :2])\n                cls_sigmoid_output = sigmoid_output[..., 4:] * self.stride\n                cls_sigmoid_output = torch.sigmoid_(cls_sigmoid_output)\n                xy_sigmoid_output = np.array(xy_sigmoid_output.cpu()).reshape(1, -1)\n                np.savetxt(('./quantizer_output/xy_sigmoid_output.txt'), xy_sigmoid_output,\n                           delimiter='\\n')\n                
writer = open('./quantizer_output/xy_sigmoid_bin', \"wb\")\n                writer.write(xy_sigmoid_output)\n                writer.close()\n\n                cls_sigmoid_output = np.array(cls_sigmoid_output.cpu()).reshape(1, -1)\n                np.savetxt(('./quantizer_output/cls_sigmoid_output.txt'), cls_sigmoid_output,\n                           delimiter='\\n')\n                writer = open('./quantizer_output/cls_sigmoid_bin', \"wb\")\n                writer.write(cls_sigmoid_output)\n                writer.close()\n            return io.view(bs, -1, self.no), p  # view [1, 3, 13, 13, 85] as [1, 507, 85]\n\n\nclass Darknet(nn.Module):\n    # YOLOv3 object detection model\n\n    def __init__(self, cfg, img_size=(416, 416), verbose=False, quantized=-1, a_bit=8, w_bit=8,\n                 quantizer_output=False, layer_idx=-1, reorder=False, TM=32, TN=32, steps=0, is_gray_scale=False,\n                 maxabsscaler=False, shortcut_way=-1):\n        super(Darknet, self).__init__()\n\n        if isinstance(cfg, str):\n            self.module_defs = parse_model_cfg(cfg)\n        elif isinstance(cfg, list):\n            self.module_defs = cfg\n        self.quantized = quantized\n        self.a_bit = a_bit\n        self.w_bit = w_bit\n        self.quantizer_output = quantizer_output  ####输出设置超参数\n        self.layer_idx = layer_idx\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n\n        self.hyperparams = copy.deepcopy(self.module_defs[0])\n        self.module_list, self.routs = create_modules(self.module_defs, img_size, cfg, quantized=self.quantized,\n                                                      quantizer_output=self.quantizer_output, reorder=self.reorder,\n                                                      TM=self.TM, TN=self.TN, layer_idx=self.layer_idx,\n                                                      a_bit=self.a_bit, w_bit=self.w_bit, steps=steps,\n                                                      is_gray_scale=is_gray_scale, maxabsscaler=maxabsscaler,\n                                                      shortcut_way=shortcut_way)\n        self.yolo_layers = get_yolo_layers(self)\n        # torch_utils.initialize_weights(self)\n\n        # Darknet Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346\n        self.version = np.array([0, 2, 5], dtype=np.int32)  # (int32) version info: major, minor, revision\n        self.seen = np.array([0], dtype=np.int64)  # (int64) number of images seen during training\n        # 输出modelsummary\n        if self.quantized == -1:\n            self.info(verbose)   # print model description\n\n    def forward(self, x, augment=False):\n\n        if not augment:\n            return self.forward_once(x)\n        else:  # Augment images (inference and test only) https://github.com/ultralytics/yolov3/issues/931\n            img_size = x.shape[-2:]  # height, width\n            s = [0.83, 0.67]  # scales\n            y = []\n            for i, xi in enumerate((x,\n                                    torch_utils.scale_img(x.flip(3), s[0], same_shape=False),  # flip-lr and scale\n                                    torch_utils.scale_img(x, s[1], same_shape=False),  # scale\n                                    )):\n                # cv2.imwrite('img%g.jpg' % i, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])\n                y.append(self.forward_once(xi)[0])\n\n            y[1][..., :4] /= s[0]  # scale\n            y[1][..., 0] = img_size[1] - y[1][..., 0]  # flip lr\n          
  y[2][..., :4] /= s[1]  # scale\n\n            # for i, yi in enumerate(y):  # coco small, medium, large = < 32**2 < 96**2 <\n            #     area = yi[..., 2:4].prod(2)[:, :, None]\n            #     if i == 1:\n            #         yi *= (area < 96. ** 2).float()\n            #     elif i == 2:\n            #         yi *= (area > 32. ** 2).float()\n            #     y[i] = yi\n\n            y = torch.cat(y, 1)\n            return y, None\n\n    def forward_once(self, x, augment=False, verbose=False):\n        img_size = x.shape[-2:]  # height, width\n        yolo_out, out, feature_out = [], [], []\n        if verbose:\n            print('0', x.shape)\n            str = ''\n\n        # Augment images (inference and test only)\n        if augment:  # https://github.com/ultralytics/yolov3/issues/931\n            nb = x.shape[0]  # batch size\n            s = [0.83, 0.67]  # scales\n            x = torch.cat((x,\n                           torch_utils.scale_img(x.flip(3), s[0]),  # flip-lr and scale\n                           torch_utils.scale_img(x, s[1]),  # scale\n                           ), 0)\n\n        for i, module in enumerate(self.module_list):\n            name = module.__class__.__name__\n            if name in ['Shortcut', 'FeatureConcat', 'QuantizedShortcut_max', 'QuantizedShortcut_min',\n                        'QuantizedFeatureConcat', 'COSPTQuantizedShortcut_min',\n                        'COSPTQuantizedShortcut_max', 'COSPTQuantizedFeatureConcat']:  # sum, concat\n                if verbose:\n                    l = [i - 1] + module.layers  # layers\n                    sh = [list(x.shape)] + [list(out[i].shape) for i in module.layers]  # shapes\n                    str = ' >> ' + ' + '.join(['layer %g %s' % x for x in zip(l, sh)])\n                x = module(x, out)  # Shortcut(), FeatureConcat()\n            elif name == 'YOLOLayer':\n                yolo_out.append(module(x, out))\n            else:  # run module directly, i.e. 
mtype = 'convolutional', 'upsample', 'maxpool', 'batchnorm2d' etc.\n                if name == 'Upsample' and isinstance(x, list):\n                    x[0] = module(x[0])\n                    x[1] = module(x[1])\n                else:\n                    x = module(x)\n                if name == \"Sequential\" and self.module_list[i + 1].__class__.__name__ != 'YOLOLayer':\n                    feature_out.append(x)\n\n            out.append(x if self.routs[i] else [])\n            if verbose:\n                print('%g/%g %s -' % (i, len(self.module_list), name), list(x.shape), str)\n                str = ''\n\n        if self.training:  # train\n            return yolo_out, feature_out\n        else:  # inference or test\n            x, p = zip(*yolo_out)  # inference output, training output\n            x = torch.cat(x, 1)  # cat yolo outputs\n            if augment:  # de-augment results\n                x = torch.split(x, nb, dim=0)\n                x[1][..., :4] /= s[0]  # scale\n                x[1][..., 0] = img_size[1] - x[1][..., 0]  # flip lr\n                x[2][..., :4] /= s[1]  # scale\n                x = torch.cat(x, 1)\n            return x, p, feature_out\n\n    def fuse(self):\n        # Fuse Conv2d + BatchNorm2d layers throughout model\n        print('Fusing layers...')\n        fused_list = nn.ModuleList()\n        for a in list(self.children())[0]:\n            if isinstance(a, nn.Sequential):\n                for i, b in enumerate(a):\n                    if isinstance(b, nn.modules.batchnorm.BatchNorm2d):\n                        # fuse this bn layer with the previous conv2d layer\n                        conv = a[i - 1]\n                        fused = torch_utils.fuse_conv_and_bn(conv, b)\n                        a = nn.Sequential(fused, *list(a.children())[i + 1:])\n                        break\n            fused_list.append(a)\n        self.module_list = fused_list\n\n    def info(self, verbose=False):\n        torch_utils.model_info(self, verbose)\n\n\ndef get_yolo_layers(model):\n    return [i for i, m in enumerate(model.module_list) if m.__class__.__name__ == 'YOLOLayer']  # [89, 101, 113]\n\n\ndef load_darknet_weights(self, weights, cutoff=-1, pt=False, quant=False):\n    # Parses and loads the weights stored in 'weights'\n\n    # Establish cutoffs (load layers between 0 and cutoff. 
if cutoff = -1 all are loaded)\n    file = Path(weights).name\n    if file == 'darknet53.conv.74':\n        cutoff = 75\n    elif file == 'yolov3-tiny.conv.15':\n        cutoff = 15\n\n    # Read weights file\n    with open(weights, 'rb') as f:\n        # Read Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346\n        self.version = np.fromfile(f, dtype=np.int32, count=3)  # (int32) version info: major, minor, revision\n        self.seen = np.fromfile(f, dtype=np.int64, count=1)  # (int64) number of images seen during training\n\n        weights = np.fromfile(f, dtype=np.float32)  # The rest are weights\n\n    ptr = 0\n    for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):\n        if mdef['type'] == 'convolutional':\n            conv_layer = module[0]\n            if mdef['batch_normalize']:\n                if quant:\n                    # Load BN bias, weights, running mean and running variance\n                    num_b = conv_layer.beta.numel()\n                    # Bias\n                    bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.beta)\n                    conv_layer.beta.data.copy_(bn_b)\n                    ptr += num_b\n                    # Weight\n                    bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.gamma)\n                    conv_layer.gamma.data.copy_(bn_w)\n                    ptr += num_b\n                    # Running Mean\n                    bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.running_mean)\n                    conv_layer.running_mean.data.copy_(bn_rm)\n                    ptr += num_b\n                    # Running Var\n                    bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.running_var)\n                    conv_layer.running_var.data.copy_(bn_rv)\n                    ptr += num_b\n                else:\n                    # Load BN bias, weights, running mean and running variance\n                    bn_layer = module[1]\n                    num_b = bn_layer.bias.numel()  # Number of biases\n                    # Bias\n                    bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias)\n                    bn_layer.bias.data.copy_(bn_b)\n                    ptr += num_b\n                    # Weight\n                    bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight)\n                    bn_layer.weight.data.copy_(bn_w)\n                    ptr += num_b\n                    # Running Mean\n                    bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)\n                    bn_layer.running_mean.data.copy_(bn_rm)\n                    ptr += num_b\n                    # Running Var\n                    bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)\n                    bn_layer.running_var.data.copy_(bn_rv)\n                    ptr += num_b\n                # Load conv. 
weights\n                num_w = conv_layer.weight.numel()\n                conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)\n                conv_layer.weight.data.copy_(conv_w)\n                ptr += num_w\n            else:\n                # if os.path.basename(file) == 'yolov3.weights' or os.path.basename(file) == 'yolov3-tiny.weights':\n                # the pt flag marks a COCO-pretrained model: the conv layer ahead of each YOLO layer outputs 255 channels, so those parameters are skipped here\n                if pt and os.path.basename(file).split('.')[-1] == 'weights':\n                    num_b = 255\n                    ptr += num_b\n                    num_w = int(self.module_defs[i - 1][\"filters\"]) * 255\n                    ptr += num_w\n                else:\n                    # Load conv. bias\n                    num_b = conv_layer.bias.numel()\n                    conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias)\n                    conv_layer.bias.data.copy_(conv_b)\n                    ptr += num_b\n                    # Load conv. weights\n                    num_w = conv_layer.weight.numel()\n                    conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)\n                    conv_layer.weight.data.copy_(conv_w)\n                    ptr += num_w\n        elif mdef['type'] == 'depthwise':\n            depthwise_layer = module[0]\n            if mdef['batch_normalize']:\n                if quant:\n                    # Load BN bias, weights, running mean and running variance\n                    num_b = depthwise_layer.beta.numel()\n                    # Bias\n                    bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(depthwise_layer.beta)\n                    depthwise_layer.beta.data.copy_(bn_b)\n                    ptr += num_b\n                    # Weight\n                    bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(depthwise_layer.gamma)\n                    depthwise_layer.gamma.data.copy_(bn_w)\n                    ptr += num_b\n                    # Running Mean\n                    bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(depthwise_layer.running_mean)\n                    depthwise_layer.running_mean.data.copy_(bn_rm)\n                    ptr += num_b\n                    # Running Var\n                    bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(depthwise_layer.running_var)\n                    depthwise_layer.running_var.data.copy_(bn_rv)\n                    ptr += num_b\n                else:\n                    # Load BN bias, weights, running mean and running variance\n                    bn_layer = module[1]\n                    num_b = bn_layer.bias.numel()  # Number of biases\n                    # Bias\n                    bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias)\n                    bn_layer.bias.data.copy_(bn_b)\n                    ptr += num_b\n                    # Weight\n                    bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight)\n                    bn_layer.weight.data.copy_(bn_w)\n                    ptr += num_b\n                    # Running Mean\n                    bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)\n                    bn_layer.running_mean.data.copy_(bn_rm)\n                    ptr += num_b\n                    # Running Var\n                    bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)\n                    bn_layer.running_var.data.copy_(bn_rv)\n                    ptr += num_b\n            # Load conv. weights\n            num_w = depthwise_layer.weight.numel()\n            conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(depthwise_layer.weight)\n            depthwise_layer.weight.data.copy_(conv_w)\n            ptr += num_w\n        elif mdef['type'] == 'se':\n            se_layer = module[0]\n            fc = se_layer.fc\n            fc1 = fc[0]\n            num_fc1 = fc1.weight.numel()\n            fc1_w = torch.from_numpy(weights[ptr:ptr + num_fc1]).view_as(fc1.weight)\n            fc1.weight.data.copy_(fc1_w)\n            ptr += num_fc1\n            fc2 = fc[2]\n            num_fc2 = fc2.weight.numel()\n            fc2_w = torch.from_numpy(weights[ptr:ptr + num_fc2]).view_as(fc2.weight)\n            fc2.weight.data.copy_(fc2_w)\n            ptr += num_fc2\n\n    # make sure the pointer has consumed the entire weights buffer\n    assert ptr == len(weights)\n\n\ndef save_weights(self, path='model.weights', cutoff=-1):\n    # Converts a PyTorch model to Darknet format (*.pt to *.weights)\n    # Note: Does not work if model.fuse() is applied\n    with open(path, 'wb') as f:\n        # Write Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346\n        self.version.tofile(f)  # (int32) version info: major, minor, revision\n        self.seen.tofile(f)  # (int64) number of images seen during training\n\n        # Iterate through layers\n        for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):\n            if mdef['type'] == 'convolutional':\n                conv_layer = module[0]\n                # If batch norm, write bn parameters first\n                if mdef['batch_normalize']:\n                    bn_layer = module[1]\n                    bn_layer.bias.data.cpu().numpy().tofile(f)\n                    bn_layer.weight.data.cpu().numpy().tofile(f)\n                    bn_layer.running_mean.data.cpu().numpy().tofile(f)\n                    bn_layer.running_var.data.cpu().numpy().tofile(f)\n                # Otherwise write conv bias\n                else:\n                    conv_layer.bias.data.cpu().numpy().tofile(f)\n                # Write conv weights\n                conv_layer.weight.data.cpu().numpy().tofile(f)\n            elif mdef['type'] == 'depthwise':\n                depthwise_layer = module[0]\n                # If batch norm, write bn parameters first\n                if mdef['batch_normalize']:\n                    bn_layer = module[1]\n                    bn_layer.bias.data.cpu().numpy().tofile(f)\n                    bn_layer.weight.data.cpu().numpy().tofile(f)\n                    bn_layer.running_mean.data.cpu().numpy().tofile(f)\n                    bn_layer.running_var.data.cpu().numpy().tofile(f)\n                # Otherwise write conv bias\n                else:\n                    depthwise_layer.bias.data.cpu().numpy().tofile(f)\n                # Write conv weights\n                depthwise_layer.weight.data.cpu().numpy().tofile(f)\n            elif mdef['type'] == 'se':\n                se_layer = module[0]\n                fc = se_layer.fc\n                fc1 = fc[0]\n                fc2 = fc[2]\n                fc1.weight.data.cpu().numpy().tofile(f)\n                fc2.weight.data.cpu().numpy().tofile(f)\n\n\ndef convert(cfg='cfg/yolov3-spp.cfg', weights='weights/yolov3-spp.weights'):\n    # Converts between PyTorch and Darknet format per extension (i.e. *.weights to *.pt and vice versa)\n    # from models import *; convert('cfg/yolov3-spp.cfg', 'weights/yolov3-spp.weights')\n\n    # Initialize model\n    model = Darknet(cfg)\n\n    # Load weights and save\n    if weights.endswith('.pt'):  # if PyTorch format\n        model.load_state_dict(torch.load(weights, map_location='cpu')['model'])\n        target = weights.rsplit('.', 1)[0] + '.weights'\n        save_weights(model, path=target, cutoff=-1)\n        print(\"Success: converted '%s' to '%s'\" % (weights, target))\n\n    elif weights.endswith('.weights'):  # darknet format\n        _ = load_darknet_weights(model, weights)\n\n        chkpt = {'epoch': -1,\n                 'best_fitness': None,\n                 'training_results': None,\n                 'model': model.state_dict(),\n                 'optimizer': None}\n\n        target = weights.rsplit('.', 1)[0] + '.pt'\n        torch.save(chkpt, target)\n        print(\"Success: converted '%s' to '%s'\" % (weights, target))\n\n    else:\n        print('Error: extension not supported.')\n\n\ndef attempt_download(weights):\n    # Attempt to download pretrained weights if not found locally\n    weights = weights.strip().replace(\"'\", '')\n    msg = weights + ' missing, try downloading from https://drive.google.com/open?id=1LezFG5g3BCW6iYaV89B2i64cqEUZD7e0'\n\n    if len(weights) > 0 and not os.path.isfile(weights):\n        d = {'yolov3-spp.weights': '16lYS4bcIdM2HdmyJBVDOvt3Trx6N3W2R',\n             'yolov3.weights': '1uTlyDWlnaqXcsKOktP5aH_zRDbfcDp-y',\n             'yolov3-tiny.weights': '1CCF-iNIIkYesIDzaPvdwlcf7H9zSsKZQ',\n             'yolov3-spp.pt': '1f6Ovy3BSq2wYq4UfvFUpxJFNDFfrIDcR',\n             'yolov3.pt': '1SHNFyoe5Ni8DajDNEqgB2oVKBb_NoEad',\n             'yolov3-tiny.pt': '10m_3MlpQwRtZetQxtksm9jqHrPTHZ6vo',\n             'darknet53.conv.74': '1WUVBid-XuoUBmvzBVUCBl_ELrzqwA8dJ',\n             'yolov3-tiny.conv.15': '1Bw0kCpplxUqyRYAJr9RY9SGnOJbo9nEj',\n             'yolov3-spp-ultralytics.pt': '1UcR-zVoMs7DH5dj3N1bswkiQTA4dmKF4'}\n\n        file = Path(weights).name\n        if file in d:\n            r = gdrive_download(id=d[file], name=weights)\n        else:  # download from pjreddie.com\n            url = 'https://pjreddie.com/media/files/' + file\n            print('Downloading ' + url)\n            r = os.system('curl -f ' + url + ' -o ' + weights)\n\n        # Error check\n        if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6):  # weights exist and > 1MB\n            os.system('rm ' + weights)  # remove partial downloads\n            raise Exception(msg)\n
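\n\n# --- Usage sketch (added for illustration; not part of the original file) ---\n# A minimal smoke test, assuming a 'cfg/yolov3.cfg' compatible with this repo\n# is present: build the full-precision model, run a dummy forward pass in eval\n# mode, and print the shape of the concatenated detections.\nif __name__ == '__main__':\n    model = Darknet('cfg/yolov3.cfg', img_size=(416, 416))\n    model.eval()\n    with torch.no_grad():\n        dets, p, feats = model(torch.zeros(1, 3, 416, 416))\n    print(dets.shape)  # (batch, anchors over all grids, nc + 5)\n"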
  },
  {
    "path": "normal_prune.py",
    "content": "from models import *\nfrom utils.utils import *\nimport torch\nimport numpy as np\nfrom copy import deepcopy\nfrom test import test\nfrom terminaltables import AsciiTable\nimport time\nfrom utils.utils import *\nfrom utils.prune_utils import *\nimport os\nimport argparse\n\n\ndef prune_and_eval(model, sorted_bn, percent=.0):\n    model_copy = deepcopy(model)\n    thre_index = int(len(sorted_bn) * percent)\n    # 获得α参数的阈值，小于该值的α参数对应的通道，全部裁剪掉\n    thre = sorted_bn[thre_index]\n\n    print(f'Channels with Gamma value less than {thre:.4f} are pruned!')\n\n    remain_num = 0\n    for idx in prune_idx:\n        bn_module = model_copy.module_list[idx][1]\n        # 根据BN的阈值找到BN的mask\n        mask = obtain_bn_mask(bn_module, thre)\n\n        remain_num += int(mask.sum())\n        bn_module.weight.data.mul_(mask)\n    # with torch.no_grad():\n    #     mAP = eval_model(model_copy)[1].mean()\n\n    print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')\n    print(f'Prune ratio: {1 - remain_num / len(sorted_bn):.3f}')\n    # print(f'mAP of the pruned model is {mAP:.4f}')\n\n    return thre\n\n\ndef obtain_filters_mask(model, thre, CBL_idx, prune_idx):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    # CBL_idx存储的是所有带BN的卷积层（YOLO层的前一层卷积层是不带BN的）\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n\n            mask = obtain_bn_mask(bn_module, thre).cpu().numpy()\n            remain = int(mask.sum())\n            pruned = pruned + mask.shape[0] - remain\n\n            if remain == 0:\n                print(\"Channels would be all pruned!\")\n                raise Exception\n\n            print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n                  f'remaining channel: {remain:>4d}')\n        else:\n            mask = np.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        filters_mask.append(mask.copy())\n\n    # 因此，这里求出的prune_ratio,需要裁剪的α参数/cbl_idx中所有的α参数\n    prune_ratio = pruned / total\n    print(f'Prune channels: {pruned}\\tPrune ratio: {prune_ratio:.3f}')\n\n    return num_filters, filters_mask\n\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')\n    parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')\n    parser.add_argument('--img-size', type=int, default=608, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    opt = parser.parse_args()\n    print(opt)\n\n    percent = opt.percent\n    # 指定GPU\n    # torch.cuda.set_device(2)\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg).to(device)\n\n    if opt.weights:\n        if opt.weights.endswith(\".pt\"):\n            
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n        else:\n            _ = load_darknet_weights(model, opt.weights)\n\n    data_config = parse_data_cfg(opt.data)\n\n    valid_path = data_config[\"valid\"]\n    class_names = load_classes(data_config[\"names\"])\n    # test model\n    eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size,\n                                    imgsz=opt.img_size, rank=-1)\n    # 获取参数个数\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    with torch.no_grad():\n        origin_model_metric = eval_model(model)\n    origin_nparameters = obtain_num_parameters(model)\n\n    # CBL表示后接BN的卷积层，Other_idx表示不接BN的卷积层和其他层\n    CBL_idx, Other_idx, prune_idx = parse_module_defs(model.module_defs)\n\n    # 将所有要剪枝的BN层的γ参数，拷贝到bn_weights列表\n    bn_weights = gather_bn_weights(model.module_list, prune_idx)\n\n    # torch.sort返回二维列表，第一维是排序后的值列表，第二维是排序后的值列表对应的索引\n    sorted_bn = torch.sort(bn_weights)[0]\n    # 对BN中的γ参数排序\n    # 避免剪掉所有channel的最高阈值(每个BN层的gamma的最大值的最小值即为阈值上限)\n    highest_thre = []\n    for idx in prune_idx:\n        # .item()可以得到张量里的元素值\n        # 获取每一层中γ参数的最大值\n        highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())\n    # 获取所有层中的最小值\n    highest_thre = min(highest_thre)\n\n    # 找到highest_thre对应的下标对应的百分比\n    percent_limit = (sorted_bn == highest_thre).nonzero().item() / len(bn_weights)\n\n    print(f'Threshold should be less than {highest_thre:.4f}.')\n    print(f'The corresponding prune ratio is {percent_limit:.3f}.')\n    # 获得在目标百分百比下的剪植阈值\n    threshold = prune_and_eval(model, sorted_bn, percent)\n\n    # 获得保留的卷积核的个数和每层对应的mask\n    num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, prune_idx)\n\n    # CBLidx2mask存储CBL_idx中，每一层BN层对应的mask，将需要被剪植的层和剪植后的mask结合起来\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n\n    # 返回剪植后的模型\n    pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)\n\n    with torch.no_grad():\n        mAP = eval_model(pruned_model)[1].mean()\n    print('after prune_model_keep_size map is {}'.format(mAP))\n\n    # 获得原始模型的module_defs，并修改该defs中的卷积核数量\n    compact_module_defs = deepcopy(model.module_defs)\n    for idx, num in zip(CBL_idx, num_filters):\n        assert compact_module_defs[idx]['type'] == 'convolutional'\n        compact_module_defs[idx]['filters'] = str(num)\n    # 生成新模型\n    compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)\n    compact_nparameters = obtain_num_parameters(compact_model)\n    # 拷贝权重\n    init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Other_idx, CBLidx2mask)\n    # 测试运行速度\n    random_input = torch.rand((16, 3, 416, 416)).to(device)\n    pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)\n    compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)\n\n    # 在测试集上测试剪枝后的模型, 并统计模型的参数数量\n    with torch.no_grad():\n        compact_model_metric = eval_model(compact_model)\n\n    # 比较剪枝前后参数数量的变化、指标性能的变化\n    metric_table = [\n        [\"Metric\", \"Before\", \"After\"],\n        [\"mAP\", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters}\"],\n        [\"Inference\", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']\n    ]\n    
print(AsciiTable(metric_table).table)\n\n    # generate the pruned cfg file and save the model\n    pruned_cfg_name = opt.cfg.replace('/', f'/normal_prune_{percent}_')\n    # create the output directory\n    dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n\n    # compact_module_defs turned the anchors string into an array, so convert anchors back to a string here\n    with open(opt.cfg, 'r') as f:\n        lines = f.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + f'/normal_prune_{percent}_percent.weights'\n\n    save_weights(compact_model, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n
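\n# --- Illustration (added; standalone sketch with hypothetical values) ---------\n# The heart of normal pruning is one global threshold over all BN gammas: sort\n# |gamma| across every prunable layer, read off the value at the target\n# percentile, and keep only the channels above it.\n#\n#   import torch\n#   gammas = torch.rand(100)                   # stand-in for gather_bn_weights()\n#   thre = torch.sort(gammas)[0][int(len(gammas) * 0.5)]  # 50% global prune ratio\n#   mask = gammas.gt(thre).float()             # 1 = keep channel, 0 = prune\n#   print(int(mask.sum()), 'of', len(gammas), 'channels kept')\n"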
  },
  {
    "path": "regular_prune.py",
    "content": "from models import *\nfrom utils.utils import *\nimport torch\nimport numpy as np\nfrom copy import deepcopy\nfrom test import test\nfrom terminaltables import AsciiTable\nimport time\nfrom utils.utils import *\nfrom utils.prune_utils import *\nimport os\nimport argparse\n\n\n# 该函数有很重要的意义：\n# ①先用深拷贝将原始模型拷贝下来，得到model_copy\n# ②将model_copy中，BN层中低于阈值的α参数赋值为0\n# ③在BN层中，输出y=α*x+β，由于α参数的值被赋值为0，因此输入仅加了一个偏置β\n# ④很神奇的是，network slimming中是将α参数和β参数都置0，该处只将α参数置0，但效果却很好：其实在另外一篇论文中，已经提到，可以先将β参数的效果移到\n# 下一层卷积层，再去剪掉本层的α参数\n\n# 该函数用最简单的方法，让我们看到了，如何快速看到剪枝后的效果\ndef prune_and_eval(model, sorted_bn, percent=.0):\n    model_copy = deepcopy(model)\n    thre_index = int(len(sorted_bn) * percent)\n    # 获得α参数的阈值，小于该值的α参数对应的通道，全部裁剪掉\n    thre = sorted_bn[thre_index]\n\n    print(f'Channels with Gamma value less than {thre:.4f} are pruned!')\n\n    remain_num = 0\n    for idx in prune_idx:\n\n        bn_module = model_copy.module_list[idx][1]\n\n        mask = obtain_bn_mask(bn_module, thre)\n        mask_cnt = int(mask.sum())\n        if mask_cnt == 0:\n            this_layer_sort_bn = bn_module.weight.data.abs().clone()\n            sort_bn_values = torch.sort(this_layer_sort_bn)[0]\n            bn_cnt = bn_module.weight.shape[0]\n            this_layer_thre = sort_bn_values[bn_cnt - 8]\n            mask = obtain_bn_mask(bn_module, this_layer_thre)\n        else:\n            for i in range(len(filter_switch)):\n                if mask_cnt <= filter_switch[i]:\n                    mask_cnt = filter_switch[i]\n                    break\n            this_layer_sort_bn = bn_module.weight.data.abs().clone()\n            sort_bn_values = torch.sort(this_layer_sort_bn)[0]\n            bn_cnt = bn_module.weight.shape[0]\n            this_layer_thre = sort_bn_values[bn_cnt - mask_cnt]\n            mask = obtain_bn_mask(bn_module, this_layer_thre)\n\n        remain_num += int(mask.sum())\n        bn_module.weight.data.mul_(mask)\n\n    with torch.no_grad():\n        mAP = eval_model(model_copy)[1].mean()\n\n    print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')\n    print(f'Prune ratio: {1 - remain_num / len(sorted_bn):.3f}')\n    print(f'mAP of the pruned model is {mAP:.4f}')\n\n    return thre\n\n\ndef obtain_filters_mask(model, thre, CBL_idx, prune_idx):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    # CBL_idx存储的是所有带BN的卷积层（YOLO层的前一层卷积层是不带BN的）\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n\n            mask = obtain_bn_mask(bn_module, thre).cpu().numpy()\n\n            mask_cnt = int(mask.sum())\n\n            if mask_cnt == 0:\n                this_layer_sort_bn = bn_module.weight.data.abs().clone()\n                sort_bn_values = torch.sort(this_layer_sort_bn)[0]\n                bn_cnt = bn_module.weight.shape[0]\n                this_layer_thre = sort_bn_values[bn_cnt - 8]\n                mask = obtain_bn_mask(bn_module, this_layer_thre).cpu().numpy()\n\n            else:\n                for i in range(len(filter_switch)):\n                    if mask_cnt <= filter_switch[i]:\n                        mask_cnt = filter_switch[i]\n                        break\n                this_layer_sort_bn = bn_module.weight.data.abs().clone()\n                sort_bn_values = torch.sort(this_layer_sort_bn)[0]\n                bn_cnt = bn_module.weight.shape[0]\n                this_layer_thre = sort_bn_values[bn_cnt - mask_cnt]\n                mask = obtain_bn_mask(bn_module, 
this_layer_thre).cpu().numpy()\n\n            remain = int(mask.sum())\n            pruned = pruned + mask.shape[0] - remain\n\n            if remain == 0:\n                print(\"Channels would be all pruned!\")\n                raise Exception\n\n            print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n                  f'remaining channel: {remain:>4d}')\n        else:\n            mask = np.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        filters_mask.append(mask.copy())\n\n    # 因此，这里求出的prune_ratio,需要裁剪的α参数/cbl_idx中所有的α参数\n    prune_ratio = pruned / total\n    print(f'Prune channels: {pruned}\\tPrune ratio: {prune_ratio:.3f}')\n\n    return num_filters, filters_mask\n\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')\n    parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')\n    parser.add_argument('--img-size', type=int, default=608, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    opt = parser.parse_args()\n    print(opt)\n\n    percent = opt.percent\n    # 指定GPU\n    # torch.cuda.set_device(2)\n\n    filter_switch = [each for each in range(2048) if (each % 32 == 0)]\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg).to(device)\n\n    if opt.weights:\n        if opt.weights.endswith(\".pt\"):\n            model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n        else:\n            _ = load_darknet_weights(model, opt.weights)\n\n    data_config = parse_data_cfg(opt.data)\n\n    valid_path = data_config[\"valid\"]\n    class_names = load_classes(data_config[\"names\"])\n\n    eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size,\n                                    imgsz=opt.img_size, rank=-1)\n\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    # 这个不应该注释掉，等会要恢复\n    with torch.no_grad():\n        origin_model_metric = eval_model(model)\n    origin_nparameters = obtain_num_parameters(model)\n\n    CBL_idx, Conv_idx, prune_idx = parse_module_defs(model.module_defs)\n\n    # 将所有要剪枝的BN层的α参数，拷贝到bn_weights列表\n    bn_weights = gather_bn_weights(model.module_list, prune_idx)\n\n    # torch.sort返回二维列表，第一维是排序后的值列表，第二维是排序后的值列表对应的索引\n    sorted_bn = torch.sort(bn_weights)[0]\n\n    # 避免剪掉所有channel的最高阈值(每个BN层的gamma的最大值的最小值即为阈值上限)\n    highest_thre = []\n    for idx in prune_idx:\n        # .item()可以得到张量里的元素值\n        highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())\n    highest_thre = min(highest_thre)\n\n    # 找到highest_thre对应的下标对应的百分比\n    percent_limit = (sorted_bn == highest_thre).nonzero(as_tuple=False).item() / len(bn_weights)\n\n    
print(f'Threshold should be less than {highest_thre:.4f}.')\n    print(f'The corresponding prune ratio is {percent_limit:.3f}.')\n\n    threshold = prune_and_eval(model, sorted_bn, percent)\n\n    # ****************************************************************\n    # 虽然上面已经能看到剪枝后的效果，但是没有生成剪枝后的模型结构，因此下面的代码是为了生成新的模型结构并拷贝旧模型参数到新模型\n\n    # %%\n\n    num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, prune_idx)\n\n    # CBLidx2mask存储CBL_idx中，每一层BN层对应的mask\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n\n    pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)\n\n    with torch.no_grad():\n        mAP = eval_model(pruned_model)[1].mean()\n    print('after prune_model_keep_size map is {}'.format(mAP))\n\n    # 获得原始模型的module_defs，并修改该defs中的卷积核数量\n    compact_module_defs = deepcopy(model.module_defs)\n    for idx, num in zip(CBL_idx, num_filters):\n        assert compact_module_defs[idx]['type'] == 'convolutional'\n        compact_module_defs[idx]['filters'] = str(num)\n\n    compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)\n    compact_nparameters = obtain_num_parameters(compact_model)\n\n    init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)\n\n    random_input = torch.rand((16, 3, 416, 416)).to(device)\n\n    pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)\n    compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)\n\n    # 在测试集上测试剪枝后的模型, 并统计模型的参数数量\n    with torch.no_grad():\n        compact_model_metric = eval_model(compact_model)\n\n    # 比较剪枝前后参数数量的变化、指标性能的变化\n    metric_table = [\n        [\"Metric\", \"Before\", \"After\"],\n        [\"mAP\", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters}\"],\n        [\"Inference\", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    # 生成剪枝后的cfg文件并保存模型\n    pruned_cfg_name = opt.cfg.replace('/', f'/regular_prune_{percent}_')\n    # 创建存储目录\n    dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n\n    # 由于原始的compact_module_defs将anchor从字符串变为了数组，因此这里将anchors重新变为字符串\n    file = open(opt.cfg, 'r')\n    lines = file.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    file.close()\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + 
f'/regular_prune_{percent}_percent.weights'\n\n    save_weights(compact_model, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n
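\n# --- Illustration (added; standalone sketch, round_up is a hypothetical helper) -\n# Regular pruning differs from normal pruning by rounding each layer's surviving\n# channel count up to the next multiple of 32 (the filter_switch table above),\n# which keeps the pruned tensors friendly to hardware tiling.\n#\n#   filter_switch = [c for c in range(2048) if c % 32 == 0]\n#   def round_up(cnt):\n#       for c in filter_switch:\n#           if cnt <= c:\n#               return c\n#       return filter_switch[-1]\n#   assert round_up(37) == 64 and round_up(64) == 64\n"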
  },
  {
    "path": "requirements.txt",
    "content": "# pip install -U -r requirements.txt\nCython\nnumpy==1.17\nopencv-python\ntorch>=1.5.1\nmatplotlib\npillow\ntensorboard\ntorchvision\nscipy\ntqdm\ngit+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI\n\n# Conda commands (in lieu of pip) ---------------------------------------------\n# conda update -yn base -c defaults conda\n# conda install -yc anaconda numpy opencv matplotlib tqdm pillow ipython\n# conda install -yc conda-forge scikit-image pycocotools tensorboard\n# conda install -yc spyder-ide spyder-line-profiler\n# conda install -yc pytorch pytorch torchvision\n# conda install -yc conda-forge protobuf numpy && pip install onnx==1.6.0  # https://github.com/onnx/onnx#linux-and-macos"
  },
  {
    "path": "shortcut_prune.py",
    "content": "import argparse\n\nfrom models import *\nfrom utils.utils import *\nimport torch\nimport numpy as np\nfrom copy import deepcopy\nfrom test import test\nfrom terminaltables import AsciiTable\nimport time\nfrom utils.utils import *\nfrom utils.prune_utils import *\nimport os\n\n\n# short-cut剪枝\n\n\n# 该函数有很重要的意义：\n# ①先用深拷贝将原始模型拷贝下来，得到model_copy\n# ②将model_copy中，BN层中低于阈值的α参数赋值为0\n# ③在BN层中，输出y=α*x+β，由于α参数的值被赋值为0，因此输入仅加了一个偏置β\n# ④很神奇的是，network slimming中是将α参数和β参数都置0，该处只将α参数置0，但效果却很好：其实在另外一篇论文中，已经提到，可以先将β参数的效果移到\n# 下一层卷积层，再去剪掉本层的α参数\n\n# 该函数用最简单的方法，让我们看到了，如何快速看到剪枝后的效果\n\ndef prune_and_eval(model, sorted_bn, shortcut_idx, percent=.0):\n    model_copy = deepcopy(model)\n    thre_index = int(len(sorted_bn) * percent)\n    # 获得α参数的阈值，小于该值的α参数对应的通道，全部裁剪掉\n    thre1 = sorted_bn[thre_index]\n\n    print(f'Channels with Gamma value less than {thre1:.8f} are pruned!')\n\n    remain_num = 0\n    idx_new = dict()\n    for idx in prune_idx:\n\n        if idx not in shortcut_idx:\n\n            bn_module = model_copy.module_list[idx][1]\n\n            mask = obtain_bn_mask(bn_module, thre1)\n            # 记录剪枝后，每一层卷积层对应的mask\n            # idx_new[idx]=mask.cpu().numpy()\n            idx_new[idx] = mask\n            remain_num += int(mask.sum())\n            bn_module.weight.data.mul_(mask)\n            # bn_module.bias.data.mul_(mask*0.0001)\n        else:\n\n            bn_module = model_copy.module_list[idx][1]\n\n            mask = idx_new[shortcut_idx[idx]]\n            idx_new[idx] = mask\n\n            remain_num += int(mask.sum())\n            bn_module.weight.data.mul_(mask)\n\n        # print(int(mask.sum()))\n\n    # with torch.no_grad():\n    #     mAP = eval_model(model_copy)[0][2]\n\n    print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')\n    print(f'Prune ratio: {1 - remain_num / len(sorted_bn):.3f}')\n    # print(f'mAP of the pruned model is {mAP:.4f}')\n\n    return thre1\n\n\ndef obtain_filters_mask(model, thre, CBL_idx, shortcut_idx, prune_idx):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    idx_new = dict()\n    # CBL_idx存储的是所有带BN的卷积层（YOLO层的前一层卷积层是不带BN的）\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n            if idx not in shortcut_idx:\n\n                mask = obtain_bn_mask(bn_module, thre).cpu().numpy()\n                idx_new[idx] = mask\n                remain = int(mask.sum())\n                pruned = pruned + mask.shape[0] - remain\n\n                # if remain == 0:\n                #     print(\"Channels would be all pruned!\")\n                #     raise Exception\n\n                # print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n                #     f'remaining channel: {remain:>4d}')\n            else:\n                # 如果idx在shortcut_idx之中，则试跳连层的两层的mask相等\n                mask = idx_new[shortcut_idx[idx]]\n                idx_new[idx] = mask\n                remain = int(mask.sum())\n                pruned = pruned + mask.shape[0] - remain\n\n            if remain == 0:\n                print(\"Channels would be all pruned!\")\n                raise Exception\n\n            print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n                  f'remaining channel: {remain:>4d}')\n        else:\n            mask = np.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        
filters_mask.append(mask.copy())\n\n    # 因此，这里求出的prune_ratio,需要裁剪的α参数/cbl_idx中所有的α参数\n    prune_ratio = pruned / total\n    print(f'Prune channels: {pruned}\\tPrune ratio: {prune_ratio:.3f}')\n\n    return num_filters, filters_mask\n\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='cfg/coco2017.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')\n    parser.add_argument('--percent', type=float, default=0.6, help='channel prune percent')\n    parser.add_argument('--img-size', type=int, default=608, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    opt = parser.parse_args()\n    print(opt)\n\n    # 指定GPU\n    # torch.cuda.set_device(2)\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg).to(device)\n\n    if opt.weights:\n        if opt.weights.endswith(\".pt\"):\n            model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n        else:\n            _ = load_darknet_weights(model, opt.weights)\n\n    data_config = parse_data_cfg(opt.data)\n\n    valid_path = data_config[\"valid\"]\n    class_names = load_classes(data_config[\"names\"])\n    # 测试模型\n    eval_model = lambda model: test(model=model, imgsz=opt.img_size, cfg=opt.cfg, data=opt.data,\n                                    batch_size=opt.batch_size, rank=-1)\n    # 获取参数总数\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    with torch.no_grad():\n        origin_model_metric = eval_model(model)\n    origin_nparameters = obtain_num_parameters(model)\n    # 与normal_prune不同的是这里需要获得shortcu_idx和short_all\n    # 其中shortcut_idx存储的是对应关系，故shortcut[x]就对应的是与第x-1卷积层相加层的索引值\n    # shortcut_all存储的是所有相加层\n    CBL_idx, Conv_idx, prune_idx, shortcut_idx, shortcut_all = parse_module_defs2(model.module_defs)\n\n    # 将所有要剪枝的BN层的γ参数，拷贝到bn_weights列表\n    bn_weights = gather_bn_weights(model.module_list, prune_idx)\n    # 对BN中的γ参数排序\n    # torch.sort返回二维列表，第一维是排序后的值列表，第二维是排序后的值列表对应的索引\n    sorted_bn = torch.sort(bn_weights)[0]\n\n    # 避免剪掉一层中的所有channel的最高阈值(每个BN层中gamma的最大值在所有层中最小值即为阈值上限)\n    highest_thre = []\n    for idx in prune_idx:\n        # .item()可以得到张量里的元素值\n        highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())\n    highest_thre = min(highest_thre)\n\n    # 找到highest_thre对应的下标对应的百分比\n    percent_limit = (sorted_bn == highest_thre).nonzero().item() / len(bn_weights)\n\n    print(f'Threshold should be less than {highest_thre:.8f}.')\n    print(f'The corresponding prune ratio is {percent_limit:.3f}.')\n\n    percent = opt.percent\n    threshold = prune_and_eval(model, sorted_bn, shortcut_idx, percent)\n\n    num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, shortcut_idx, prune_idx)\n\n    # CBLidx2mask存储CBL_idx中，每一层BN层对应的mask\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n\n    pruned_model = prune_model_keep_size(model, 
prune_idx, CBL_idx, CBLidx2mask)\n\n    with torch.no_grad():\n        mAP = eval_model(pruned_model)[0][2]\n    print('mAP after prune_model_keep_size is {}'.format(mAP))\n\n    # take the original model's module_defs and update its conv filter counts\n    compact_module_defs = deepcopy(model.module_defs)\n    for idx, num in zip(CBL_idx, num_filters):\n        assert compact_module_defs[idx]['type'] == 'convolutional'\n        compact_module_defs[idx]['filters'] = str(num)\n\n    # for item_def in compact_module_defs:\n    #     print(item_def)\n\n    compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)\n    compact_nparameters = obtain_num_parameters(compact_model)\n\n    init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)\n\n    random_input = torch.rand((16, 3, 416, 416)).to(device)\n\n    pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)\n    compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)\n\n    # evaluate the pruned model on the test set and count its parameters\n    with torch.no_grad():\n        compact_model_metric = eval_model(compact_model)\n\n    # compare parameter counts and metrics before and after pruning\n    metric_table = [\n        [\"Metric\", \"Before\", \"After\"],\n        [\"mAP\", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric[0][2]:.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters}\"],\n        [\"Inference\", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    # generate the pruned cfg file and save the model\n    pruned_cfg_name = opt.cfg.replace('/', f'/shortcut_prune_{percent}_')\n    # create the output directory\n    dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n\n    # compact_module_defs turned the anchors string into an array, so convert anchors back to a string here\n    with open(opt.cfg, 'r') as f:\n        lines = f.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] == 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + f'/shortcut_prune_{percent}_percent.weights'\n\n    save_weights(compact_model, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n
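\n# --- Illustration (added; toy sketch, make_mask is hypothetical) ---------------\n# Layers joined by a residual add must keep identical channel layouts, so the\n# mask computed for the source layer is reused verbatim for every layer mapped\n# to it via shortcut_idx:\n#\n#   idx_new = {}\n#   shortcut_idx = {7: 3}          # toy mapping: layer 7 must match layer 3\n#   idx_new[3] = make_mask(3)      # source layer is pruned independently\n#   idx_new[7] = idx_new[3]        # shortcut partner shares layer 3's mask\n"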
  },
  {
    "path": "slim_prune.py",
    "content": "from models import *\nfrom utils.utils import *\nimport numpy as np\nfrom copy import deepcopy\nfrom test import test\nfrom terminaltables import AsciiTable\nimport time\nfrom utils.prune_utils import *\nimport argparse\n\n\n# %%\ndef obtain_filters_mask(model, thre, CBL_idx, prune_idx):\n    pruned = 0\n    total = 0\n    num_filters = []\n    filters_mask = []\n    for idx in CBL_idx:\n        bn_module = model.module_list[idx][1]\n        if idx in prune_idx:\n\n            weight_copy = bn_module.weight.data.abs().clone()\n\n            channels = weight_copy.shape[0]  #\n            min_channel_num = int(channels * opt.layer_keep) if int(channels * opt.layer_keep) > 0 else 1\n            mask = weight_copy.gt(thresh).float()\n\n            if int(torch.sum(mask)) < min_channel_num:\n                _, sorted_index_weights = torch.sort(weight_copy, descending=True)\n                mask[sorted_index_weights[:min_channel_num]] = 1.\n            remain = int(mask.sum())\n            pruned = pruned + mask.shape[0] - remain\n\n            print(f'layer index: {idx:>3d} \\t total channel: {mask.shape[0]:>4d} \\t '\n                  f'remaining channel: {remain:>4d}')\n        else:\n            mask = torch.ones(bn_module.weight.data.shape)\n            remain = mask.shape[0]\n\n        total += mask.shape[0]\n        num_filters.append(remain)\n        filters_mask.append(mask.clone())\n\n    prune_ratio = pruned / total\n    print(f'Prune channels: {pruned}\\tPrune ratio: {prune_ratio:.3f}')\n\n    return num_filters, filters_mask\n\n\ndef prune_and_eval(model, CBL_idx, CBLidx2mask):\n    model_copy = deepcopy(model)\n\n    for idx in CBL_idx:\n        bn_module = model_copy.module_list[idx][1]\n        mask = CBLidx2mask[idx].cuda()\n        bn_module.weight.data.mul_(mask)\n\n    with torch.no_grad():\n        mAP = eval_model(model_copy)[0][2]\n\n    print(f'mask the gamma as zero, mAP of the model is {mAP:.4f}')\n\n\ndef obtain_avg_forward_time(input, model, repeat=200):\n    model.eval()\n    start = time.time()\n    with torch.no_grad():\n        for i in range(repeat):\n            output = model(input)\n    avg_infer_time = (time.time() - start) / repeat\n\n    return avg_infer_time, output\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n    parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')\n    parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')\n    parser.add_argument('--percent', type=float, default=0.8, help='global channel prune percent')\n    parser.add_argument('--layer_keep', type=float, default=0.01, help='channel keep percent per layer')\n    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=16, help='batch-size')\n    opt = parser.parse_args()\n    print(opt)\n\n    img_size = opt.img_size\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = Darknet(opt.cfg, (img_size, img_size)).to(device)\n\n    if opt.weights.endswith(\".pt\"):\n        model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])\n    else:\n        _ = load_darknet_weights(model, opt.weights)\n    print('\\nloaded weights from ', opt.weights)\n\n    eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, 
batch_size=opt.batch_size,\n                                    imgsz=img_size, rank=-1)\n    obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])\n\n    print(\"\\nlet's test the original model first:\")\n    with torch.no_grad():\n        origin_model_metric = eval_model(model)\n    origin_nparameters = obtain_num_parameters(model)\n\n    CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2(model.module_defs)\n\n    bn_weights = gather_bn_weights(model.module_list, prune_idx)\n\n    sorted_bn, sorted_index = torch.sort(bn_weights)\n    thresh_index = int(len(bn_weights) * opt.percent)\n    thresh = sorted_bn[thresh_index].to(device)\n\n    print(f'Global threshold: {thresh:.4f} (channels with a BN gamma below this value are pruned).')\n\n    num_filters, filters_mask = obtain_filters_mask(model, thresh, CBL_idx, prune_idx)\n    CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}\n    CBLidx2filters = {idx: filters for idx, filters in zip(CBL_idx, num_filters)}\n\n    for i in model.module_defs:\n        if i['type'] == 'shortcut':\n            i['is_access'] = False\n\n    print('merge the masks of layers connected by shortcuts!')\n    merge_mask(model, CBLidx2mask, CBLidx2filters)\n\n    prune_and_eval(model, CBL_idx, CBLidx2mask)\n\n    for i in CBLidx2mask:\n        CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()\n\n    pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)\n    print(\n        \"\\nnow prune the model but keep its size (actually fold the BN beta offsets into the following layers), let's see how the mAP goes\")\n\n    with torch.no_grad():\n        eval_model(pruned_model)\n\n    for i in model.module_defs:\n        if i['type'] == 'shortcut':\n            i.pop('is_access')\n\n    compact_module_defs = deepcopy(model.module_defs)\n    for idx in CBL_idx:\n        assert compact_module_defs[idx]['type'] == 'convolutional'\n        compact_module_defs[idx]['filters'] = str(CBLidx2filters[idx])\n\n    compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs, (img_size, img_size)).to(device)\n    compact_nparameters = obtain_num_parameters(compact_model)\n\n    init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)\n\n    random_input = torch.rand((1, 3, img_size, img_size)).to(device)\n\n    print('testing inference time...')\n    pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)\n    compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)\n\n    print('testing the final model...')\n    with torch.no_grad():\n        compact_model_metric = eval_model(compact_model)\n\n    metric_table = [\n        [\"Metric\", \"Before\", \"After\"],\n        [\"mAP\", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric[0][2]:.6f}'],\n        [\"Parameters\", f\"{origin_nparameters}\", f\"{compact_nparameters}\"],\n        [\"Inference\", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']\n    ]\n    print(AsciiTable(metric_table).table)\n\n    pruned_cfg_name = opt.cfg.replace('/', f'/slim_prune_{opt.percent}')\n    # create the output directory\n    dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]\n    if not os.path.isdir(dir_name):\n        os.makedirs(dir_name)\n\n    # compact_module_defs parsed the anchors from a string into an array, so turn the anchors back into a string here\n    file = open(opt.cfg, 'r')\n    lines = file.read().split('\\n')\n    for line in lines:\n        if line.split(' = ')[0] 
== 'anchors':\n            anchor = line.split(' = ')[1]\n            break\n        if line.split('=')[0] == 'anchors':\n            anchor = line.split('=')[1]\n            break\n    file.close()\n\n    for item in compact_module_defs:\n        if item['type'] == 'shortcut':\n            item['from'] = str(item['from'][0])\n        elif item['type'] == 'route':\n            item['layers'] = \",\".join('%s' % i for i in item['layers'])\n        elif item['type'] == 'yolo':\n            item['mask'] = \",\".join('%s' % i for i in item['mask'])\n            item['anchors'] = anchor\n    pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)\n    print(f'Config file has been saved: {pruned_cfg_file}')\n\n    weights_dir_name = dir_name.replace('cfg', 'weights')\n    if not os.path.isdir(weights_dir_name):\n        os.makedirs(weights_dir_name)\n    compact_model_name = weights_dir_name + f'/slim_prune_{str(opt.percent)}_percent.weights'\n\n    save_weights(compact_model, path=compact_model_name)\n    print(f'Compact model has been saved: {compact_model_name}')\n"
  },
  {
    "path": "test.py",
    "content": "import argparse\nimport json\n\nfrom torch.utils.data import DataLoader\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\n\n\ndef test(cfg,\n         data,\n         weights=None,\n         batch_size=16,\n         imgsz=416,\n         conf_thres=0.001,\n         iou_thres=0.6,  # for nms\n         save_json=False,\n         augment=False,\n         model=None,\n         dataloader=None,\n         multi_label=True,\n         quantized=-1,\n         a_bit=8,\n         w_bit=8,\n         rank=-1,\n         plot=True,\n         is_gray_scale=False,\n         maxabsscaler=False,\n         shortcut_way=-1):\n    # Initialize/load model and set device\n    if model is None:\n        device = torch_utils.select_device(opt.device, batch_size=batch_size)\n        verbose = opt.task == 'test'\n\n        # Remove previous\n        for f in glob.glob('test_batch*.jpg'):\n            os.remove(f)\n\n        # Initialize model\n        model = Darknet(cfg, imgsz, quantized=quantized, a_bit=a_bit, w_bit=w_bit,\n                        is_gray_scale=is_gray_scale, maxabsscaler=maxabsscaler, shortcut_way=shortcut_way)\n\n        # Load weights\n        attempt_download(weights)\n        if weights.endswith('.pt'):  # pytorch format\n            model.load_state_dict(torch.load(weights, map_location=device)['model'])\n        else:  # darknet format\n            load_darknet_weights(model, weights, quant=(quantized != -1))\n\n        # Fuse\n        if quantized == -1:\n            model.fuse()\n        model.to(device)\n\n        if device.type != 'cpu' and torch.cuda.device_count() > 1:\n            model = nn.DataParallel(model)\n        # summary(model, input_size=(3, imgsz, imgsz))\n    else:  # called by train.py\n        device = next(model.parameters()).device  # get model device\n        verbose = False\n    # Configure run\n    data = parse_data_cfg(data)\n    nc = int(data['classes'])  # number of classes\n    path = data['valid']  # path to test images\n    names = load_classes(data['names'])  # class names\n    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95\n    iouv = iouv[0].view(1)  # comment for mAP@0.5:0.95\n    niou = iouv.numel()\n\n    # Dataloader\n    if dataloader is None:\n        dataset = LoadImagesAndLabels(path, imgsz, batch_size, rect=True,\n                                      is_gray_scale=is_gray_scale)\n        batch_size = min(batch_size, len(dataset))\n        dataloader = DataLoader(dataset,\n                                batch_size=batch_size,\n                                num_workers=min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]),\n                                pin_memory=True,\n                                collate_fn=dataset.collate_fn)\n\n    seen = 0\n    model.eval()\n    # _ = model(torch.zeros((1, 3, imgsz, imgsz), device=device)) if device.type != 'cpu' else None  # run once\n    coco91class = coco80_to_coco91_class()\n    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@0.5', 'F1')\n    p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.\n    pbar = tqdm(dataloader, desc=s) if rank in [-1, 0] else dataloader\n    loss = torch.zeros(3, device=device)\n    jdict, stats, ap, ap_class = [], [], [], []\n    for batch_i, (imgs, targets, paths, shapes) in enumerate(pbar):\n        if maxabsscaler:\n            imgs = imgs.to(device).float() / 256.0\n            imgs = imgs * 2 - 1\n        else:\n            imgs = 
imgs.to(device).float() / 256.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0\n        if quantized != -1:\n            if a_bit == 16:\n                # quantize the inputs to 16-bit fixed point: scale up, round, scale back\n                imgs = imgs * (2 ** 14)\n                sign = torch.sign(imgs)\n                imgs = sign * torch.floor(torch.abs(imgs) + 0.5)\n                imgs = imgs / (2 ** 14)\n        targets = targets.to(device)\n        nb, _, height, width = imgs.shape  # batch size, channels, height, width\n        whwh = torch.Tensor([width, height, width, height]).to(device)\n\n        # Disable gradients\n        with torch.no_grad():\n            # Run model\n            t = torch_utils.time_synchronized()\n\n            inf_out, train_out, _ = model(imgs, augment=augment)  # inference and training outputs\n            t0 += torch_utils.time_synchronized() - t\n\n            # Compute loss\n            if hasattr(model, 'hyp'):  # if model has loss hyperparameters\n                loss += compute_loss(train_out, targets, model)[1][:3]  # GIoU, obj, cls\n\n            # Run NMS\n            t = torch_utils.time_synchronized()\n            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, multi_label=multi_label)\n            t1 += torch_utils.time_synchronized() - t\n\n        # Statistics per image\n        for si, pred in enumerate(output):\n            labels = targets[targets[:, 0] == si, 1:]\n            nl = len(labels)\n            tcls = labels[:, 0].tolist() if nl else []  # target class\n            seen += 1\n\n            if pred is None:\n                if nl:\n                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))\n                continue\n\n            # Append to text file\n            # with open('test.txt', 'a') as file:\n            #    [file.write('%11.5g' * 7 % tuple(x) + '\\n') for x in pred]\n\n            # Clip boxes to image bounds\n            clip_coords(pred, (height, width))\n\n            # Append to pycocotools JSON dictionary\n            if save_json:\n                # [{\"image_id\": 42, \"category_id\": 18, \"bbox\": [258.15, 41.29, 348.26, 243.78], \"score\": 0.236}, ...\n                image_id = int(Path(paths[si]).stem.split('_')[-1])\n                box = pred[:, :4].clone()  # xyxy\n                scale_coords(imgs[si].shape[1:], box, shapes[si][0], shapes[si][1])  # to original shape\n                box = xyxy2xywh(box)  # xywh\n                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner\n                for p, b in zip(pred.tolist(), box.tolist()):\n                    jdict.append({'image_id': image_id,\n                                  'category_id': coco91class[int(p[5])],\n                                  'bbox': [round(x, 3) for x in b],\n                                  'score': round(p[4], 5)})\n\n            # Assign all predictions as incorrect\n            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)\n            if nl:\n                detected = []  # target indices\n                tcls_tensor = labels[:, 0]\n\n                # target boxes\n                tbox = xywh2xyxy(labels[:, 1:5]) * whwh\n\n                # Per target class\n                for cls in torch.unique(tcls_tensor):\n                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # target indices\n                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # prediction indices\n\n                    # Search for detections\n                    if 
pi.shape[0]:\n                        # Prediction to target ious\n                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices\n\n                        # Append detections\n                        for j in (ious > iouv[0]).nonzero(as_tuple=False):\n                            d = ti[i[j]]  # detected target\n                            if d not in detected:\n                                detected.append(d)\n                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn\n                                if len(detected) == nl:  # all targets already located in image\n                                    break\n\n            # Append statistics (correct, conf, pcls, tcls)\n            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))\n\n        # Plot images\n        if batch_i < 1 and plot:\n            f = 'test_batch%g_gt.jpg' % batch_i  # filename\n            plot_images(imgs, targets, paths=paths, names=names, fname=f, is_gray_scale=is_gray_scale)  # ground truth\n            f = 'test_batch%g_pred.jpg' % batch_i\n            plot_images(imgs, output_to_target(output, width, height), paths=paths, names=names, fname=f,\n                        is_gray_scale=is_gray_scale)  # predictions\n\n    # Compute statistics\n    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy\n    if len(stats):\n        p, r, ap, f1, ap_class = ap_per_class(*stats)\n        if niou > 1:\n            p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0]  # [P, R, AP@0.5:0.95, AP@0.5]\n        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()\n        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class\n    else:\n        nt = torch.zeros(1)\n\n    # Print results\n    pf = '%20s' + '%10.3g' * 6  # print format\n    if rank in [-1, 0]:\n        print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))\n\n    # Print results per class\n    if verbose and nc > 1 and len(stats):\n        for i, c in enumerate(ap_class):\n            print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))\n\n    # Print speeds\n    if verbose or save_json:\n        t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple\n        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)\n\n    # Save JSON\n    if save_json and map and len(jdict):\n        print('\\nCOCO mAP with pycocotools...')\n        imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataloader.dataset.img_files]\n        with open('results.json', 'w') as file:\n            json.dump(jdict, file)\n\n        try:\n            from pycocotools.coco import COCO\n            from pycocotools.cocoeval import COCOeval\n\n            # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb\n            # cocovision = opt.data.split('\\\\')[-1].split('.')[0]\n            # print(cocovision)\n            # cocoGt = COCO(glob.glob('data/'+cocovision+'/instances_val*.json')[0])  # initialize COCO ground truth api\n            cocoGt = COCO(glob.glob('data/coco2014/instances_val*.json')[0])  # initialize COCO ground truth api\n            cocoDt = cocoGt.loadRes('results.json')  # initialize COCO pred api\n\n            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')\n            cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images\n            cocoEval.evaluate()\n            cocoEval.accumulate()\n            
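# summarize() prints the 12 standard COCO metrics and fills cocoEval.stats;\n            # stats[0] is mAP@0.5:0.95 and stats[1] is mAP@0.5 (see the commented line below)\n            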
cocoEval.summarize()\n            # mf1, map = cocoEval.stats[:2]  # update to pycocotools results (mAP@0.5:0.95, mAP@0.5)\n        except Exception:\n            print('WARNING: pycocotools must be installed with numpy==1.17 to run correctly. '\n                  'See https://github.com/cocodataset/cocoapi/issues/356')\n\n    # Return results\n    maps = np.zeros(nc) + map\n    for i, c in enumerate(ap_class):\n        maps[c] = ap[i]\n    return (mp, mr, map, mf1, *(loss.cpu() / len(dataloader)).tolist()), maps\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(prog='test.py')\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')\n    parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data path')\n    parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')\n    parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')\n    parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')\n    parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')\n    parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')\n    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')\n    parser.add_argument('--task', default='test', help=\"'test', 'study', 'benchmark'\")\n    parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')\n    parser.add_argument('--augment', action='store_true', help='augmented inference')\n    parser.add_argument('--quantized', type=int, default=-1, help='quantization way')\n    parser.add_argument('--shortcut_way', type=int, default=1, help='--shortcut quantization way')\n    parser.add_argument('--a-bit', type=int, default=8, help='a-bit')\n    parser.add_argument('--w-bit', type=int, default=8, help='w-bit')\n    parser.add_argument('--gray-scale', action='store_true', help='gray scale training')\n    parser.add_argument('--maxabsscaler', '-mas', action='store_true', help='Standardize input to (-1,1)')\n\n    opt = parser.parse_args()\n    opt.save_json = opt.save_json or any([x in opt.data for x in ['coco.data', 'coco2014.data', 'coco2017.data']])\n    opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0]  # find file\n    opt.data = list(glob.iglob('./**/' + opt.data, recursive=True))[0]  # find file\n\n    print(opt)\n\n    # task = 'test', 'study', 'benchmark'\n    if opt.task == 'test':  # (default) test normally\n        test(opt.cfg,\n             opt.data,\n             opt.weights,\n             opt.batch_size,\n             opt.img_size,\n             opt.conf_thres,\n             opt.iou_thres,\n             opt.save_json,\n             opt.augment,\n             quantized=opt.quantized,\n             a_bit=opt.a_bit,\n             w_bit=opt.w_bit,\n             rank=-1,\n             is_gray_scale=opt.gray_scale,\n             maxabsscaler=opt.maxabsscaler,\n             shortcut_way=opt.shortcut_way)\n\n    elif opt.task == 'benchmark':  # mAPs at img-size 256-640 at iou-thres 0.6 and 0.7\n        y = []\n        for i in list(range(256, 640, 128)):  # img-size\n            for j in [0.6, 0.7]:  # iou-thres\n                t = time.time()\n                r = test(opt.cfg, opt.data, opt.weights, opt.batch_size, i, opt.conf_thres, j, opt.save_json)[0]\n                y.append(r + (time.time() - t,))\n        
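# each row of y: P, R, mAP@0.5, F1, the three val losses, then wall-clock seconds for that (img-size, iou-thres) pair\n        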
np.savetxt('benchmark.txt', y, fmt='%10.4g')  # reload later with y = np.loadtxt('benchmark.txt')\n"
  },
  {
    "path": "train.py",
    "content": "import argparse\n\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport test  # import test.py to get mAP after each epoch\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\nfrom utils.prune_utils import *\nimport math\nfrom torch.cuda import amp\n\nfrom utils.torch_utils import ModelEMA, select_device  # DDP import\nimport torch.distributed as dist\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nwdir = 'weights' + os.sep  # weights dir\nlast = wdir + 'last.pt'\nbest = wdir + 'best.pt'\nresults_file = 'results.txt'\n\n# Hyperparameters\nhyp = {'giou': 3.54,  # giou loss gain\n       'cls': 37.4,  # cls loss gain\n       'cls_pw': 1.0,  # cls BCELoss positive_weight\n       'obj': 64.3,  # obj loss gain (*=img_size/320 if img_size != 320)\n       'obj_pw': 1.0,  # obj BCELoss positive_weight\n       'iou_t': 0.20,  # iou training threshold\n       'lr0': 0.01,  # initial learning rate (SGD=5E-3, Adam=5E-4)\n       'lrf': 0.0005,  # final learning rate (with cos scheduler)\n       'momentum': 0.937,  # SGD momentum\n       'weight_decay': 0.0005,  # optimizer weight decay\n       'fl_gamma': 0.0,  # focal loss gamma (efficientDet default is gamma=1.5)\n       'hsv_h': 0.0138,  # image HSV-Hue augmentation (fraction)\n       'hsv_s': 0.678,  # image HSV-Saturation augmentation (fraction)\n       'hsv_v': 0.36,  # image HSV-Value augmentation (fraction)\n       'degrees': 1.98 * 0,  # image rotation (+/- deg)\n       'translate': 0.05 * 0,  # image translation (+/- fraction)\n       'scale': 0.05 * 0,  # image scale (+/- gain)\n       'shear': 0.641 * 0}  # image shear (+/- deg)\n\n# Overwrite hyp with hyp*.txt (optional)\nf = glob.glob('hyp*.txt')\nif f:\n    print('Using %s' % f[0])\n    for k, v in zip(hyp.keys(), np.loadtxt(f[0])):\n        hyp[k] = v\n\n# Print focal loss if gamma > 0\nif hyp['fl_gamma']:\n    print('Using FocalLoss(gamma=%g)' % hyp['fl_gamma'])\n\n\ndef train(hyp):\n    cfg = opt.cfg\n    t_cfg = opt.t_cfg  # teacher model cfg for knowledge distillation\n    data = opt.data\n    epochs = opt.epochs  # 500200 batches at bs 64, 117263 images = 273 epochs\n    batch_size = opt.batch_size\n    accumulate = max(round(64 / batch_size), 1)  # accumulate n times before optimizer update (bs 64)\n    weights = opt.weights  # initial training weights\n\n    t_weights = opt.t_weights  # teacher model weights\n    imgsz_min, imgsz_max, imgsz_test = opt.img_size  # img sizes (min, max, test)\n\n    # Image Sizes\n    gs = 32  # (pixels) grid size\n    start_epoch = 0\n    assert math.fmod(imgsz_min, gs) == 0, '--img-size %g must be a %g-multiple' % (imgsz_min, gs)\n    opt.multi_scale |= imgsz_min != imgsz_max  # multi if different (min, max)\n    if opt.multi_scale:\n        if imgsz_min == imgsz_max:\n            imgsz_min //= 1.5\n            imgsz_max //= 0.667\n        grid_min, grid_max = imgsz_min // gs, imgsz_max // gs\n        imgsz_min, imgsz_max = int(grid_min * gs), int(grid_max * gs)\n    img_size = imgsz_max  # initialize with max size\n\n    # Configure run\n    init_seeds()\n    data_dict = parse_data_cfg(data)\n    train_path = data_dict['train']\n    test_path = data_dict['valid']\n    nc = int(data_dict['classes'])  # number of classes\n    hyp['cls'] *= nc / 80  # update coco-tuned hyp['cls'] to current dataset\n\n    # Remove previous results\n    for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):\n        
os.remove(f)\n\n    # DDP  init\n    if opt.local_rank != -1:\n        if opt.local_rank == 0:\n            print(\"--------------using ddp---------------\")\n        assert torch.cuda.device_count() > opt.local_rank\n        torch.cuda.set_device(opt.local_rank)\n        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend\n\n        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'\n        opt.batch_size = opt.batch_size // opt.world_size\n    else:\n        dist.init_process_group(backend='nccl',  # 'distributed backend'\n                                init_method='tcp://127.0.0.1:9999',  # distributed training init method\n                                world_size=1,  # number of nodes for distributed training\n                                rank=0)  # distributed training node rank\n\n    # Initialize model\n    steps = math.ceil(len(open(train_path).readlines()) / batch_size) * epochs\n    model = Darknet(cfg, quantized=opt.quantized, a_bit=opt.a_bit, w_bit=opt.w_bit,\n                    steps=steps, is_gray_scale=opt.gray_scale, maxabsscaler=opt.maxabsscaler,\n                    shortcut_way=opt.shortcut_way).to(device)\n    if t_cfg:\n        t_model = Darknet(t_cfg).to(device)\n\n    # print('<.....................using gridmask.......................>')\n    # gridmask = GridMask(d1=96, d2=224, rotate=360, ratio=0.6, mode=1, prob=0.8)\n\n    # Optimizer\n    if opt.quantized == 2:\n        pg0, pg1, pg2, pg3 = [], [], [], []  # optimizer parameter groups\n    else:\n        pg0, pg1, pg2 = [], [], []  # optimizer parameter groups\n    for k, v in dict(model.named_parameters()).items():\n        if '.bias' in k:\n            pg2 += [v]  # biases\n        elif 'Conv2d.weight' in k:\n            pg1 += [v]  # apply weight_decay\n        elif 'scale' in k and opt.quantized == 2:\n            pg3 += [v]\n        else:\n            pg0 += [v]  # all else\n\n    if opt.adam or opt.quantized != -1:\n        # hyp['lr0'] *= 0.1  # reduce lr (i.e. SGD=5E-3, Adam=5E-4)\n        optimizer = optim.Adam(pg0, lr=hyp['lr0'] * 0.005)\n        if opt.quantized == 2:\n            optimizer.add_param_group({'params': pg3})\n        # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)\n    else:\n        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)\n    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay\n    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)\n    if opt.quantized == 2:\n        print('Optimizer groups: %g .scale, %g .bias, %g Conv2d.weight, %g other' % (\n            len(pg3), len(pg2), len(pg1), len(pg0)))\n        del pg0, pg1, pg2, pg3\n    else:\n        print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))\n        del pg0, pg1, pg2\n\n    best_fitness = 0.0\n    if weights != 'None':\n        attempt_download(weights)\n        if weights.endswith('.pt'):  # pytorch format\n            # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.\n            chkpt = torch.load(weights, map_location=device)\n\n            # load model\n            try:\n                chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}\n                model.load_state_dict(chkpt['model'], strict=False)\n            except KeyError as e:\n                s = \"%s is not compatible with %s. 
Specify --weights '' or specify a --cfg compatible with %s. \" \\\n                    \"See https://github.com/ultralytics/yolov3/issues/657\" % (opt.weights, opt.cfg, opt.weights)\n                raise KeyError(s) from e\n\n            # load optimizer\n            if chkpt['optimizer'] is not None:\n                optimizer.load_state_dict(chkpt['optimizer'])\n                if chkpt.get('best_fitness') is not None:\n                    best_fitness = chkpt['best_fitness']\n            # load results\n            if chkpt.get('training_results') is not None:\n                with open(results_file, 'w') as file:\n                    file.write(chkpt['training_results'])  # write results.txt\n            if chkpt.get('epoch') is not None:\n                start_epoch = chkpt['epoch'] + 1\n            del chkpt\n\n        elif len(weights) > 0:  # darknet format\n            # possible weights are '*.weights', 'yolov3-tiny.conv.15',  'darknet53.conv.74' etc.\n            load_darknet_weights(model, weights, pt=opt.pt, quant=(opt.quantized != -1))\n    if t_cfg:\n        if t_weights.endswith('.pt'):\n            t_model.load_state_dict(torch.load(t_weights, map_location=device)['model'])\n        elif t_weights.endswith('.weights'):\n            load_darknet_weights(t_model, t_weights)\n        else:\n            raise Exception('please provide proper teacher weights for knowledge distillation')\n        t_model.eval()\n        print('<.....................using knowledge distillation.......................>')\n        print('teacher model:', t_weights, '\\n')\n\n    # Scheduler https://arxiv.org/pdf/1812.01187.pdf\n    if opt.quantized != -1:\n        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[epochs // 5, epochs // 2, epochs // 1.25],\n                                             gamma=0.3)\n    else:\n        lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.95 + 0.05  # cosine\n        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)\n    scheduler.last_epoch = start_epoch - 1  # see link below\n    # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822\n\n    # # Plot lr schedule\n    # y = []\n    # for _ in range(epochs):\n    #     scheduler.step()\n    #     y.append(optimizer.param_groups[0]['lr'])\n    # plt.plot(y, '.-', label='LambdaLR')\n    # plt.xlabel('epoch')\n    # plt.ylabel('LR')\n    # plt.tight_layout()\n    # plt.savefig('LR.png', dpi=300)\n\n    # Initialize distributed training\n    if opt.local_rank != -1:\n        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, find_unused_parameters=True)\n    else:\n        model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)\n\n    model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level\n\n    # Dataset\n    dataset = LoadImagesAndLabels(train_path, img_size, batch_size,\n                                  augment=True,\n                                  hyp=hyp,  # augmentation hyperparameters\n                                  rect=opt.rect,  # rectangular training\n                                  cache_images=True,\n                                  rank=opt.local_rank,\n                                  is_gray_scale=True if opt.gray_scale else False)\n\n    testset = LoadImagesAndLabels(test_path, imgsz_test, batch_size // 4,\n                                  hyp=hyp,\n                                  rect=True,\n                                  
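# cache decoded images in RAM so repeated evaluation passes skip disk reads\n                                  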
cache_images=True,\n                                  rank=opt.local_rank,\n                                  is_gray_scale=True if opt.gray_scale else False)\n\n    # get the layers to be pruned\n    if hasattr(model, 'module'):\n        print('multi-gpu sparse')\n        if opt.prune == 0:\n            print('normal sparse training')\n            _, _, prune_idx = parse_module_defs(model.module.module_defs)\n        elif opt.prune == 1:\n            print('shortcut sparse training')\n            _, _, prune_idx, _, _ = parse_module_defs2(model.module.module_defs)\n        elif opt.prune == 2:\n            print('layer sparse training')\n            _, _, prune_idx = parse_module_defs4(model.module.module_defs)\n\n\n    else:\n        print('single-gpu sparse')\n        if opt.prune == 0:\n            print('normal sparse training')\n            _, _, prune_idx = parse_module_defs(model.module_defs)\n        elif opt.prune == 1:\n            print('shortcut sparse training')\n            _, _, prune_idx, _, _ = parse_module_defs2(model.module_defs)\n        elif opt.prune == 2:\n            print('layer sparse training')\n            _, _, prune_idx = parse_module_defs4(model.module_defs)\n\n    train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)  # ddp sampler\n    test_sampler = torch.utils.data.distributed.DistributedSampler(testset)\n\n    # Dataloader\n    batch_size = min(batch_size, len(dataset))\n    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers\n    dataloader = torch.utils.data.DataLoader(dataset,\n                                             batch_size=int(batch_size / opt.world_size),\n                                             num_workers=nw,\n                                             shuffle=False if (opt.local_rank != -1) else not opt.rect,\n                                             pin_memory=True,\n                                             collate_fn=dataset.collate_fn,\n                                             sampler=train_sampler if (opt.local_rank != -1) else None\n                                             )\n    # Testloader\n    testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size // 4,\n                                                                 hyp=hyp,\n                                                                 rect=True,\n                                                                 cache_images=True,\n                                                                 rank=opt.local_rank,\n                                                                 is_gray_scale=True if opt.gray_scale else False),\n                                             batch_size=batch_size // 4,\n                                             num_workers=nw,\n                                             pin_memory=True,\n                                             collate_fn=dataset.collate_fn)\n    if opt.prune != -1:\n        for idx in prune_idx:\n            if hasattr(model, 'module'):\n                bn_weights = gather_bn_weights(model.module.module_list, [idx])\n            else:\n                bn_weights = gather_bn_weights(model.module_list, [idx])\n            tb_writer.add_histogram('before_train_perlayer_bn_weights/hist', bn_weights.numpy(), idx, bins='doane')\n    # Model parameters\n    model.nc = nc  # attach number of classes to model\n    model.hyp = hyp  # attach hyperparameters to model\n    model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or 
giou)\n    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights\n\n    # Model EMA\n    if opt.ema:\n        ema = torch_utils.ModelEMA(model)\n\n    # Start training\n    nb = len(dataloader)  # number of batches\n    n_burn = max(3 * nb, 500)  # burn-in iterations, max(3 epochs, 500 iterations)\n    maps = np.zeros(nc)  # mAP per class\n    # torch.autograd.set_detect_anomaly(True)\n    results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'\n    t0 = time.time()\n    if opt.local_rank == -1 or opt.local_rank == 0:\n        print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))\n        print('Using %g dataloader workers' % nw)\n        print('Starting training for %g epochs...' % epochs)\n    if opt.mpt:\n        cuda = device.type != 'cpu'\n        scaler = amp.GradScaler(enabled=cuda)\n    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------\n        if opt.local_rank != -1:\n            dataloader.sampler.set_epoch(epoch)  # DDP set seed\n        # gridmask.set_prob(epoch, max_epoch)\n        model.train()\n        # sparsity-training flag: enabled whenever a prune mode is selected\n        sr_flag = opt.prune != -1\n        # Update image weights (optional)\n        if dataset.image_weights:\n            w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights\n            image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)\n            dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # rand weighted idx\n        mloss = torch.zeros(4).to(device)  # mean losses\n        if opt.local_rank == -1 or opt.local_rank == 0:\n            print(('\\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))\n        pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar\n        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------\n            ni = i + nb * epoch  # number integrated batches (since train start)\n            if opt.maxabsscaler:\n                imgs = imgs.to(device).float() / 256.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0\n                imgs = imgs * 2 - 1\n            else:\n                imgs = imgs.to(device).float() / 256.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0\n            if opt.quantized != -1:\n                if opt.a_bit == 16:\n                    # quantize the inputs to 16-bit fixed point: scale up, round, scale back\n                    imgs = imgs * (2 ** 14)\n                    sign = torch.sign(imgs)\n                    imgs = sign * torch.floor(torch.abs(imgs) + 0.5)\n                    imgs = imgs / (2 ** 14)\n            # Burn-in\n            if ni <= n_burn and opt.quantized == -1:\n                xi = [0, n_burn]  # x interp\n                model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)\n                accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())\n                for j, x in enumerate(optimizer.param_groups):\n                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0\n                    x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])\n                    x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])\n                    if 'momentum' in x:\n                        x['momentum'] = np.interp(ni, xi, 
[0.9, hyp['momentum']])\n\n            # Multi-Scale\n            if opt.multi_scale:\n                if ni / accumulate % 1 == 0:  # adjust img_size (67% - 150%) once per accumulate batches\n                    img_size = random.randrange(grid_min, grid_max + 1) * gs\n                sf = img_size / max(imgs.shape[2:])  # scale factor\n                if sf != 1:\n                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)\n                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)\n\n            # Forward\n            if opt.mpt:\n                with amp.autocast(enabled=cuda):\n                    targets = targets.to(device)\n                    pred, feature_s = model(imgs)\n\n                    # Loss\n                    loss, loss_items = compute_loss(pred, targets, model)\n                    if not torch.isfinite(loss):\n                        print('WARNING: non-finite loss, ending training ', loss_items)\n                        return results\n\n                    soft_target = 0\n                    if t_cfg:\n                        _, output_t, feature_t = t_model(imgs)\n                        if opt.KDstr == 1:\n                            soft_target = compute_lost_KD(pred, output_t, model.nc, imgs.size(0))\n                        elif opt.KDstr == 2:\n                            soft_target, reg_ratio = compute_lost_KD2(model, targets, pred, output_t)\n                        elif opt.KDstr == 3:\n                            soft_target = compute_lost_KD3(model, targets, pred, output_t)\n                        elif opt.KDstr == 4:\n                            soft_target = compute_lost_KD4(model, targets, pred, output_t, feature_s, feature_t,\n                                                           imgs.size(0))\n                        elif opt.KDstr == 5:\n                            soft_target = compute_lost_KD5(model, targets, pred, output_t, feature_s, feature_t,\n                                                           imgs.size(0),\n                                                           img_size)\n                        else:\n                            print(\"please select KD strategy!\")\n                        loss += soft_target\n            else:\n                targets = targets.to(device)\n                pred, feature_s = model(imgs)\n\n                # Loss\n                loss, loss_items = compute_loss(pred, targets, model)\n                if not torch.isfinite(loss):\n                    print('WARNING: non-finite loss, ending training ', loss_items)\n                    return results\n\n                soft_target = 0\n                if t_cfg:\n                    _, output_t, feature_t = t_model(imgs)\n                    if opt.KDstr == 1:\n                        soft_target = compute_lost_KD(pred, output_t, model.nc, imgs.size(0))\n                    elif opt.KDstr == 2:\n                        soft_target, reg_ratio = compute_lost_KD2(model, targets, pred, output_t)\n                    elif opt.KDstr == 3:\n                        soft_target = compute_lost_KD3(model, targets, pred, output_t)\n                    elif opt.KDstr == 4:\n                        soft_target = compute_lost_KD4(model, targets, pred, output_t, feature_s, feature_t,\n                                                       imgs.size(0))\n                    elif opt.KDstr == 5:\n                        soft_target = compute_lost_KD5(model, targets, pred, 
output_t, feature_s, feature_t,\n                                                       imgs.size(0),\n                                                       img_size)\n                    else:\n                        print(\"please select KD strategy!\")\n                    loss += soft_target\n            # Backward\n            loss *= batch_size / 64  # scale loss\n            if opt.mpt:\n                scaler.scale(loss).backward()\n            else:\n                loss.backward()\n            # sparsify the BN gamma of the layers to be pruned (L1 penalty subgradient)\n            if hasattr(model, 'module'):\n                if opt.prune != -1:\n                    BNOptimizer.updateBN(sr_flag, model.module.module_list, opt.s, prune_idx)\n            else:\n                if opt.prune != -1:\n                    BNOptimizer.updateBN(sr_flag, model.module_list, opt.s, prune_idx)\n            # Optimize\n            if ni % accumulate == 0:\n                if opt.mpt:\n                    scaler.step(optimizer)  # optimizer.step\n                    scaler.update()\n                else:\n                    optimizer.step()\n                optimizer.zero_grad()\n\n                if opt.ema:\n                    ema.update(model)\n\n            # Print\n            mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses\n            mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)\n            s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)\n            pbar.set_description(s)\n\n            # Plot\n            if i == 0:\n                if not os.path.isdir('train_sample/'):\n                    os.makedirs('train_sample/')\n                f = 'train_sample/train_batch%g.jpg' % epoch  # filename\n                res = plot_images(images=imgs, targets=targets, paths=paths, fname=f, is_gray_scale=opt.gray_scale)\n                if tb_writer:\n                    tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)\n                    # tb_writer.add_graph(model, imgs)  # add model to tensorboard\n\n            # end batch ------------------------------------------------------------------------------------------------\n\n        # Update scheduler\n        scheduler.step()\n\n        # Process epoch results\n        if opt.ema:\n            ema.update_attr(model)\n\n            if hasattr(model, 'module'):\n                module_defs, module_list = ema.ema.module.module_defs, ema.ema.module.module_list\n            else:\n                module_defs, module_list = ema.ema.module_defs, ema.ema.module_list\n\n            for i, (mdef, module) in enumerate(zip(module_defs, module_list)):\n                if mdef['type'] == 'yolo':\n                    yolo_layer = module\n                    yolo_layer.nx, yolo_layer.ny = 0, 0\n        if hasattr(model, 'module'):\n            module_defs, module_list = model.module.module_defs, model.module.module_list\n        else:\n            module_defs, module_list = model.module_defs, model.module_list\n        for i, (mdef, module) in enumerate(zip(module_defs, module_list)):\n            if mdef['type'] == 'yolo':\n                yolo_layer = module\n                yolo_layer.nx, yolo_layer.ny = 0, 0\n\n        final_epoch = epoch + 1 == epochs\n        if not opt.notest or final_epoch:  # Calculate mAP\n            is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80\n            results, maps = test.test(cfg,\n      
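# evaluate with the EMA shadow weights when --ema is enabled (model=ema.ema below)\n      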
                                data,\n                                      batch_size=batch_size // 4,\n                                      imgsz=imgsz_test,\n                                      model=ema.ema if opt.ema else model,\n                                      save_json=final_epoch and is_coco,\n                                      dataloader=testloader,\n                                      multi_label=ni > n_burn,\n                                      quantized=opt.quantized,\n                                      a_bit=opt.a_bit,\n                                      w_bit=opt.w_bit,\n                                      rank=opt.local_rank,\n                                      plot=True,\n                                      maxabsscaler=opt.maxabsscaler,\n                                      shortcut_way=opt.shortcut_way)\n            torch.cuda.empty_cache()\n        # Write\n        if opt.local_rank in [-1, 0]:\n            with open(results_file, 'a') as f:\n                f.write(s + '%10.3g' * 7 % results + '\\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)\n            if len(opt.name) and opt.bucket:\n                os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))\n\n        # Tensorboard\n        if tb_writer:\n            tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',\n                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',\n                    'val/giou_loss', 'val/obj_loss', 'val/cls_loss']\n            for x, tag in zip(list(mloss[:-1]) + list(results), tags):\n                tb_writer.add_scalar(tag, x, epoch)\n            if opt.prune != -1:\n                # histogram of the BN gammas across all layers being sparsified\n                if hasattr(model, 'module'):\n                    bn_weights = gather_bn_weights(model.module.module_list, prune_idx)\n                else:\n                    bn_weights = gather_bn_weights(model.module_list, prune_idx)\n                tb_writer.add_histogram('bn_weights/hist', bn_weights.numpy(), epoch, bins='doane')\n\n        # Update best mAP\n        fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]\n        if fi > best_fitness:\n            best_fitness = fi\n\n        # Save model\n        save = (not opt.nosave) or (final_epoch and not opt.evolve)\n        if opt.ema:\n            if hasattr(model, 'module'):\n                model_temp = ema.ema.module.state_dict()\n            else:\n                model_temp = ema.ema.state_dict()\n        else:\n            if hasattr(model, 'module'):\n                model_temp = model.module.state_dict()\n            else:\n                model_temp = model.state_dict()\n        if save and dist.get_rank() == 0:  # DDP save model only once\n            with open(results_file, 'r') as f:  # create checkpoint\n                chkpt = {'epoch': epoch,\n                         'best_fitness': best_fitness,\n                         'training_results': f.read(),\n                         'model': model_temp,\n                         'optimizer': None if final_epoch else optimizer.state_dict()}\n\n            # Save last, best and delete\n            torch.save(chkpt, last)\n            if (best_fitness == fi) and not final_epoch:\n                torch.save(chkpt, best)\n            del chkpt\n\n        # end epoch ----------------------------------------------------------------------------------------------------\n    # end training\n\n    n = opt.name\n    if len(n):\n        n = '_' + n if not 
n.isnumeric() else n\n        fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n\n        for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):\n            if os.path.exists(f1):\n                os.rename(f1, f2)  # rename\n                ispt = f2.endswith('.pt')  # is *.pt\n                strip_optimizer(f2) if ispt else None  # strip optimizer\n                os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None  # upload\n\n    if not opt.evolve:\n        plot_results()  # save as results.png\n    if opt.local_rank in [-1, 0]:\n        print('%g epochs completed in %.3f hours.\\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))\n    dist.destroy_process_group() if torch.cuda.device_count() > 1 else None\n    torch.cuda.empty_cache()\n    return results\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--epochs', type=int, default=300)  # 500200 batches at bs 16, 117263 COCO images = 273 epochs\n    parser.add_argument('--batch-size', type=int, default=16)  # effective bs = batch_size * accumulate = 16 * 4 = 64\n    parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')\n    parser.add_argument('--t_cfg', type=str, default='', help='teacher model cfg file path for knowledge distillation')\n    parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data path')\n    parser.add_argument('--multi-scale', action='store_true', help='adjust (67%% - 150%%) img_size every 10 batches')\n    parser.add_argument('--img-size', nargs='+', type=int, default=[320, 640], help='[min_train, max-train, test]')\n    parser.add_argument('--rect', action='store_true', help='rectangular training')\n    parser.add_argument('--resume', action='store_true', help='resume training from last.pt')\n    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')\n    parser.add_argument('--notest', action='store_true', help='only test final epoch')\n    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')\n    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')\n    parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='initial weights path')\n    parser.add_argument('--t_weights', type=str, default='', help='teacher model weights')\n    parser.add_argument('--KDstr', type=int, default=-1, help='KD strategy')\n    parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')\n    parser.add_argument('--device', default='', help='device id (i.e. 
0 or 0,1 or cpu)')\n    parser.add_argument('--adam', action='store_true', help='use adam optimizer')\n    parser.add_argument('--ema', action='store_true', help='use ema')\n    parser.add_argument('--pretrain', '-pt', dest='pt', action='store_true',\n                        help='use pretrained model')\n    parser.add_argument('--mixedprecision', '-mpt', dest='mpt', action='store_true',\n                        help='use mixed precision training')\n    parser.add_argument('--s', type=float, default=0.001, help='scale sparse rate')\n    parser.add_argument('--prune', type=int, default=-1,\n                        help='0: normal/regular prune, 1: shortcut prune, 2: layer prune')\n    parser.add_argument('--quantized', type=int, default=-1, help='quantization way')\n    parser.add_argument('--shortcut_way', type=int, default=1, help='--shortcut quantization way')\n    parser.add_argument('--a-bit', type=int, default=8, help='a-bit')\n    parser.add_argument('--w-bit', type=int, default=8, help='w-bit')\n    parser.add_argument('--gray-scale', action='store_true', help='gray scale training')\n    parser.add_argument('--maxabsscaler', '-mas', action='store_true', help='Standardize input to (-1,1)')\n    # DDP get local-rank\n    parser.add_argument('--rank', default=0, help='rank of current process')\n    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')\n\n    opt = parser.parse_args()\n    opt.weights = last if opt.resume else opt.weights\n    opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0]  # find file\n    # opt.data = list(glob.iglob(' ./**/' + opt.data, recursive=True))[0]  # find file\n    if opt.local_rank in [-1, 0]:\n        print(opt)\n    opt.img_size.extend([opt.img_size[-1]] * (3 - len(opt.img_size)))  # extend to 3 sizes (min, max, test)\n\n    # DDP set variables\n    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1\n\n    # scale hyp['obj'] by img_size (evolved at 320)\n    # hyp['obj'] *= opt.img_size[0] / 320.\n\n    # DDP set device\n    if opt.local_rank != -1:\n        if opt.local_rank == 0:\n            device = select_device(opt.device, batch_size=opt.batch_size)\n        device = torch.device('cuda', opt.local_rank)\n    else:\n        device = torch_utils.select_device(opt.device, batch_size=opt.batch_size)\n\n    tb_writer = None\n    if not opt.evolve:  # Train normally\n        if opt.local_rank in [-1, 0]:\n            print('Start Tensorboard with \"tensorboard --logdir=runs\", view at http://localhost:6006/')\n            tb_writer = SummaryWriter(comment=opt.name)\n        train(hyp)  # train normally\n\n    else:  # Evolve hyperparameters (optional)\n        opt.notest, opt.nosave = True, True  # only test/save final epoch\n        if opt.bucket:\n            os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket)  # download evolve.txt if exists\n\n        for _ in range(1):  # generations to evolve\n            if os.path.exists('evolve.txt'):  # if evolve.txt exists: select best hyps and mutate\n                # Select parent(s)\n                parent = 'single'  # parent selection method: 'single' or 'weighted'\n                x = np.loadtxt('evolve.txt', ndmin=2)\n                n = min(5, len(x))  # number of previous results to consider\n                x = x[np.argsort(-fitness(x))][:n]  # top n mutations\n                w = fitness(x) - fitness(x).min()  # weights\n                if parent == 'single' or len(x) == 1:\n                    # x = x[random.randint(0, n - 1)]  # random selection\n                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection\n                elif parent == 'weighted':\n                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination\n\n                # Mutate\n                method, mp, s = 3, 0.9, 0.2  # method, mutation probability, sigma\n                npr = np.random\n                npr.seed(int(time.time()))\n                g = np.array([1, 1, 1, 1, 1, 1, 1, 0, .1, 1, 0, 1, 1, 1, 1, 1, 1, 1])  # gains\n                ng = len(g)\n                if method == 1:\n                    v = (npr.randn(ng) * npr.random() * g * s + 1) ** 2.0\n                elif method == 2:\n                    v = (npr.randn(ng) * npr.random(ng) * g * s + 1) ** 2.0\n                elif method == 3:\n                    v = np.ones(ng)\n                    while all(v == 1):  # mutate until a change occurs (prevent duplicates)\n                        # v = (g * (npr.random(ng) < mp) * npr.randn(ng) * s + 1) ** 2.0\n                        v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)\n                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)\n                    hyp[k] = x[i + 7] * v[i]  # mutate\n\n            # Clip to limits\n            keys = ['lr0', 'iou_t', 'momentum', 'weight_decay', 'hsv_s', 'hsv_v', 'translate', 'scale', 'fl_gamma']\n            limits = [(1e-5, 1e-2), (0.00, 0.70), (0.60, 0.98), (0, 0.001), (0, .9), (0, .9), (0, .9), (0, .9), (0, 3)]\n            for k, v in zip(keys, limits):\n                hyp[k] = np.clip(hyp[k], v[0], v[1])\n\n            # Train mutation\n            results = train(hyp.copy())\n\n            # Write mutation results\n            print_mutation(hyp, results, opt.bucket)\n\n            # Plot results\n            # plot_evolution_results(hyp)\n"
  },
  {
    "path": "utils/__init__.py",
    "content": ""
  },
  {
    "path": "utils/adabound.py",
    "content": "import math\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass AdaBound(Optimizer):\n    \"\"\"Implements AdaBound algorithm.\n    It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.\n    Arguments:\n        params (iterable): iterable of parameters to optimize or dicts defining\n            parameter groups\n        lr (float, optional): Adam learning rate (default: 1e-3)\n        betas (Tuple[float, float], optional): coefficients used for computing\n            running averages of gradient and its square (default: (0.9, 0.999))\n        final_lr (float, optional): final (SGD) learning rate (default: 0.1)\n        gamma (float, optional): convergence speed of the bound functions (default: 1e-3)\n        eps (float, optional): term added to the denominator to improve\n            numerical stability (default: 1e-8)\n        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n        amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm\n    .. Adaptive Gradient Methods with Dynamic Bound of Learning Rate:\n        https://openreview.net/forum?id=Bkg3g2R9FX\n    \"\"\"\n\n    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,\n                 eps=1e-8, weight_decay=0, amsbound=False):\n        if not 0.0 <= lr:\n            raise ValueError(\"Invalid learning rate: {}\".format(lr))\n        if not 0.0 <= eps:\n            raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n        if not 0.0 <= betas[0] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n        if not 0.0 <= betas[1] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n        if not 0.0 <= final_lr:\n            raise ValueError(\"Invalid final learning rate: {}\".format(final_lr))\n        if not 0.0 <= gamma < 1.0:\n            raise ValueError(\"Invalid gamma parameter: {}\".format(gamma))\n        defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,\n                        weight_decay=weight_decay, amsbound=amsbound)\n        super(AdaBound, self).__init__(params, defaults)\n\n        self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))\n\n    def __setstate__(self, state):\n        super(AdaBound, self).__setstate__(state)\n        for group in self.param_groups:\n            group.setdefault('amsbound', False)\n\n    def step(self, closure=None):\n        \"\"\"Performs a single optimization step.\n        Arguments:\n            closure (callable, optional): A closure that reevaluates the model\n                and returns the loss.\n        \"\"\"\n        loss = None\n        if closure is not None:\n            loss = closure()\n\n        for group, base_lr in zip(self.param_groups, self.base_lrs):\n            for p in group['params']:\n                if p.grad is None:\n                    continue\n                grad = p.grad.data\n                if grad.is_sparse:\n                    raise RuntimeError(\n                        'Adam does not support sparse gradients, please consider SparseAdam instead')\n                amsbound = group['amsbound']\n\n                state = self.state[p]\n\n                # State initialization\n                if len(state) == 0:\n                    state['step'] = 0\n                    # Exponential moving average of gradient values\n                    
state['exp_avg'] = torch.zeros_like(p.data)\n                    # Exponential moving average of squared gradient values\n                    state['exp_avg_sq'] = torch.zeros_like(p.data)\n                    if amsbound:\n                        # Maintains max of all exp. moving avg. of sq. grad. values\n                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n\n                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n                if amsbound:\n                    max_exp_avg_sq = state['max_exp_avg_sq']\n                beta1, beta2 = group['betas']\n\n                state['step'] += 1\n\n                if group['weight_decay'] != 0:\n                    grad = grad.add(p.data, alpha=group['weight_decay'])\n\n                # Decay the first and second moment running average coefficient\n                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n                if amsbound:\n                    # Maintains the maximum of all 2nd moment running avg. till now\n                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n                    # Use the max. for normalizing running avg. of gradient\n                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n                else:\n                    denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n                bias_correction1 = 1 - beta1 ** state['step']\n                bias_correction2 = 1 - beta2 ** state['step']\n                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n                # Applies bounds on actual learning rate\n                # lr_scheduler cannot affect final_lr, this is a workaround to apply lr decay\n                final_lr = group['final_lr'] * group['lr'] / base_lr\n                lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))\n                upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))\n                step_size = torch.full_like(denom, step_size)\n                step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)\n\n                p.data.add_(-step_size)\n\n        return loss\n\n\nclass AdaBoundW(Optimizer):\n    \"\"\"Implements AdaBound algorithm with Decoupled Weight Decay (arxiv.org/abs/1711.05101)\n    It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.\n    Arguments:\n        params (iterable): iterable of parameters to optimize or dicts defining\n            parameter groups\n        lr (float, optional): Adam learning rate (default: 1e-3)\n        betas (Tuple[float, float], optional): coefficients used for computing\n            running averages of gradient and its square (default: (0.9, 0.999))\n        final_lr (float, optional): final (SGD) learning rate (default: 0.1)\n        gamma (float, optional): convergence speed of the bound functions (default: 1e-3)\n        eps (float, optional): term added to the denominator to improve\n            numerical stability (default: 1e-8)\n        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n        amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm\n    .. 
Adaptive Gradient Methods with Dynamic Bound of Learning Rate:\n        https://openreview.net/forum?id=Bkg3g2R9FX\n    \"\"\"\n\n    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,\n                 eps=1e-8, weight_decay=0, amsbound=False):\n        if not 0.0 <= lr:\n            raise ValueError(\"Invalid learning rate: {}\".format(lr))\n        if not 0.0 <= eps:\n            raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n        if not 0.0 <= betas[0] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n        if not 0.0 <= betas[1] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n        if not 0.0 <= final_lr:\n            raise ValueError(\"Invalid final learning rate: {}\".format(final_lr))\n        if not 0.0 <= gamma < 1.0:\n            raise ValueError(\"Invalid gamma parameter: {}\".format(gamma))\n        defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,\n                        weight_decay=weight_decay, amsbound=amsbound)\n        super(AdaBoundW, self).__init__(params, defaults)\n\n        self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))\n\n    def __setstate__(self, state):\n        super(AdaBoundW, self).__setstate__(state)\n        for group in self.param_groups:\n            group.setdefault('amsbound', False)\n\n    def step(self, closure=None):\n        \"\"\"Performs a single optimization step.\n        Arguments:\n            closure (callable, optional): A closure that reevaluates the model\n                and returns the loss.\n        \"\"\"\n        loss = None\n        if closure is not None:\n            loss = closure()\n\n        for group, base_lr in zip(self.param_groups, self.base_lrs):\n            for p in group['params']:\n                if p.grad is None:\n                    continue\n                grad = p.grad.data\n                if grad.is_sparse:\n                    raise RuntimeError(\n                        'Adam does not support sparse gradients, please consider SparseAdam instead')\n                amsbound = group['amsbound']\n\n                state = self.state[p]\n\n                # State initialization\n                if len(state) == 0:\n                    state['step'] = 0\n                    # Exponential moving average of gradient values\n                    state['exp_avg'] = torch.zeros_like(p.data)\n                    # Exponential moving average of squared gradient values\n                    state['exp_avg_sq'] = torch.zeros_like(p.data)\n                    if amsbound:\n                        # Maintains max of all exp. moving avg. of sq. grad. values\n                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n\n                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n                if amsbound:\n                    max_exp_avg_sq = state['max_exp_avg_sq']\n                beta1, beta2 = group['betas']\n\n                state['step'] += 1\n\n                # Decay the first and second moment running average coefficient\n                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n                if amsbound:\n                    # Maintains the maximum of all 2nd moment running avg. till now\n                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n                    # Use the max. 
for normalizing running avg. of gradient\n                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n                else:\n                    denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n                bias_correction1 = 1 - beta1 ** state['step']\n                bias_correction2 = 1 - beta2 ** state['step']\n                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n                # Applies bounds on actual learning rate\n                # lr_scheduler cannot affect final_lr, this is a workaround to apply lr decay\n                final_lr = group['final_lr'] * group['lr'] / base_lr\n                lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))\n                upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))\n                step_size = torch.full_like(denom, step_size)\n                step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)\n\n                if group['weight_decay'] != 0:\n                    decayed_weights = torch.mul(p.data, group['weight_decay'])\n                    p.data.add_(-step_size)\n                    p.data.sub_(decayed_weights)\n                else:\n                    p.data.add_(-step_size)\n\n        return loss\n"
  },
  {
    "path": "utils/datasets.py",
    "content": "import glob\nimport math\nimport os\nimport random\nimport shutil\nimport time\nfrom pathlib import Path\nfrom threading import Thread\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image, ExifTags\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\nfrom utils.utils import xyxy2xywh, xywh2xyxy\n\nhelp_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'\nimg_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']\nvid_formats = ['.mov', '.avi', '.mp4']\n\n# Get orientation exif tag\nfor orientation in ExifTags.TAGS.keys():\n    if ExifTags.TAGS[orientation] == 'Orientation':\n        break\n\n\ndef exif_size(img):\n    # Returns exif-corrected PIL size\n    s = img.size  # (width, height)\n    try:\n        rotation = dict(img._getexif().items())[orientation]\n        if rotation == 6:  # rotation 270\n            s = (s[1], s[0])\n        elif rotation == 8:  # rotation 90\n            s = (s[1], s[0])\n    except:\n        pass\n\n    return s\n\n\nclass LoadImages:  # for inference\n    def __init__(self, path, img_size=416, is_gray_scale=False, rect=False):\n        path = str(Path(path))  # os-agnostic\n        files = []\n        if os.path.isdir(path):\n            files = sorted(glob.glob(os.path.join(path, '*.*')))\n        elif os.path.isfile(path):\n            files = [path]\n\n        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]\n        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]\n        nI, nV = len(images), len(videos)\n\n        self.img_size = img_size\n        self.files = images + videos\n        self.nF = nI + nV  # number of files\n        self.video_flag = [False] * nI + [True] * nV\n        self.mode = 'images'\n        self.is_gray_scale = is_gray_scale\n        self.rect = rect\n        if any(videos):\n            self.new_video(videos[0])  # new video\n        else:\n            self.cap = None\n        assert self.nF > 0, 'No images or videos found in ' + path\n\n    def __iter__(self):\n        self.count = 0\n        return self\n\n    def __next__(self):\n        if self.count == self.nF:\n            raise StopIteration\n        path = self.files[self.count]\n\n        if self.video_flag[self.count]:\n            # Read video\n            self.mode = 'video'\n            ret_val, img0 = self.cap.read()\n            if not ret_val:\n                self.count += 1\n                self.cap.release()\n                if self.count == self.nF:  # last video\n                    raise StopIteration\n                else:\n                    path = self.files[self.count]\n                    self.new_video(path)\n                    ret_val, img0 = self.cap.read()\n\n            self.frame += 1\n            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')\n\n        else:\n            # Read image\n            self.count += 1\n            if self.is_gray_scale:\n                img0 = cv2.imread(path, flags=cv2.IMREAD_GRAYSCALE)  # gray scale\n                img0 = np.expand_dims(img0, axis=-1)\n            else:\n                img0 = cv2.imread(path)  # BGR\n            assert img0 is not None, 'Image Not Found ' + path\n            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')\n\n        # Padded resize\n        if self.rect:\n            img = letterbox(img0, new_shape=self.img_size, is_gray_scale=self.is_gray_scale)[0]\n        else:\n            img = 
letterbox(img0, new_shape=self.img_size, auto=False, is_gray_scale=self.is_gray_scale)[0]\n        # Convert\n        img = img[:, :, ::-1].transpose(2, 0, 1).copy()  # BGR to RGB, to 3x416x416\n        img = np.ascontiguousarray(img)\n\n        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image\n        return path, img, img0, self.cap\n\n    def new_video(self, path):\n        self.frame = 0\n        self.cap = cv2.VideoCapture(path)\n        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n    def __len__(self):\n        return self.nF  # number of files\n\n\nclass LoadWebcam:  # for inference\n    def __init__(self, pipe=0, img_size=416):\n        self.img_size = img_size\n\n        if pipe == '0':\n            pipe = 0  # local camera\n        # pipe = 'rtsp://192.168.1.64/1'  # IP camera\n        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login\n        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera\n        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera\n\n        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/\n        # pipe = '\"rtspsrc location=\"rtsp://username:password@192.168.1.64/1\" latency=10 ! appsink'  # GStreamer\n\n        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/\n        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help\n        # pipe = \"rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! 
appsink\"  # GStreamer\n\n        self.pipe = pipe\n        self.cap = cv2.VideoCapture(pipe)  # video capture object\n        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size\n\n    def __iter__(self):\n        self.count = -1\n        return self\n\n    def __next__(self):\n        self.count += 1\n        if cv2.waitKey(1) == ord('q'):  # q to quit\n            self.cap.release()\n            cv2.destroyAllWindows()\n            raise StopIteration\n\n        # Read frame\n        if self.pipe == 0:  # local camera\n            ret_val, img0 = self.cap.read()\n            img0 = cv2.flip(img0, 1)  # flip left-right\n        else:  # IP camera\n            n = 0\n            while True:\n                n += 1\n                self.cap.grab()\n                if n % 30 == 0:  # skip frames\n                    ret_val, img0 = self.cap.retrieve()\n                    if ret_val:\n                        break\n\n        # Print\n        assert ret_val, 'Camera Error %s' % self.pipe\n        img_path = 'webcam.jpg'\n        print('webcam %g: ' % self.count, end='')\n\n        # Padded resize\n        img = letterbox(img0, new_shape=self.img_size)[0]\n\n        # Convert\n        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416\n        img = np.ascontiguousarray(img)\n\n        return img_path, img, img0, None\n\n    def __len__(self):\n        return 0\n\n\nclass LoadStreams:  # multiple IP or RTSP cameras\n    def __init__(self, sources='streams.txt', img_size=416):\n        self.mode = 'images'\n        self.img_size = img_size\n\n        if os.path.isfile(sources):\n            with open(sources, 'r') as f:\n                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]\n        else:\n            sources = [sources]\n\n        n = len(sources)\n        self.imgs = [None] * n\n        self.sources = sources\n        for i, s in enumerate(sources):\n            # Start the thread to read frames from the video stream\n            print('%g/%g: %s... ' % (i + 1, n, s), end='')\n            cap = cv2.VideoCapture(0 if s == '0' else s)\n            assert cap.isOpened(), 'Failed to open %s' % s\n            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n            fps = cap.get(cv2.CAP_PROP_FPS) % 100\n            _, self.imgs[i] = cap.read()  # guarantee first frame\n            thread = Thread(target=self.update, args=([i, cap]), daemon=True)\n            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))\n            thread.start()\n        print('')  # newline\n\n        # check for common shapes\n        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes\n        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal\n        if not self.rect:\n            print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.')\n\n    def update(self, index, cap):\n        # Read next stream frame in a daemon thread\n        n = 0\n        while cap.isOpened():\n            n += 1\n            # _, self.imgs[index] = cap.read()\n            cap.grab()\n            if n == 4:  # read every 4th frame\n                _, self.imgs[index] = cap.retrieve()\n                n = 0\n            time.sleep(0.01)  # wait time\n\n    def __iter__(self):\n        self.count = -1\n        return self\n\n    def __next__(self):\n        self.count += 1\n        img0 = self.imgs.copy()\n        if cv2.waitKey(1) == ord('q'):  # q to quit\n            cv2.destroyAllWindows()\n            raise StopIteration\n\n        # Letterbox\n        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]\n\n        # Stack\n        img = np.stack(img, 0)\n\n        # Convert\n        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416\n        img = np.ascontiguousarray(img)\n\n        return self.sources, img, img0, None\n\n    def __len__(self):\n        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years\n\n\nclass LoadImagesAndLabels(Dataset):  # for training/testing\n    def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,\n                 cache_images=False, rank=-1, is_gray_scale=False, subset_len=-1):\n        path = str(Path(path))  # os-agnostic\n        assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)\n        with open(path, 'r') as f:\n            self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines()  # os-agnostic\n                              if os.path.splitext(x)[-1].lower() in img_formats]\n        if subset_len != -1:\n            assert subset_len <= len(self.img_files)\n            self.img_files = random.sample(self.img_files, subset_len)\n        n = len(self.img_files)\n        assert n > 0, 'No images found in %s. 
See %s' % (path, help_url)\n        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index\n        nb = bi[-1] + 1  # number of batches\n\n        self.n = n\n        self.batch = bi  # batch index of image\n        self.img_size = img_size\n        self.augment = augment\n        self.hyp = hyp\n        self.image_weights = image_weights\n        self.rect = False if image_weights else rect\n        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)\n        self.is_gray_scale = is_gray_scale\n\n        # Define labels\n        self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')\n                            for x in self.img_files]\n\n        # Rectangular Training  https://github.com/ultralytics/yolov3/issues/232\n        if self.rect:\n            # Read image shapes (wh)\n            sp = path.replace('.txt', '.shapes')  # shapefile path\n            try:\n                with open(sp, 'r') as f:  # read existing shapefile\n                    s = [x.split() for x in f.read().splitlines()]\n                    assert len(s) == n, 'Shapefile out of sync'\n            except:\n                s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]\n                np.savetxt(sp, s, fmt='%g')  # overwrites existing (if any)\n\n            # Sort by aspect ratio\n            s = np.array(s, dtype=np.float64)\n            ar = s[:, 1] / s[:, 0]  # aspect ratio\n            i = ar.argsort()\n            self.img_files = [self.img_files[j] for j in i]\n            self.label_files = [self.label_files[j] for j in i]\n            self.shapes = s[i]  # wh\n            ar = ar[i]\n\n            # Set training image shapes\n            shapes = [[1, 1]] * nb\n            for i in range(nb):\n                ari = ar[bi == i]\n                mini, maxi = ari.min(), ari.max()\n                if maxi < 1:\n                    shapes[i] = [maxi, 1]\n                elif mini > 1:\n                    shapes[i] = [1, 1 / mini]\n\n            self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(int) * 32\n\n        # Cache labels\n        self.imgs = [None] * n\n        self.labels = [np.zeros((0, 5), dtype=np.float32)] * n\n        extract_bounding_boxes = False\n        create_datasubset = False\n        pbar = tqdm(self.label_files, desc='Caching labels')\n        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate\n        for i, file in enumerate(pbar):\n            try:\n                with open(file, 'r') as f:\n                    l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)\n            except:\n                nm += 1  # print('missing labels for image %s' % self.img_files[i])  # file missing\n                continue\n\n            if l.shape[0]:\n                assert l.shape[1] == 5, '> 5 label columns: %s' % file\n                assert (l >= 0).all(), 'negative labels: %s' % file\n                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file\n                if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows\n                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows\n                self.labels[i] = l\n                nf += 1  # file found\n\n                # Create subdataset (a smaller dataset)\n                if create_datasubset and ns 
< 1E4:\n                    if ns == 0:\n                        create_folder(path='./datasubset')\n                        os.makedirs('./datasubset/images')\n                    exclude_classes = 43\n                    if exclude_classes not in l[:, 0]:\n                        ns += 1\n                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image\n                        with open('./datasubset/images.txt', 'a') as f:\n                            f.write(self.img_files[i] + '\\n')\n\n                # Extract object detection boxes for a second stage classifier\n                if extract_bounding_boxes:\n                    p = Path(self.img_files[i])\n                    img = cv2.imread(str(p))\n                    h, w = img.shape[:2]\n                    for j, x in enumerate(l):\n                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)\n                        if not os.path.exists(Path(f).parent):\n                            os.makedirs(Path(f).parent)  # make new output folder\n\n                        b = x[1:] * [w, h, w, h]  # box\n                        b[2:] = b[2:].max()  # rectangle to square\n                        b[2:] = b[2:] * 1.3 + 30  # pad\n                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)\n\n                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image\n                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)\n                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'\n            else:\n                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty\n                # os.system(\"rm '%s' '%s'\" % (self.img_files[i], self.label_files[i]))  # remove\n\n            pbar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (\n                nf, nm, ne, nd, n)\n        assert nf > 0, 'No labels found in %s. 
See %s' % (os.path.dirname(file) + os.sep, help_url)\n\n        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)\n        if cache_images:  # if training\n            gb = 0  # Gigabytes of cached images\n            pbar = tqdm(range(len(self.img_files)), desc='Caching images')\n            self.img_hw0, self.img_hw = [None] * n, [None] * n\n            for i in pbar:  # max 10k images\n                self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i)  # img, hw_original, hw_resized\n                gb += self.imgs[i].nbytes\n                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)\n\n        # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3\n        detect_corrupted_images = False\n        if detect_corrupted_images:\n            from skimage import io  # conda install -c conda-forge scikit-image\n            for file in tqdm(self.img_files, desc='Detecting corrupted images'):\n                try:\n                    _ = io.imread(file)\n                except:\n                    print('Corrupted image detected: %s' % file)\n\n    def __len__(self):\n        return len(self.img_files)\n\n    # def __iter__(self):\n    #     self.count = -1\n    #     print('ran dataset iter')\n    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)\n    #     return self\n\n    def __getitem__(self, index):\n        if self.image_weights:\n            index = self.indices[index]\n\n        hyp = self.hyp\n        if self.mosaic:\n            # Load mosaic\n            img, labels = load_mosaic(self, index, self.is_gray_scale)\n            shapes = None\n\n        else:\n            # Load image\n            img, (h0, w0), (h, w) = load_image(self, index, self.is_gray_scale)\n\n            # Letterbox\n            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape\n            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)\n            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling\n\n            # Load labels\n            labels = []\n            x = self.labels[index]\n            if x.size > 0:\n                # Normalized xywh to pixel xyxy format\n                labels = x.copy()\n                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width\n                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height\n                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]\n                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]\n\n        if self.augment:\n            # Augment imagespace\n            if not self.mosaic:\n                img, labels = random_affine(img, labels,\n                                            degrees=hyp['degrees'],\n                                            translate=hyp['translate'],\n                                            scale=hyp['scale'],\n                                            shear=hyp['shear'])\n\n            # Augment colorspace\n            if not self.is_gray_scale:\n                augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])\n\n            # Apply cutouts\n            # if random.random() < 0.9:\n            #     labels = cutout(img, labels)\n\n        nL = len(labels)  # number of labels\n        if nL:\n            # convert xyxy to 
xywh\n            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])\n\n            # Normalize coordinates 0 - 1\n            labels[:, [2, 4]] /= img.shape[0]  # height\n            labels[:, [1, 3]] /= img.shape[1]  # width\n\n        if self.augment:\n            # random left-right flip\n            lr_flip = True\n            if lr_flip and random.random() < 0.5:\n                img = np.fliplr(img)\n                if nL:\n                    labels[:, 1] = 1 - labels[:, 1]\n\n            # random up-down flip\n            ud_flip = False\n            if ud_flip and random.random() < 0.5:\n                img = np.flipud(img)\n                if nL:\n                    labels[:, 2] = 1 - labels[:, 2]\n\n        labels_out = torch.zeros((nL, 6))\n        if nL:\n            labels_out[:, 1:] = torch.from_numpy(labels)\n\n        # Convert\n        if not self.is_gray_scale:\n            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416\n        img = np.ascontiguousarray(img)\n        if self.is_gray_scale:\n            img = np.expand_dims(img, axis=0)\n\n        return torch.from_numpy(img), labels_out, self.img_files[index], shapes\n\n    @staticmethod\n    def collate_fn(batch):\n        img, label, path, shapes = zip(*batch)  # transposed\n        for i, l in enumerate(label):\n            l[:, 0] = i  # add target image index for build_targets()\n        return torch.stack(img, 0), torch.cat(label, 0), path, shapes\n\n\ndef load_image(self, index, is_gray_scale=False):\n    # loads 1 image from dataset, returns img, original hw, resized hw\n    img = self.imgs[index]\n    if img is None:  # not cached\n        path = self.img_files[index]\n        if is_gray_scale:\n            img = cv2.imread(path, flags=cv2.IMREAD_GRAYSCALE)  # gray scale\n            img = np.expand_dims(img, axis=-1)\n        else:\n            img = cv2.imread(path)  # BGR\n        assert img is not None, 'Image Not Found ' + path\n        h0, w0 = img.shape[:2]  # orig hw\n        r = self.img_size / max(h0, w0)  # resize image to img_size\n        if r < 1 or (self.augment and r != 1):  # always resize down, only resize up if training with augmentation\n            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR\n            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)\n            if is_gray_scale:\n                img = np.expand_dims(img, axis=-1)\n        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized\n    else:\n        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized\n\n\ndef augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):\n    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains\n    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))\n    dtype = img.dtype  # uint8\n\n    x = np.arange(0, 256, dtype=np.int16)\n    lut_hue = ((x * r[0]) % 180).astype(dtype)\n    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)\n    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed\n\n    # Histogram equalization\n    # if random.random() < 0.2:\n    #     for i in range(3):\n    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])\n\n\ndef load_mosaic(self, index, is_gray_scale=False):\n    # loads images in a mosaic\n\n    labels4 = []\n    s = 
self.img_size\n    xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)]  # mosaic center x, y\n    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices\n    for i, index in enumerate(indices):\n        # Load image\n        img, _, (h, w) = load_image(self, index, is_gray_scale)\n\n        # place img in img4\n        if i == 0:  # top left\n            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles\n            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)\n            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)\n        elif i == 1:  # top right\n            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n        elif i == 2:  # bottom left\n            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)\n        elif i == 3:  # bottom right\n            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]\n        padw = x1a - x1b\n        padh = y1a - y1b\n\n        # Labels\n        x = self.labels[index]\n        labels = x.copy()\n        if x.size > 0:  # Normalized xywh to pixel xyxy format\n            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw\n            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh\n            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw\n            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh\n        labels4.append(labels)\n\n    # Concat/clip labels\n    if len(labels4):\n        labels4 = np.concatenate(labels4, 0)\n        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop\n        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_affine\n\n    # Augment\n    # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)]  # center crop (WARNING, requires box pruning)\n    img4, labels4 = random_affine(img4, labels4,\n                                  degrees=self.hyp['degrees'],\n                                  translate=self.hyp['translate'],\n                                  scale=self.hyp['scale'],\n                                  shear=self.hyp['shear'],\n                                  border=-s // 2)  # border to remove\n\n    return img4, labels4\n\n\ndef letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True,\n              is_gray_scale=False):\n    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n    shape = img.shape[:2]  # current shape [height, width]\n    if isinstance(new_shape, int):\n        new_shape = (new_shape, new_shape)\n\n    # Scale ratio (new / old)\n    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n    if not scaleup:  # only scale down, do not scale up (for better test mAP)\n        r = min(r, 1.0)\n\n    # Compute padding\n    ratio = r, r  # width, height ratios\n    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding\n    if auto:  # minimum rectangle\n        
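# reduce padding to the minimum needed to make each side a multiple of 64\n        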
dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding\n    elif scaleFill:  # stretch\n        dw, dh = 0.0, 0.0\n        new_unpad = new_shape\n        ratio = new_shape[0] / shape[1], new_shape[1] / shape[0]  # width, height ratios\n\n    dw /= 2  # divide padding into 2 sides\n    dh /= 2\n\n    if shape[::-1] != new_unpad:  # resize\n        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n        if is_gray_scale:\n            img = np.expand_dims(img, axis=-1)\n    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border\n    if is_gray_scale:\n        img = np.expand_dims(img, axis=-1)\n    return img, ratio, (dw, dh)\n\n\ndef random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):\n    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))\n    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4\n\n    if targets is None:  # targets = [cls, xyxy]\n        targets = []\n    height = img.shape[0] + border * 2\n    width = img.shape[1] + border * 2\n\n    # Rotation and Scale\n    R = np.eye(3)\n    a = random.uniform(-degrees, degrees)\n    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations\n    s = random.uniform(1 - scale, 1 + scale)\n    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)\n\n    # Translation\n    T = np.eye(3)\n    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)\n    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)\n\n    # Shear\n    S = np.eye(3)\n    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)\n    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)\n\n    # Combined rotation matrix\n    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!\n    if (border != 0) or (M != np.eye(3)).any():  # image changed\n        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))\n\n    # Transform label coordinates\n    n = len(targets)\n    if n:\n        # warp points\n        xy = np.ones((n * 4, 3))\n        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1\n        xy = (xy @ M.T)[:, :2].reshape(n, 8)\n\n        # create new boxes\n        x = xy[:, [0, 2, 4, 6]]\n        y = xy[:, [1, 3, 5, 7]]\n        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n        # # apply angle-based reduction of bounding boxes\n        # radians = a * math.pi / 180\n        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5\n        # x = (xy[:, 2] + xy[:, 0]) / 2\n        # y = (xy[:, 3] + xy[:, 1]) / 2\n        # w = (xy[:, 2] - xy[:, 0]) * reduction\n        # h = (xy[:, 3] - xy[:, 1]) * reduction\n        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T\n\n        # reject warped points outside of image\n        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)\n        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)\n        w = xy[:, 2] - xy[:, 0]\n        h = xy[:, 3] - xy[:, 1]\n        area = w * h\n        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] 
- targets[:, 2])\n        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio\n        i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)\n\n        targets = targets[i]\n        targets[:, 1:5] = xy[i]\n\n    return img, targets\n\n\ndef cutout(image, labels):\n    # https://arxiv.org/abs/1708.04552\n    # https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py\n    # https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509\n    h, w = image.shape[:2]\n\n    def bbox_ioa(box1, box2):\n        # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2\n        box2 = box2.transpose()\n\n        # Get the coordinates of bounding boxes\n        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n\n        # Intersection area\n        inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \\\n                     (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)\n\n        # box2 area\n        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16\n\n        # Intersection over box2 area\n        return inter_area / box2_area\n\n    # create random masks\n    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction\n    for s in scales:\n        mask_h = random.randint(1, int(h * s))\n        mask_w = random.randint(1, int(w * s))\n\n        # box\n        xmin = max(0, random.randint(0, w) - mask_w // 2)\n        ymin = max(0, random.randint(0, h) - mask_h // 2)\n        xmax = min(w, xmin + mask_w)\n        ymax = min(h, ymin + mask_h)\n\n        # apply random color mask\n        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]\n\n        # return unobscured labels\n        if len(labels) and s > 0.03:\n            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)\n            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area\n            labels = labels[ioa < 0.60]  # remove >60% obscured labels\n\n    return labels\n\n\n# class FenceMask(torch.nn.Module):\n#     def __init__(self, img_size, mean, probability=0.8):\n#         super(FenceMask, self).__init__()\n#         self.x = torch.nn.Parameter((0.25 - 0.05) * torch.rand(1) + 0.05, requires_grad=True)\n#         self.y = torch.nn.Parameter((0.25 - 0.05) * torch.rand(1) + 0.05, requires_grad=True)\n#         self.l1 = torch.nn.Parameter((0.25 - 0.05) * torch.rand(1) + 0.05, requires_grad=True)\n#         self.l2 = torch.nn.Parameter((0.25 - 0.05) * torch.rand(1) + 0.05, requires_grad=True)\n#         self.mean = mean\n#         self.probability = probability\n#         self.st_prob = self.prob = probability\n#         self.img_size = img_size\n#\n#     def set_prob(self, epoch, max_epoch):\n#         self.prob = self.st_prob * min(1, epoch / max_epoch)\n#\n#     def forward(self, x):\n#         if not self.training:\n#             return x\n#         n, c, h, w = x.size()\n#         imgs = []\n#         masks = []\n#         for i in range(n):\n#             img, mask = self.Fence(x[i])\n#             imgs.append(img)\n#             masks.append(mask)\n#         imgs = torch.cat(imgs).view(n, c, h, w)\n#         masks = torch.cat(masks).view(n, c, h, w)\n#         return imgs, masks\n#\n#     def Fence(self, img):\n#         if random.uniform(0, 1) > self.prob:\n#             
mask = img.new_ones(img.shape)\n#             return img, mask\n#\n#         sp = img.shape\n#         height, width = sp[1], sp[2]\n#\n#         # mask_1 holds the horizontal stripes, mask_2 the vertical stripes\n#         mask_1 = np.ones(shape=(sp[1], sp[2], 3))\n#         mask_2 = np.ones(shape=(sp[1], sp[2], 3))\n#         x, y, l1, l2 = int(self.x * self.img_size), int(self.y * self.img_size), int(self.l1 * self.img_size), int(self.l2 * self.img_size)\n#         for i in range(1, height // (l1 + x) + 1):\n#             mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 0] = self.mean[0]\n#             mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 1] = self.mean[1]\n#             mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 2] = self.mean[2]\n#         for i in range(1, width // (l2 + y) + 1):\n#             mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 0] = self.mean[0]\n#             mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 1] = self.mean[1]\n#             mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 2] = self.mean[2]\n#\n#         # rotate the two generated masks by a random angle\n#         center = (width / 2, height / 2)\n#         rotation_1, rotation_2 = random.randint(0, 360), random.randint(0, 360)\n#         M_1 = cv2.getRotationMatrix2D(center, rotation_1, 2)\n#         M_2 = cv2.getRotationMatrix2D(center, rotation_2, 2)\n#         mask_1 = cv2.warpAffine(mask_1, M_1, (width, height))\n#         mask_2 = cv2.warpAffine(mask_2, M_2, (width, height))\n#\n#         mask = (mask_1 * mask_2)\n#         # cv2.imwrite('mask.png', mask * 255)\n#         mask = mask.transpose(2, 0, 1)\n#         mask = torch.from_numpy(mask).float().cuda()\n#         img = img * mask\n#         return img, mask\n\nclass FenceMask(torch.nn.Module):\n    def __init__(self, batch_size, img_size, probability):\n        super(FenceMask, self).__init__()\n        self.img_size = img_size\n        self.batch_size = batch_size\n        self.group_size = 10\n        self.group_number = None\n        group_masks = []\n        for j in range(self.group_size):\n            masks = []\n            for k in range(batch_size):\n                x = random.randint(self.img_size // 32, self.img_size // 16)\n                y = random.randint(self.img_size // 32, self.img_size // 16)\n                l1 = random.randint(self.img_size // 16, self.img_size // 8)\n                l2 = random.randint(self.img_size // 16, self.img_size // 8)\n                # mask_1 holds the horizontal stripes, mask_2 the vertical stripes\n                mask_1 = np.ones(shape=(self.img_size, self.img_size, 3))\n                mask_2 = np.ones(shape=(self.img_size, self.img_size, 3))\n                height = self.img_size\n                width = self.img_size\n                for i in range(1, height // (l1 + x) + 1):\n                    mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 0] = 0\n                    mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 1] = 0\n                    mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 2] = 0\n                for i in range(1, width // (l2 + y) + 1):\n                    mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 0] = 0\n                    mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 1] = 0\n                    mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 2] = 0\n                # rotate the two generated masks by a random angle\n                center = (width / 2, height / 2)\n                rotation_1, rotation_2 = random.randint(0, 360), random.randint(0, 360)\n                M_1 = cv2.getRotationMatrix2D(center, rotation_1, 2)\n                M_2 = cv2.getRotationMatrix2D(center, rotation_2, 2)\n
                mask_1 = cv2.warpAffine(mask_1, M_1, (width, height))\n                mask_2 = cv2.warpAffine(mask_2, M_2, (width, height))\n\n                mask = (mask_1 * mask_2)\n                # cv2.imwrite('mask.png', mask * 255)\n                mask = mask.transpose(2, 0, 1)\n                mask = torch.from_numpy(mask).unsqueeze(0)\n                masks.append(mask)\n            masks = torch.cat(masks, dim=0).int()\n            mask_white = (0.5 * torch.rand((batch_size, 3, img_size, img_size)) + 0.5) * masks\n            mask_black = (0.5 * torch.rand((batch_size, 3, img_size, img_size))) * (1 - masks)\n            masks = mask_black + mask_white\n            group_masks.append(masks.unsqueeze(0))\n        group_masks = torch.cat(group_masks, dim=0)\n        self.group_masks = torch.nn.Parameter(group_masks, requires_grad=True)\n        self.st_prob = self.prob = probability\n\n    def forward(self, x):\n        masks = None\n        if random.uniform(0, 1) > self.prob:\n            return x, masks\n        if x.size(0) != self.group_masks.size(1):\n            return x, masks\n        # for img in x:\n        #     img = img.cpu().detach().numpy()\n        #     image = img.transpose(1, 2, 0)\n        #     cv2.imshow('image', image)\n        #     cv2.waitKey(500)\n        # masks = binarize(self.masks)\n        self.group_number = random.randrange(self.group_size)\n        masks = self.group_masks[self.group_number]\n        # for img in (x*masks):\n        #     img = img.cpu().detach().numpy()\n        #     image = img.transpose(1, 2, 0)\n        #     cv2.imshow('image', image)\n        #     cv2.waitKey(500)\n\n        return x * masks, masks\n\n    def set_prob(self, epoch, max_epoch):\n        self.prob = self.st_prob * min(1, epoch / max_epoch)\n\n\nclass Grid(object):\n    def __init__(self, d1, d2, rotate=1, ratio=0.5, mode=0, prob=1.):\n        self.d1 = d1\n        self.d2 = d2\n        self.rotate = rotate\n        self.ratio = ratio\n        self.mode = mode\n        self.st_prob = self.prob = prob\n\n    def set_prob(self, epoch, max_epoch):\n        self.prob = self.st_prob * min(1, epoch / max_epoch)\n\n    def __call__(self, img):\n        if np.random.rand() > self.prob:\n            return img\n        h = img.shape[1]\n        w = img.shape[2]\n\n        # 1.5 * h, 1.5 * w works fine with square images\n        # But with rectangular input, the mask might not be able to recover back to the input image shape\n        # A square mask with edge length equal to the diagonal of the input image\n        # will be able to cover the whole image after the rotation. This is also the minimum such square.\n        hh = math.ceil((math.sqrt(h * h + w * w)))\n\n        d = np.random.randint(self.d1, self.d2)\n        # d = self.d\n\n        # maybe use ceil? 
but i guess no big difference\n        self.l = math.ceil(d * self.ratio)\n\n        mask = np.ones((hh, hh), np.float32)\n        st_h = np.random.randint(d)\n        st_w = np.random.randint(d)\n        for i in range(-1, hh // d + 1):\n            s = d * i + st_h\n            t = s + self.l\n            s = max(min(s, hh), 0)\n            t = max(min(t, hh), 0)\n            mask[s:t, :] *= 0\n        for i in range(-1, hh // d + 1):\n            s = d * i + st_w\n            t = s + self.l\n            s = max(min(s, hh), 0)\n            t = max(min(t, hh), 0)\n            mask[:, s:t] *= 0\n        r = np.random.randint(self.rotate)\n        mask = Image.fromarray(np.uint8(mask))\n        mask = mask.rotate(r)\n        mask = np.asarray(mask)\n        mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (hh - w) // 2:(hh - w) // 2 + w]\n\n        mask = torch.from_numpy(mask).float().cuda()\n        if self.mode == 1:\n            mask = 1 - mask\n\n        mask = mask.expand_as(img)\n        img = img * mask\n\n        return img\n\n\nclass GridMask(torch.nn.Module):\n    def __init__(self, d1, d2, rotate=1, ratio=0.5, mode=0, prob=1.):\n        super(GridMask, self).__init__()\n        self.rotate = rotate\n        self.ratio = ratio\n        self.mode = mode\n        self.st_prob = prob\n        self.grid = Grid(d1, d2, rotate, ratio, mode, prob)\n\n    def set_prob(self, epoch, max_epoch):\n        self.grid.set_prob(epoch, max_epoch)\n\n    def forward(self, x):\n        if not self.training:\n            return x\n        n, c, h, w = x.size()\n        y = []\n        for i in range(n):\n            y.append(self.grid(x[i]))\n        y = torch.cat(y).view(n, c, h, w)\n        return y\n\n\ndef reduce_img_size(path='../data/sm4/images', img_size=1024):  # from utils.datasets import *; reduce_img_size()\n    # creates a new ./images_reduced folder with reduced size images of maximum size img_size\n    path_new = path + '_reduced'  # reduced images path\n    create_folder(path_new)\n    for f in tqdm(glob.glob('%s/*.*' % path)):\n        try:\n            img = cv2.imread(f)\n            h, w = img.shape[:2]\n            r = img_size / max(h, w)  # size ratio\n            if r < 1.0:\n                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)  # _LINEAR fastest\n            fnew = f.replace(path, path_new)  # .replace(Path(f).suffix, '.jpg')\n            cv2.imwrite(fnew, img)\n        except:\n            print('WARNING: image failure %s' % f)\n\n\ndef convert_images2bmp():  # from utils.datasets import *; convert_images2bmp()\n    # Save images\n    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]\n    # for path in ['../coco/images/val2014', '../coco/images/train2014']:\n    for path in ['../data/sm4/images', '../data/sm4/background']:\n        create_folder(path + 'bmp')\n        for ext in formats:  # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']\n            for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):\n                cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))\n\n    # Save labels\n    # for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:\n    for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:\n        with open(file, 'r') as f:\n            lines = f.read()\n            # lines = f.read().replace('2014/', '2014bmp/')  # coco\n            lines = lines.replace('/images', '/imagesbmp')\n            lines = 
lines.replace('/background', '/backgroundbmp')\n        for ext in formats:\n            lines = lines.replace(ext, '.bmp')\n        with open(file.replace('.txt', 'bmp.txt'), 'w') as f:\n            f.write(lines)\n\n\ndef recursive_dataset2bmp(dataset='../data/sm4_bmp'):  # from utils.datasets import *; recursive_dataset2bmp()\n    # Converts dataset to bmp (for faster training)\n    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]\n    for a, b, files in os.walk(dataset):\n        for file in tqdm(files, desc=a):\n            p = a + '/' + file\n            s = Path(file).suffix\n            if s == '.txt':  # replace text\n                with open(p, 'r') as f:\n                    lines = f.read()\n                for f in formats:\n                    lines = lines.replace(f, '.bmp')\n                with open(p, 'w') as f:\n                    f.write(lines)\n            elif s in formats:  # replace image\n                cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))\n                if s != '.bmp':\n                    os.system(\"rm '%s'\" % p)\n\n\ndef imagelist2folder(path='data/coco_64img.txt'):  # from utils.datasets import *; imagelist2folder()\n    # Copies all the images in a text file (list of images) into a folder\n    create_folder(path[:-4])\n    with open(path, 'r') as f:\n        for line in f.read().splitlines():\n            os.system('cp \"%s\" %s' % (line, path[:-4]))\n            print(line)\n\n\ndef create_folder(path='./new_folder'):\n    # Create folder\n    if os.path.exists(path):\n        shutil.rmtree(path)  # delete output folder\n    os.makedirs(path)  # make new output folder\n"
  },
  {
    "path": "utils/gcp.sh",
    "content": "#!/usr/bin/env bash\n\n# New VM\nrm -rf sample_data yolov3\ngit clone https://github.com/ultralytics/yolov3\n# git clone -b test --depth 1 https://github.com/ultralytics/yolov3 test  # branch\n# sudo apt-get install zip\n#git clone https://github.com/NVIDIA/apex && cd apex && pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" . --user && cd .. && rm -rf apex\nsudo conda install -yc conda-forge scikit-image pycocotools\n# python3 -c \"from yolov3.utils.google_utils import gdrive_download; gdrive_download('193Zp_ye-3qXMonR1nZj3YyxMtQkMy50k','coco2014.zip')\"\npython3 -c \"from yolov3.utils.google_utils import gdrive_download; gdrive_download('1WQT6SOktSe8Uw6r10-2JhbEhMY5DJaph','coco2017.zip')\"\npython3 -c \"from yolov3.utils.google_utils import gdrive_download; gdrive_download('1C3HewOG9akA3y456SZLBJZfNDPkBwAto','knife.zip')\"\npython3 -c \"from yolov3.utils.google_utils import gdrive_download; gdrive_download('13g3LqdpkNE8sPosVJT6KFXlfoMypzRP4','sm4.zip')\"\nsudo shutdown\n\n# Mount local SSD\nlsblk\nsudo mkfs.ext4 -F /dev/nvme0n1\nsudo mkdir -p /mnt/disks/nvme0n1\nsudo mount /dev/nvme0n1 /mnt/disks/nvme0n1\nsudo chmod a+w /mnt/disks/nvme0n1\ncp -r coco /mnt/disks/nvme0n1\n\n# Kill All\nt=ultralytics/yolov3:v1\ndocker kill $(docker ps -a -q --filter ancestor=$t)\n\n# Evolve coco\nsudo -s\nt=ultralytics/yolov3:evolve\n# docker kill $(docker ps -a -q --filter ancestor=$t)\nfor i in 0 1 6 7\ndo\n  docker pull $t && docker run --gpus all -d --ipc=host -v \"$(pwd)\"/coco:/usr/src/coco $t bash utils/evolve.sh $i\n  sleep 30\ndone\n\n#COCO training\nn=131 && t=ultralytics/coco:v131 && sudo docker pull $t && sudo docker run -it --gpus all --ipc=host -v \"$(pwd)\"/coco:/usr/src/coco $t python3 train.py --data coco2014.data --img-size 320 640 --epochs 300 --batch 16 --weights '' --device 0 --cfg yolov3-spp.cfg --bucket ult/coco --name $n && sudo shutdown\nn=132 && t=ultralytics/coco:v131 && sudo docker pull $t && sudo docker run -it --gpus all --ipc=host -v \"$(pwd)\"/coco:/usr/src/coco $t python3 train.py --data coco2014.data --img-size 320 640 --epochs 300 --batch 64 --weights '' --device 0 --cfg yolov3-tiny.cfg --bucket ult/coco --name $n && sudo shutdown\n"
  },
  {
    "path": "utils/google_utils.py",
    "content": "# This file contains google utils: https://cloud.google.com/storage/docs/reference/libraries\n# pip install --upgrade google-cloud-storage\n\nimport os\nimport time\n\n\n# from google.cloud import storage\n\n\ndef gdrive_download(id='1HaXkef9z6y5l4vUnCYgdmEAj61c6bfWO', name='coco.zip'):\n    # https://gist.github.com/tanaikech/f0f2d122e05bf5f971611258c22c110f\n    # Downloads a file from Google Drive, accepting presented query\n    # from utils.google_utils import *; gdrive_download()\n    t = time.time()\n\n    print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')\n    os.remove(name) if os.path.exists(name) else None  # remove existing\n    os.remove('cookie') if os.path.exists('cookie') else None\n\n    # Attempt file download\n    os.system(\"curl -c ./cookie -s -L \\\"https://drive.google.com/uc?export=download&id=%s\\\" > /dev/null\" % id)\n    if os.path.exists('cookie'):  # large file\n        s = \"curl -Lb ./cookie \\\"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=%s\\\" -o %s\" % (\n            id, name)\n    else:  # small file\n        s = \"curl -s -L -o %s 'https://drive.google.com/uc?export=download&id=%s'\" % (name, id)\n    r = os.system(s)  # execute, capture return values\n    os.remove('cookie') if os.path.exists('cookie') else None\n\n    # Error check\n    if r != 0:\n        os.remove(name) if os.path.exists(name) else None  # remove partial\n        print('Download error ')  # raise Exception('Download error')\n        return r\n\n    # Unzip if archive\n    if name.endswith('.zip'):\n        print('unzipping... ', end='')\n        os.system('unzip -q %s' % name)  # unzip\n        os.remove(name)  # remove zip to free space\n\n    print('Done (%.1fs)' % (time.time() - t))\n    return r\n\n\ndef upload_blob(bucket_name, source_file_name, destination_blob_name):\n    # Uploads a file to a bucket\n    # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python\n\n    storage_client = storage.Client()\n    bucket = storage_client.get_bucket(bucket_name)\n    blob = bucket.blob(destination_blob_name)\n\n    blob.upload_from_filename(source_file_name)\n\n    print('File {} uploaded to {}.'.format(\n        source_file_name,\n        destination_blob_name))\n\n\ndef download_blob(bucket_name, source_blob_name, destination_file_name):\n    # Uploads a blob from a bucket\n    storage_client = storage.Client()\n    bucket = storage_client.get_bucket(bucket_name)\n    blob = bucket.blob(source_blob_name)\n\n    blob.download_to_filename(destination_file_name)\n\n    print('Blob {} downloaded to {}.'.format(\n        source_blob_name,\n        destination_file_name))\n"
  },
  {
    "path": "utils/layers.py",
    "content": "from utils.utils import *\n\n\ndef make_divisible(v, divisor):\n    # Function ensures all layers have a channel number that is divisible by 8\n    # https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n    return math.ceil(v / divisor) * divisor\n\n\nclass Flatten(nn.Module):\n    # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions\n    def forward(self, x):\n        return x.view(x.size(0), -1)\n\n\nclass Concat(nn.Module):\n    # Concatenate a list of tensors along dimension\n    def __init__(self, dimension=1):\n        super(Concat, self).__init__()\n        self.d = dimension\n\n    def forward(self, x):\n        return torch.cat(x, self.d)\n\n\nclass FeatureConcat(nn.Module):\n    def __init__(self, layers, groups):\n        super(FeatureConcat, self).__init__()\n        self.layers = layers  # layer indices\n        self.groups = groups\n        self.multiple = len(layers) > 1  # multiple layers flag\n\n    def forward(self, x, outputs):\n        if self.multiple:\n            return torch.cat([outputs[i] for i in self.layers], 1)\n        else:\n            if self.groups:\n                return x[:, (x.shape[1] // 2):]\n            else:\n                return outputs[self.layers[0]]\n\n\nclass Shortcut(nn.Module):  # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070\n    def __init__(self, layers, weight=False):\n        super(Shortcut, self).__init__()\n        self.layers = layers  # layer indices\n        self.weight = weight  # apply weights boolean\n        self.n = len(layers) + 1  # number of layers\n        if weight:\n            self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)  # layer weights\n\n    def forward(self, x, outputs):\n        # Weights\n        if self.weight:\n            w = torch.sigmoid(self.w) * (2 / self.n)  # sigmoid weights (0-1)\n            x = x * w[0]\n\n        # Fusion\n        nx = x.shape[1]  # input channels\n        for i in range(self.n - 1):\n            a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]]  # feature to add\n            na = a.shape[1]  # feature channels\n\n            # Adjust channels\n            if nx == na:  # same shape\n                x = x + a\n            elif nx > na:  # slice input\n                x[:, :na] = x[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n            else:  # slice feature\n                x = x + a[:, :nx]\n\n        return x\n\n\nclass MixConv2d(nn.Module):  # MixConv: Mixed Depthwise Convolutional Kernels https://arxiv.org/abs/1907.09595\n    def __init__(self, in_ch, out_ch, k=(3, 5, 7), stride=1, dilation=1, bias=True, method='equal_params'):\n        super(MixConv2d, self).__init__()\n\n        groups = len(k)\n        if method == 'equal_ch':  # equal channels per group\n            i = torch.linspace(0, groups - 1E-6, out_ch).floor()  # out_ch indices\n            ch = [(i == g).sum() for g in range(groups)]\n        else:  # 'equal_params': equal parameter count per group\n            b = [out_ch] + [0] * groups\n            a = np.eye(groups + 1, groups, k=-1)\n            a -= np.roll(a, 1, axis=1)\n            a *= np.array(k) ** 2\n            a[0] = 1\n            ch = np.linalg.lstsq(a, b, rcond=None)[0].round().astype(int)  # solve for equal weight indices, ax = b\n\n        self.m = nn.ModuleList([nn.Conv2d(in_channels=in_ch,\n                                          out_channels=ch[g],\n                                
          kernel_size=k[g],\n                                          stride=stride,\n                                          padding=k[g] // 2,  # 'same' pad\n                                          dilation=dilation,\n                                          bias=bias) for g in range(groups)])\n\n    def forward(self, x):\n        return torch.cat([m(x) for m in self.m], 1)\n\n\n# Activation functions below -------------------------------------------------------------------------------------------\nclass SwishImplementation(torch.autograd.Function):\n    @staticmethod\n    def forward(ctx, x):\n        ctx.save_for_backward(x)\n        return x * torch.sigmoid(x)\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        x = ctx.saved_tensors[0]\n        sx = torch.sigmoid(x)  # sigmoid(ctx)\n        return grad_output * (sx * (1 + x * (1 - sx)))\n\n\nclass MishImplementation(torch.autograd.Function):\n    @staticmethod\n    def forward(ctx, x):\n        ctx.save_for_backward(x)\n        return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        x = ctx.saved_tensors[0]\n        sx = torch.sigmoid(x)\n        fx = F.softplus(x).tanh()\n        return grad_output * (fx + x * sx * (1 - fx * fx))\n\n\nclass MemoryEfficientSwish(nn.Module):\n    def forward(self, x):\n        return SwishImplementation.apply(x)\n\n\nclass MemoryEfficientMish(nn.Module):\n    def forward(self, x):\n        return MishImplementation.apply(x)\n\n\nclass Swish(nn.Module):\n    def forward(self, x):\n        return x * torch.sigmoid(x)\n\n\nclass Mish(nn.Module):  # https://github.com/digantamisra98/Mish\n    def forward(self, x):\n        return x * F.softplus(x).tanh()\n\n\nclass ReLU6(nn.Module):\n    def __init__(self):\n        super(ReLU6, self).__init__()\n\n    def forward(self, x):\n        return F.relu6(x, inplace=True)\n\n\nclass HardSwish(nn.Module):\n    def __init__(self):\n        super(HardSwish, self).__init__()\n\n    def forward(self, x):\n        return x * (F.relu6(x + 3.0, inplace=True) / 6.0)\n\n\nclass HardSigmoid(nn.Module):\n    def __init__(self):\n        super(HardSigmoid, self).__init__()\n\n    def forward(self, x):\n        out = F.relu6(x + 3.0, inplace=True) / 6.0\n        return out\n\n\nclass SE(nn.Module):\n    def __init__(self, channel, reduction=4):\n        super(SE, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        self.fc = nn.Sequential(\n            nn.Linear(channel, channel // reduction, bias=False),\n            nn.ReLU(inplace=True),\n            nn.Linear(channel // reduction, channel, bias=False),\n            HardSigmoid()\n            # nn.Sigmoid()\n        )\n\n    def forward(self, x):\n        b, c, _, _ = x.size()\n        y = self.avg_pool(x).view(b, c)\n        y = self.fc(y).view(b, c, 1, 1)\n        return x * y.expand_as(x)\n"
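\n\n# Editorial self-check (a sketch added in this revision; not part of the upstream file):\n# the memory-efficient autograd activations should agree with their eager counterparts.\nif __name__ == '__main__':\n    _x = torch.randn(2, 3, 8, 8)\n    assert torch.allclose(MemoryEfficientSwish()(_x), Swish()(_x), atol=1e-6)\n    assert torch.allclose(MemoryEfficientMish()(_x), Mish()(_x), atol=1e-6)\n    print('Swish/Mish pairs agree')\n"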
  },
  {
    "path": "utils/output_upsample.py",
    "content": "import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport os\nfrom utils.parse_config import *\n\n\n# cfg = './cfg/prune_regular_0.8_keep_0.01_10_shortcut_yolov3-ship.cfg'\ndef Val_upsample(cfg,TN):\n\n    # if not os.path.isdir('./validation'):\n    #     os.makedirs('./validation')\n\n    module_defs = parse_model_cfg(cfg)\n    #_ = module_defs.pop(0)  # cfg training hyperparams (unused)\n    upsample_times = 0  # 上采样次数（第几次上采样）\n    for i, mdef in enumerate(module_defs):\n        if mdef['type'] == 'net':\n            width = mdef['width']\n            height = mdef['height']\n            channels = mdef['channels']\n        elif mdef['type'] == 'upsample':\n\n            upsample_times = upsample_times + 1\n\n            layer_idx = i - 1\n\n            activation_input = np.loadtxt('./quantizer_output/q_activation_out/q_activation_00%d_conv.txt'%(layer_idx-1))\n\n            input_scale = np.loadtxt('./quantizer_output/a_scale_out/a_scale_00%d_conv.txt'%(layer_idx-1))\n\n            Up_channels = int(256 / upsample_times)\n            Up_width = int((width * upsample_times) /32)\n            Up_height = int((height * upsample_times) /32)\n            activation_input = torch.from_numpy(activation_input).view(1, Up_channels, Up_height, Up_width)\n\n            #上采样,stride为上采样的倍数\n            stride = 2\n            temp_out = F.upsample(input=activation_input,scale_factor=stride)\n\n            #重排序\n            a_para = temp_out\n            # print(\"use activation reorder!\")\n            shape_input = a_para.shape[1]\n            num_TN = int(shape_input / TN)\n            remainder_TN = shape_input % TN\n            first = True\n            reorder_a_para = None\n\n            for k in range(num_TN):\n                temp = a_para[:, k * TN:(k + 1) * TN, :, :]\n                temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                if first:\n                    reorder_a_para = temp.clone().cpu().data.numpy()\n                    first = False\n                else:\n                    reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n            a_para_flatten = reorder_a_para\n\n            q_activation_reorder = a_para_flatten\n            q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n            np.savetxt(('./quantizer_output/q_activation_reorder/%d_upsample_reorder.txt' % layer_idx),\n                       q_activation_reorder, delimiter='\\n')\n            ###保存重排序的二进制文件\n            activation_flat = q_activation_reorder.astype(np.int8)\n            writer = open('./quantizer_output/q_activation_reorder/%d_upsample_q_bin' % layer_idx, \"wb\")\n            writer.write(activation_flat)\n            writer.close()\n            ##########特征图重排序结束\n\n\n            input_scale = torch.from_numpy(input_scale)\n\n\n            #保存上采样txt文件\n            # val_results = np.array(temp_out.cpu()).reshape(1, -1)\n            # np.savetxt(('./quantizer_output/q_activation_reorder/%d_upsample_output.txt'%layer_idx), val_results,delimiter='\\n')\n            #\n            output_scale = input_scale\n            output_scale = np.array(output_scale.cpu()).reshape(1, -1)\n            np.savetxt(('./quantizer_output/a_scale_out/%d_upsample_scale.txt'%layer_idx), output_scale,delimiter='\\n')\n            #\n            # ###保存二进制文件\n            # activation_flat = val_results.astype(np.int8)\n            # writer = 
open('./quantizer_output/q_activation_reorder/%d_upsample_q_bin'%layer_idx, \"wb\")\n            # writer.write(activation_flat)\n            # writer.close()\n\n# Val_upsample(cfg,32)\n\n\n# import argparse\n# if __name__ == '__main__':\n#     parser = argparse.ArgumentParser()\n#     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')\n#     opt = parser.parse_args()\n#     opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0]  # find file"
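\n\n\n# Editorial sketch (not in the original file): the TN-tiled reorder above walks the\n# channels in groups of TN and flattens each tile in (H, W, C) order, e.g.:\n#   x = torch.arange(8).view(1, 4, 2, 1)  # 4 channels; TN=2 -> 2 tiles\n#   tile0 = x[:, 0:2].view(2, 2, 1).permute(1, 2, 0).contiguous().view(-1)\n#   # tile0 -> tensor([0, 2, 1, 3]): row, then column, then channel within the tile\n"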
  },
  {
    "path": "utils/parse_config.py",
    "content": "import os\n\nimport numpy as np\n\n\ndef parse_model_cfg(path):\n    # Parse the yolo *.cfg file and return module definitions path may be 'cfg/yolov3.cfg', 'yolov3.cfg', or 'yolov3'\n    if not path.endswith('.cfg'):  # add .cfg suffix if omitted\n        path += '.cfg'\n    if not os.path.exists(path) and os.path.exists('cfg' + os.sep + path):  # add cfg/ prefix if omitted\n        path = 'cfg' + os.sep + path\n\n    with open(path, 'r') as f:\n        lines = f.read().split('\\n')\n    lines = [x for x in lines if x and not x.startswith('#')]\n    lines = [x.rstrip().lstrip() for x in lines]  # get rid of fringe whitespaces\n    mdefs = []  # module definitions\n    for line in lines:\n        if line.startswith('['):  # This marks the start of a new block\n            mdefs.append({})\n            mdefs[-1]['type'] = line[1:-1].rstrip()\n            if mdefs[-1]['type'] == 'convolutional':\n                mdefs[-1]['batch_normalize'] = 0  # pre-populate with zeros (may be overwritten later)\n        else:\n            key, val = line.split(\"=\")\n            key = key.rstrip()\n\n            if key == 'anchors':  # return nparray\n                mdefs[-1][key] = np.array([float(x) for x in val.split(',')]).reshape((-1, 2))  # np anchors\n            elif (key in ['from', 'layers', 'mask']) or (key == 'size' and ',' in val):  # return array\n                mdefs[-1][key] = [int(x) for x in val.split(',')]\n            else:\n                val = val.strip()\n                if val.isnumeric():  # return int or float\n                    mdefs[-1][key] = int(val) if (int(val) - float(val)) == 0 else float(val)\n                else:\n                    mdefs[-1][key] = val  # return string\n\n    # Check all fields are supported\n    supported = ['type', 'batch_normalize', 'filters', 'size', 'stride', 'pad', 'activation', 'layers', 'groups',\n                 'reduction', 'from', 'mask', 'anchors', 'classes', 'num', 'jitter', 'ignore_thresh', 'truth_thresh',\n                 'random', 'stride_x', 'stride_y', 'weights_type', 'weights_normalization', 'scale_x_y', 'beta_nms',\n                 'nms_kind', 'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh', 'group_id', 'resize']\n\n    f = []  # fields\n    for x in mdefs[1:]:\n        [f.append(k) for k in x if k not in f]\n    u = [x for x in f if x not in supported]  # unsupported fields\n    assert not any(u), \"Unsupported fields %s in %s. See https://github.com/ultralytics/yolov3/issues/631\" % (u, path)\n\n    return mdefs\n\n\ndef parse_data_cfg(path):\n    # Parses the data configuration file\n    if not os.path.exists(path) and os.path.exists('data' + os.sep + path):  # add data/ prefix if omitted\n        path = 'data' + os.sep + path\n\n    with open(path, 'r') as f:\n        lines = f.readlines()\n\n    options = dict()\n    for line in lines:\n        line = line.strip()\n        if line == '' or line.startswith('#'):\n            continue\n        key, val = line.split('=')\n        options[key.strip()] = val.strip()\n\n    return options\n"
  },
  {
    "path": "utils/prune_utils.py",
    "content": "import torch\nfrom terminaltables import AsciiTable\nfrom copy import deepcopy\nimport numpy as np\nimport torch.nn.functional as F\n\n\ndef parse_module_defs2(module_defs):\n    CBL_idx = []\n    Other_idx = []\n    shortcut_idx = dict()\n    shortcut_all = set()\n    ignore_idx = set()\n    for i, module_def in enumerate(module_defs):\n        if module_def['type'] == 'convolutional':\n            if module_def['batch_normalize']:\n                CBL_idx.append(i)\n            else:\n                Other_idx.append(i)\n            if module_defs[i + 1]['type'] == 'maxpool' and module_defs[i + 2]['type'] == 'route':\n                # spp前一个CBL不剪 区分spp和tiny\n                ignore_idx.add(i)\n            if module_defs[i + 1]['type'] == 'route' and 'groups' in module_defs[i + 1]:\n                ignore_idx.add(i)\n        elif module_def['type'] == 'depthwise':\n            Other_idx.append(i)\n            # 深度可分离卷积层的其前一层不剪\n            ignore_idx.add(i - 1)\n        elif module_def['type'] == 'se':\n            Other_idx.append(i)\n        # 上采样层前的卷积层不裁剪\n        elif module_def['type'] == 'upsample':\n            ignore_idx.add(i - 1)\n        elif module_def['type'] == 'shortcut':\n            identity_idx = (i + int(module_def['from'][0]))\n            if module_defs[identity_idx]['type'] == 'convolutional':\n\n                # ignore_idx.add(identity_idx)\n                shortcut_idx[i - 1] = identity_idx\n                shortcut_all.add(identity_idx)\n            elif module_defs[identity_idx]['type'] == 'shortcut':\n\n                # ignore_idx.add(identity_idx - 1)\n                shortcut_idx[i - 1] = identity_idx - 1\n                shortcut_all.add(identity_idx - 1)\n            shortcut_all.add(i - 1)\n\n    prune_idx = [idx for idx in CBL_idx if idx not in ignore_idx]\n\n    return CBL_idx, Other_idx, prune_idx, shortcut_idx, shortcut_all\n\n\ndef parse_module_defs(module_defs):\n    CBL_idx = []\n    Other_idx = []\n    ignore_idx = set()\n    for i, module_def in enumerate(module_defs):\n        if module_def['type'] == 'convolutional':\n            if module_def['batch_normalize']:\n                CBL_idx.append(i)\n            else:\n                Other_idx.append(i)\n            if module_defs[i + 1]['type'] == 'maxpool' and module_defs[i + 2]['type'] == 'route':\n                # spp前一个CBL不剪 区分tiny\n                ignore_idx.add(i)\n            if module_defs[i + 1]['type'] == 'route' and 'groups' in module_defs[i + 1]:\n                ignore_idx.add(i)\n        elif module_def['type'] == 'depthwise':\n            Other_idx.append(i)\n            # 深度可分离卷积层的其前一层不剪\n            ignore_idx.add(i - 1)\n        elif module_def['type'] == 'se':\n            Other_idx.append(i)\n        # 跳连层的前一层不剪,跳连层的来源层不剪\n        elif module_def['type'] == 'shortcut':\n            ignore_idx.add(i - 1)\n            identity_idx = (i + int(module_def['from'][0]))\n            if module_defs[identity_idx]['type'] == 'convolutional':\n                ignore_idx.add(identity_idx)\n            elif module_defs[identity_idx]['type'] == 'shortcut':\n                ignore_idx.add(identity_idx - 1)\n        # 上采样层前的卷积层不裁剪\n        elif module_def['type'] == 'upsample':\n            ignore_idx.add(i - 1)\n\n    prune_idx = [idx for idx in CBL_idx if idx not in ignore_idx]\n\n    return CBL_idx, Other_idx, prune_idx\n\n\ndef parse_module_defs4(module_defs):\n    CBL_idx = []\n    Conv_idx = []\n    shortcut_idx = []\n    for i, module_def in enumerate(module_defs):\n 
       if module_def['type'] == 'convolutional':\n            if module_def['batch_normalize']:\n                CBL_idx.append(i)\n            else:\n                Conv_idx.append(i)\n        elif module_def['type'] == 'shortcut':\n            shortcut_idx.append(i - 1)\n\n    return CBL_idx, Conv_idx, shortcut_idx\n\n\ndef gather_bn_weights(module_list, prune_idx):\n    size_list = [module_list[idx][1].weight.data.shape[0] for idx in prune_idx]\n\n    bn_weights = torch.zeros(sum(size_list))\n    index = 0\n    for idx, size in zip(prune_idx, size_list):\n        bn_weights[index:(index + size)] = module_list[idx][1].weight.data.abs().clone()\n        index += size\n\n    return bn_weights\n\n\ndef write_cfg(cfg_file, module_defs):\n    with open(cfg_file, 'w') as f:\n        for module_def in module_defs:\n            f.write(f\"[{module_def['type']}]\\n\")\n            for key, value in module_def.items():\n                if key != 'type':\n                    f.write(f\"{key}={value}\\n\")\n            f.write(\"\\n\")\n    return cfg_file\n\n\nclass BNOptimizer():\n\n    @staticmethod\n    def updateBN(sr_flag, module_list, s, prune_idx):\n        if sr_flag:\n            for idx in prune_idx:\n                # Squential(Conv, BN, Lrelu)\n                bn_module = module_list[idx][1]\n                bn_module.weight.grad.data.add_(s * torch.sign(bn_module.weight.data))  # L1\n\n\ndef obtain_quantiles(bn_weights, num_quantile=5):\n    sorted_bn_weights, i = torch.sort(bn_weights)\n    total = sorted_bn_weights.shape[0]\n    quantiles = sorted_bn_weights.tolist()[-1::-total // num_quantile][::-1]\n    print(\"\\nBN weights quantile:\")\n    quantile_table = [\n        [f'{i}/{num_quantile}' for i in range(1, num_quantile + 1)],\n        [\"%.3f\" % quantile for quantile in quantiles]\n    ]\n    print(AsciiTable(quantile_table).table)\n\n    return quantiles\n\n\ndef get_input_mask(module_defs, idx, CBLidx2mask, is_gray_scale=False):\n    if idx == 0:\n        if not is_gray_scale:\n            return np.ones(3)\n        else:\n            return np.ones(1)\n\n    if module_defs[idx - 1]['type'] == 'convolutional':\n        return CBLidx2mask[idx - 1]\n    # for tiny\n    elif module_defs[idx - 1]['type'] == 'maxpool':\n        if module_defs[idx - 2]['type'] == 'route':  # v4 tiny\n            return get_input_mask(module_defs, idx - 1, CBLidx2mask)\n        else:  # v3 tiny\n            return CBLidx2mask[idx - 2]\n    # for mobilenet\n    elif module_defs[idx - 1]['type'] == 'se':\n        return CBLidx2mask[idx - 3]\n    elif module_defs[idx - 1]['type'] == 'depthwise':\n        return CBLidx2mask[idx - 2]\n    elif module_defs[idx - 1]['type'] == 'shortcut':\n        return CBLidx2mask[idx - 2]\n    elif module_defs[idx - 1]['type'] == 'route':\n        route_in_idxs = []\n        for layer_i in module_defs[idx - 1]['layers']:\n            if int(layer_i) < 0:\n                route_in_idxs.append(idx - 1 + int(layer_i))\n            else:\n                route_in_idxs.append(int(layer_i))\n        if len(route_in_idxs) == 1:\n            mask = CBLidx2mask[route_in_idxs[0]]\n            if 'groups' in module_defs[idx - 1]:\n                return mask[(mask.shape[0] // 2):]\n            return mask\n        elif len(route_in_idxs) == 2:\n            # tiny剪植时使用\n            if module_defs[route_in_idxs[1] - 1]['type'] == 'maxpool':\n                return np.concatenate([CBLidx2mask[route_in_idxs[0] - 1], CBLidx2mask[route_in_idxs[1]]])\n            else:\n                
if module_defs[route_in_idxs[0]]['type'] == 'upsample':\n                    mask1 = CBLidx2mask[route_in_idxs[0] - 1]\n                elif module_defs[route_in_idxs[0]]['type'] == 'convolutional':\n                    mask1 = CBLidx2mask[route_in_idxs[0]]\n                if module_defs[route_in_idxs[1]]['type'] == 'convolutional':\n                    mask2 = CBLidx2mask[route_in_idxs[1]]\n                else:\n                    mask2 = CBLidx2mask[route_in_idxs[1] - 1]\n                return np.concatenate([mask1, mask2])\n        elif len(route_in_idxs) == 4:\n            # spp结构中最后一个route\n            mask = CBLidx2mask[route_in_idxs[-1]]\n            return np.concatenate([mask, mask, mask, mask])\n        else:\n            print(\"Something wrong with route module!\")\n            raise Exception\n\n\ndef init_weights_from_loose_model(compact_model, loose_model, CBL_idx, Other_idx, CBLidx2mask, is_gray_scale=False):\n    for idx in CBL_idx:\n        compact_CBL = compact_model.module_list[idx]\n        loose_CBL = loose_model.module_list[idx]\n        out_channel_idx = np.argwhere(CBLidx2mask[idx])[:, 0].tolist()\n\n        compact_bn, loose_bn = compact_CBL[1], loose_CBL[1]\n        compact_bn.weight.data = loose_bn.weight.data[out_channel_idx].clone()\n        compact_bn.bias.data = loose_bn.bias.data[out_channel_idx].clone()\n        compact_bn.running_mean.data = loose_bn.running_mean.data[out_channel_idx].clone()\n        compact_bn.running_var.data = loose_bn.running_var.data[out_channel_idx].clone()\n\n        input_mask = get_input_mask(loose_model.module_defs, idx, CBLidx2mask, is_gray_scale=is_gray_scale)\n        in_channel_idx = np.argwhere(input_mask)[:, 0].tolist()\n        compact_conv, loose_conv = compact_CBL[0], loose_CBL[0]\n        tmp = loose_conv.weight.data[:, in_channel_idx, :, :].clone()\n        compact_conv.weight.data = tmp[out_channel_idx, :, :, :].clone()\n\n    for idx in Other_idx:\n        compact_conv = compact_model.module_list[idx][0]\n        loose_conv = loose_model.module_list[idx][0]\n\n        input_mask = get_input_mask(loose_model.module_defs, idx, CBLidx2mask)\n        in_channel_idx = np.argwhere(input_mask)[:, 0].tolist()\n        # 拷贝非剪植层的时候包括三种情况\n        # 情况1：卷积层，需要拷贝bias\n        # 情况2：se层，需要分别拷贝fc1和fc2\n        # 情况3：depthwise层，直接拷贝卷积和BN\n        if loose_model.module_defs[idx]['type'] == 'convolutional':\n            compact_conv.weight.data = loose_conv.weight.data[:, in_channel_idx, :, :].clone()\n            compact_conv.bias.data = loose_conv.bias.data.clone()\n        elif loose_model.module_defs[idx]['type'] == 'se':\n            compact_fc1 = compact_conv.fc[0]\n            loose_fc1 = loose_conv.fc[0]\n            compact_fc1.weight.data = loose_fc1.weight.data.clone()\n            compact_fc2 = compact_conv.fc[2]\n            loose_fc2 = loose_conv.fc[2]\n            compact_fc2.weight.data = loose_fc2.weight.data.clone()\n        else:\n            compact_conv.weight.data = loose_conv.weight.data.clone()\n\n            compact_bn = compact_model.module_list[idx][1]\n            loose_bn = loose_model.module_list[idx][1]\n            compact_bn.weight.data = loose_bn.weight.data.clone()\n            compact_bn.bias.data = loose_bn.bias.data.clone()\n            compact_bn.running_mean.data = loose_bn.running_mean.data.clone()\n            compact_bn.running_var.data = loose_bn.running_var.data.clone()\n\n\ndef prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask):\n    pruned_model = deepcopy(model)\n    
activations = []\n    for i, model_def in enumerate(model.module_defs):\n\n        if model_def['type'] == 'convolutional' or model_def['type'] == 'depthwise' or model_def['type'] == 'se':\n            activation = torch.zeros(int(model_def['filters'])).cuda()\n            if i in prune_idx:\n                mask = torch.from_numpy(CBLidx2mask[i]).cuda()\n                bn_module = pruned_model.module_list[i][1]\n                bn_module.weight.data.mul_(mask)\n                if hasattr(pruned_model.module_list[i], 'activation'):\n                    ac_module = pruned_model.module_list[i][2]\n                    if ac_module.__class__.__name__ == \"LeakyReLU\":\n                        activation = F.leaky_relu((1 - mask) * bn_module.bias.data, 0.1)\n                    elif ac_module.__class__.__name__ == \"ReLU6\":\n                        activation = F.relu6((1 - mask) * bn_module.bias.data, inplace=True)\n                    elif ac_module.__class__.__name__ == \"HardSwish\":\n                        x = (1 - mask) * bn_module.bias.data\n                        activation = x * (F.relu6(x + 3.0, inplace=True) / 6.0)\n                    elif ac_module.__class__.__name__ == \"ReLU\":\n                        activation = F.relu((1 - mask) * bn_module.bias.data, 0.1)\n                    elif ac_module.__class__.__name__ == \"Mish\":\n                        x = (1 - mask) * bn_module.bias.data\n                        activation = x * F.softplus(x).tanh()\n                    else:\n                        activation = (1 - mask) * bn_module.bias.data\n                else:\n                    activation = (1 - mask) * bn_module.bias.data\n                update_activation(i, pruned_model, activation, CBL_idx)\n                bn_module.bias.data.mul_(mask)\n            activations.append(activation)\n\n        elif model_def['type'] == 'shortcut':\n            actv1 = activations[i - 1]\n            from_layer = int(model_def['from'][0])\n            actv2 = activations[i + from_layer]\n            activation = actv1 + actv2\n            update_activation(i, pruned_model, activation, CBL_idx)\n            activations.append(activation)\n\n\n\n        elif model_def['type'] == 'route':\n            # spp不参与剪枝，其中的route不用更新，仅占位\n            from_layers = [int(s) for s in model_def['layers']]\n            activation = None\n            if len(from_layers) == 1:\n                activation = activations[i + from_layers[0] if from_layers[0] < 0 else from_layers[0]]\n                if 'groups' in model_def:\n                    activation = activation[(activation.shape[0] // 2):]\n                update_activation(i, pruned_model, activation, CBL_idx)\n            elif len(from_layers) == 2:\n                actv1 = activations[i + from_layers[0]]\n                actv2 = activations[i + from_layers[1] if from_layers[1] < 0 else from_layers[1]]\n                activation = torch.cat((actv1, actv2))\n                update_activation(i, pruned_model, activation, CBL_idx)\n            activations.append(activation)\n\n        elif model_def['type'] == 'upsample':\n            # activation = torch.zeros(int(model.module_defs[i - 1]['filters'])).cuda()\n            activations.append(activations[i - 1])\n\n        elif model_def['type'] == 'yolo':\n            activations.append(None)\n\n        elif model_def['type'] == 'maxpool':  # 区分spp和tiny\n            if model.module_defs[i + 1]['type'] == 'route':\n                activations.append(None)\n            else:\n                
activation = activations[i - 1]\n                update_activation(i, pruned_model, activation, CBL_idx)\n                activations.append(activation)\n\n    return pruned_model\n\n\ndef obtain_bn_mask(bn_module, thre):\n    thre = thre.to(bn_module.weight.device)\n    mask = bn_module.weight.data.abs().ge(thre).float()\n\n    return mask\n\n\ndef get_nearest_multiple(num, base):\n    down = num % base\n    up = base - down\n    if down >= up:\n        near_multi_base = num + up\n    else:\n        near_multi_base = num - down\n    return near_multi_base\n\n\ndef merge_mask(model, CBLidx2mask, CBLidx2filters, base=1):\n    for i in range(len(model.module_defs) - 1, -1, -1):\n        mtype = model.module_defs[i]['type']\n        if mtype == 'shortcut':\n            if model.module_defs[i]['is_access']:\n                continue\n\n            Merge_masks = []\n            layer_i = i\n            while mtype == 'shortcut':  # 对shortcut的上一层和from层读取：如果为卷积层且BN=1，则对应层的CBLidx2mask扩展维度后添加到Merge_masks中\n                model.module_defs[layer_i]['is_access'] = True\n\n                if model.module_defs[layer_i - 1]['type'] == 'convolutional':\n                    bn = int(model.module_defs[layer_i - 1]['batch_normalize'])\n                    if bn:\n                        Merge_masks.append(CBLidx2mask[layer_i - 1].unsqueeze(0))\n\n                layer_i = int(model.module_defs[layer_i]['from'][0]) + layer_i\n                mtype = model.module_defs[layer_i]['type']\n\n                if mtype == 'convolutional':\n                    bn = int(model.module_defs[layer_i]['batch_normalize'])\n                    if bn:\n                        Merge_masks.append(CBLidx2mask[layer_i].unsqueeze(0))\n\n            if len(Merge_masks) > 1:  # 若有多个shortcut层\n                Merge_masks = torch.cat(Merge_masks, 0)  # 按列排列,是2维张量\n                if base == 1:\n                    merge_mask = (torch.sum(Merge_masks, dim=0) > 0).float()  # 按列求和，是1维张量\n                else:\n                    sum_mask = (torch.sum(Merge_masks, dim=0)).float()\n                    merge_num = int(torch.sum(torch.sum(Merge_masks, dim=0) > 0).item())\n                    merge_num_multi = get_nearest_multiple(merge_num, base)\n                    _, y = torch.topk(sum_mask, merge_num_multi)\n                    merge_mask = torch.zeros(sum_mask.size(), dtype=torch.float32)\n                    merge_mask[y] = 1\n\n            else:\n                if base == 1:\n                    merge_mask = Merge_masks[0].float()\n                else:\n                    merge_num = int(torch.sum(Merge_masks, dim=0)).item()\n                    merge_num_multi = get_nearest_multiple(merge_num, base)\n                    _, y = torch.topk(Merge_masks[0], merge_num_multi)\n                    merge_mask = torch.zeros(Merge_masks[0].size(), dtype=torch.float32)\n                    merge_mask[y] = 1\n\n            layer_i = i\n            mtype = 'shortcut'\n            while mtype == 'shortcut':  # 对shortcut的上一层和from层读取：如果为卷积层且BN=1，则对应层的CBLidx2mask和CBLidx2filters分别使用merge_mask和merge_mask的所有元素之和\n\n                if model.module_defs[layer_i - 1]['type'] == 'convolutional':\n                    bn = int(model.module_defs[layer_i - 1]['batch_normalize'])\n                    if bn:\n                        CBLidx2mask[layer_i - 1] = merge_mask\n                        CBLidx2filters[layer_i - 1] = int(torch.sum(merge_mask).item())  # 全部求和并且得到元素值\n\n                layer_i = int(model.module_defs[layer_i]['from'][0]) + layer_i\n     
           mtype = model.module_defs[layer_i]['type']\n\n                if mtype == 'convolutional':\n                    bn = int(model.module_defs[layer_i]['batch_normalize'])\n                    if bn:\n                        CBLidx2mask[layer_i] = merge_mask\n                        CBLidx2filters[layer_i] = int(torch.sum(merge_mask).item())\n\n\ndef update_activation(i, pruned_model, activation, CBL_idx):\n    next_idx = i + 1\n    if pruned_model.module_defs[next_idx]['type'] == 'convolutional':\n        next_conv = pruned_model.module_list[next_idx][0]\n        conv_sum = next_conv.weight.data.sum(dim=(2, 3))\n        offset = conv_sum.matmul(activation.reshape(-1, 1)).reshape(-1)\n        if next_idx in CBL_idx:\n            next_bn = pruned_model.module_list[next_idx][1]\n            next_bn.running_mean.data.sub_(offset)\n        else:\n            next_conv.bias.data.add_(offset)\n\n\ndef prune_model_keep_size_forEagleEye(model, prune_idx, CBLidx2mask):\n    pruned_model = deepcopy(model)\n    for i, model_def in enumerate(model.module_defs):\n\n        if model_def['type'] == 'convolutional' or model_def['type'] == 'depthwise' or model_def['type'] == 'se':\n            if i in prune_idx:\n                mask = torch.from_numpy(CBLidx2mask[i]).cuda()\n                bn_module = pruned_model.module_list[i][1]\n                bn_module.weight.data.mul_(mask)\n                bn_module.bias.data.mul_(mask)\n    return pruned_model\n"
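\n\n# Editorial worked example (not in the original file): get_nearest_multiple rounds\n# num to the nearest multiple of base, with ties rounding up:\n#   get_nearest_multiple(13, 8) -> 16  # 13 % 8 = 5 >= 8 - 5, so round up\n#   get_nearest_multiple(11, 8) -> 8   # 11 % 8 = 3 < 8 - 3, so round down\n"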
  },
  {
    "path": "utils/quantized/__init__.py",
    "content": "# Author:LiPu\n"
  },
  {
    "path": "utils/quantized/quantized_TPSQ.py",
    "content": "# Author:LiPu\nimport numpy as np\nimport os\nimport time\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Function\n\n\n# ********************* quantizers（量化器，量化） *********************\nclass Round(Function):\n\n    @staticmethod\n    def forward(self, input):\n        sign = torch.sign(input)\n        output = sign * torch.floor(torch.abs(input) + 0.5)\n        return output\n\n    @staticmethod\n    def backward(self, grad_output):\n        grad_input = grad_output.clone()\n        return grad_input\n\n\nclass Search_Pow2(Function):\n\n    @staticmethod\n    def forward(self, input):\n        input_data = input.data.clone()\n        output = input\n        output[output < 0].data.copy_(torch.Tensor([2 ** -5]))\n        output[output > 2 ** (8 + 5)].data.copy_(torch.Tensor([2 ** (8 + 5)]))\n        ceil_float_range = 2 ** output.log2().ceil()\n        floor_float_range = 2 ** output.log2().floor()\n        if abs(ceil_float_range - output) < abs(floor_float_range - output):\n            output.data = ceil_float_range.data\n        else:\n            output.data = floor_float_range.data\n        output_data = output.data.clone()\n        self.save_for_backward(input_data, output_data)\n        return output\n\n    @staticmethod\n    def backward(self, grad_output):\n        input, output = self.saved_tensors\n        scale = output / input\n        grad_input = scale * grad_output.clone()\n        # 线性\n        # grad_input = 0.8985 * (grad_output.clone())\n        # 多项式\n        # grad_input = -0.668 * grad_output.clone() + 1.335\n        # grad_input[grad_input.ge(0.5)] = 0\n        # grad_input[grad_input.le(-0.5)] = 0\n        # 指数\n        # grad_input = 0.2379145 * torch.exp(2.2235 * grad_output.clone())\n        # grad_input[grad_input.ge(0.1)] = 0\n        # grad_input[grad_input.le(-0.1)] = 0\n\n        return grad_input\n\n\nclass Quantizer(nn.Module):\n    def __init__(self, bits, out_channels, warmup=False):\n        super().__init__()\n        self.first = True\n        self.momentum = 0.1\n        self.bits = bits\n        if warmup:\n            self.register_buffer('warmup', torch.ones(1))\n        else:\n            self.register_buffer('warmup', torch.zeros(1))\n        self.momentum = 0.1\n\n    # 截断\n    def clamp(self, input):\n        # print('==============')\n        # print((Search_Pow2.apply(self.scale)).size())\n        # print(input.size())\n        # print('==============')\n        output = 0.5 * (\n                torch.abs(input + Search_Pow2.apply(self.scale)) - torch.abs(input - Search_Pow2.apply(self.scale)))\n        return output\n\n    # 量化\n    def quantize(self, input):\n        quantized_range = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = (input * quantized_range) / Search_Pow2.apply(self.scale)\n\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # 反量化\n    def dequantize(self, input):\n        quantized_range = torch.tensor((1 << (self.bits - 1)))\n        output = (input * Search_Pow2.apply(self.scale)) / quantized_range\n\n        return output\n\n    def forward(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('!Binary quantization is not supported !')\n            assert self.bits != 1\n        else:\n            output = self.clamp(input)  # 
截断\n            output = self.quantize(output)  # 量化\n            output = self.round(output)\n            output = self.dequantize(output)  # 反量化\n\n        return output\n\n    def get_quantize_value(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('!Binary quantization is not supported!')\n            assert self.bits != 1\n        else:\n            output = self.quantize(input)  # 量化\n            output = self.round(output)\n            output = self.clamp(output)  # 截断\n        return output\n\n\nclass RangeTracker(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n    def update_range(self, min_val, max_val):\n        raise NotImplementedError\n\n    @torch.no_grad()\n    def forward(self, input):\n        min_val = torch.min(input)\n        max_val = torch.max(input)\n        self.update_range(min_val, max_val)\n\n\nclass GlobalRangeTracker(RangeTracker):  # W,min_max_shape=(N, 1, 1, 1),channel级,取本次和之前相比的min_max —— (N, C, W, H)\n    def __init__(self):\n        super().__init__()\n        self.register_buffer('min_val', torch.zeros(1))\n        self.register_buffer('max_val', torch.zeros(1))\n        self.register_buffer('first_w', torch.zeros(1))\n\n    def update_range(self, min_val, max_val):\n        temp_minval = self.min_val\n        temp_maxval = self.max_val\n        if self.first_w == 0:\n            self.first_w.add_(1)\n            self.min_val.add_(min_val)\n            self.max_val.add_(max_val)\n        else:\n            self.min_val.add_(-temp_minval).add_(torch.min(temp_minval, min_val))\n            self.max_val.add_(-temp_maxval).add_(torch.max(temp_maxval, max_val))\n\n\nclass Bias_Quantizer(nn.Module):\n    def __init__(self, bits, range_tracker):\n        super().__init__()\n        self.bits = bits\n        self.range_tracker = range_tracker\n        self.register_buffer('scale', torch.zeros(1))  # 量化比例因子\n\n    def update_params(self):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n\n        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # 量化后范围\n\n        float_max = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val))  # 量化前范围\n        floor_float_range = 2 ** float_max.log2().floor()\n        ceil_float_range = 2 ** float_max.log2().ceil()\n        if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):\n            float_range = ceil_float_range\n        else:\n            float_range = floor_float_range\n        self.scale = float_range / quantized_range  # 量化比例因子\n\n    # 量化\n    def quantize(self, input):\n        output = input / self.scale\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # 截断\n    def clamp(self, input):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # 反量化\n    def dequantize(self, input):\n        output = (input) * self.scale\n        return output\n\n    def forward(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('!Binary quantization is not supported !')\n            assert self.bits != 1\n        else:\n            if self.training == True:\n                self.range_tracker(input)\n                
self.update_params()\n            output = self.quantize(input)  # 量化\n            output = self.round(output)\n            output = self.clamp(output)  # 截断\n            output = self.dequantize(output)  # 反量化\n        return output\n\n    def get_quantize_value(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('!Binary quantization is not supported !')\n            assert self.bits != 1\n        else:\n            output = self.quantize(input)  # 量化\n            output = self.round(output)\n            output = self.clamp(output)  # 截断\n        return output\n\n    ################获得量化因子所对应的移位数\n    def get_scale(self):\n        #############移位修正\n        move_scale = math.log2(self.scale)\n        move_scale = np.array(move_scale).reshape(1, -1)\n        return move_scale\n\n\nclass Weight_Quantizer(Quantizer):\n    def __init__(self, bits, out_channels, warmup):\n        super().__init__(bits, warmup)\n        self.out_channels = out_channels\n        if self.out_channels == -1:\n            self.scale = Parameter(torch.Tensor(1))  # 量化比例因子\n        else:\n            self.scale = Parameter(torch.Tensor(self.out_channels, 1, 1, 1))  # 量化比例因子\n        init.ones_(self.scale)\n\n    def forward(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('!Binary quantization is not supported !')\n            assert self.bits != 1\n        else:\n            if self.warmup:\n                with torch.no_grad():\n                    max_metrics = -1\n                    max_step = -5\n                    step = (torch.max(input)) / 100\n                    for i in range(1, 100):\n                        self.scale.data.copy_(torch.Tensor([step * i]))\n                        output = self.clamp(input)  # 截断\n                        output = self.quantize(output)  # 量化\n                        output = self.round(output)\n                        output = self.dequantize(output)  # 反量化\n                        cosine_similarity = torch.cosine_similarity(input.view(-1), output.view(-1), dim=0)\n                        if cosine_similarity > max_metrics:\n                            max_metrics = cosine_similarity\n                            max_step = i\n                        del output\n                        torch.cuda.empty_cache()\n                    # print(\"max_step:\", max_step)\n                    # print(\"max_metrics:\", max_metrics)\n                    self.scale.data.copy_(torch.Tensor([step * max_step]))\n                    self.warmup.add_(-1)\n            output = self.clamp(input)  # 截断\n            output = self.quantize(output)  # 量化\n            output = self.round(output)\n            output = self.dequantize(output)  # 反量化\n        return output\n\n\nclass Activattion_Quantizer(Quantizer):\n    def __init__(self, bits, out_channels, warmup):\n        super().__init__(bits, out_channels, warmup)\n        self.out_channels = out_channels\n        if self.out_channels == -1:\n            self.scale = Parameter(torch.Tensor(1))  # 量化比例因子\n        else:\n            self.scale = Parameter(torch.Tensor(1, self.out_channels, 1, 1))  # 量化比例因子\n        init.ones_(self.scale)\n\n    def forward(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('!Binary quantization is not supported !')\n            assert self.bits != 1\n        else:\n            if self.warmup:\n            
    with torch.no_grad():\n                    max_metrics = -1\n                    max_step = -5\n                    step = (torch.max(input)) / 100\n                    # if self.out_channels == -1:\n                    for i in range(1, 100):\n                        self.scale.data.copy_(torch.Tensor([step * i]))\n                        output = self.clamp(input)  # 截断\n                        output = self.quantize(output)  # 量化\n                        output = self.round(output)\n                        output = self.dequantize(output)  # 反量化\n                        cosine_similarity = torch.cosine_similarity(input.view(-1), output.view(-1), dim=0)\n                        if cosine_similarity > max_metrics:\n                            max_metrics = cosine_similarity\n                            max_step = i\n                        del output\n                        torch.cuda.empty_cache()\n                    # print(\"max_step:\", max_step)\n                    # print(\"max_metrics:\", max_metrics)\n                    self.scale.data.copy_(torch.Tensor([step * max_step]))\n                    self.warmup.add_(-1)\n            output = self.clamp(input)  # 截断\n            output = self.quantize(output)  # 量化\n            output = self.round(output)\n            output = self.dequantize(output)  # 反量化\n        return output\n\n\ndef reshape_to_activation(input):\n    return input.reshape(1, -1, 1, 1)\n\n\ndef reshape_to_weight(input):\n    return input.reshape(-1, 1, 1, 1)\n\n\ndef reshape_to_bias(input):\n    return input.reshape(-1)\n\n\n# ********************* bn融合_量化卷积（bn融合后，同时量化A/W，并做卷积） *********************\nclass TPSQ_BNFold_QuantizedConv2d_For_FPGA(nn.Conv2d):\n    def __init__(\n            self,\n            in_channels,\n            out_channels,\n            kernel_size,\n            stride=1,\n            padding=0,\n            dilation=1,\n            groups=1,\n            bias=False,\n            eps=1e-5,\n            momentum=0.01,  # 考虑量化带来的抖动影响,对momentum进行调整(0.1 ——> 0.01),削弱batch统计参数占比，一定程度抑制抖动。经实验量化训练效果更好,acc提升1%左右\n            a_bits=8,\n            w_bits=8,\n            bn=0,\n            activate='leaky',\n            steps=0,\n            quantizer_output=False,\n            maxabsscaler=False,\n            warmup=True\n    ):\n        super().__init__(\n            in_channels=in_channels,\n            out_channels=out_channels,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            groups=groups,\n            bias=bias\n        )\n        self.bn = bn\n        self.activate = activate\n        self.eps = eps\n        self.momentum = momentum\n        self.freeze_step = int(steps * 0.9)\n        self.gamma = Parameter(torch.Tensor(out_channels))\n        self.beta = Parameter(torch.Tensor(out_channels))\n        self.register_buffer('running_mean', torch.zeros(out_channels))\n        self.register_buffer('running_var', torch.zeros(out_channels))\n        self.register_buffer('batch_mean', torch.zeros(out_channels))\n        self.register_buffer('batch_var', torch.zeros(out_channels))\n        self.register_buffer('first_bn', torch.zeros(1))\n        self.register_buffer('step', torch.zeros(1))\n        self.quantizer_output = quantizer_output\n        self.maxabsscaler = maxabsscaler\n        init.normal_(self.gamma, 1, 0.5)\n        init.zeros_(self.beta)\n\n        self.activation_quantizer = Activattion_Quantizer(bits=a_bits, out_channels=-1,\n              
                                            warmup=warmup)\n        self.weight_quantizer = Weight_Quantizer(bits=w_bits, out_channels=-1, warmup=warmup)\n        self.bias_quantizer = Bias_Quantizer(bits=w_bits, range_tracker=GlobalRangeTracker())\n\n    def forward(self, input):\n        # 训练态\n        if self.training:\n            self.step += 1\n            if self.bn:\n                # 先做普通卷积得到A，以取得BN参数\n                output = F.conv2d(\n                    input=input,\n                    weight=self.weight,\n                    bias=self.bias,\n                    stride=self.stride,\n                    padding=self.padding,\n                    dilation=self.dilation,\n                    groups=self.groups\n                )\n                # 更新BN统计参数（batch和running）\n                dims = [dim for dim in range(4) if dim != 1]\n                self.batch_mean = torch.mean(output, dim=dims)\n                self.batch_var = torch.var(output, dim=dims)\n                with torch.no_grad():\n                    if self.first_bn == 0 and torch.equal(self.running_mean, torch.zeros_like(\n                            self.running_mean)) and torch.equal(self.running_var, torch.zeros_like(self.running_var)):\n                        self.first_bn.add_(1)\n                        self.running_mean.add_(self.batch_mean)\n                        self.running_var.add_(self.batch_var)\n                    else:\n                        self.running_mean.mul_(1 - self.momentum).add_(self.batch_mean * self.momentum)\n                        self.running_var.mul_(1 - self.momentum).add_(self.batch_var * self.momentum)\n                # BN融合\n                if self.step < self.freeze_step:\n                    if self.bias is not None:\n                        bias = reshape_to_bias(\n                            self.beta + (self.bias - self.batch_mean) * (\n                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))\n                    else:\n                        bias = reshape_to_bias(\n                            self.beta - self.batch_mean * (\n                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))  # b融batch\n                    weight = self.weight * reshape_to_weight(\n                        self.gamma / torch.sqrt(self.batch_var + self.eps))  # w融running\n                else:\n                    if self.bias is not None:\n                        bias = reshape_to_bias(\n                            self.beta + (self.bias - self.running_mean) * (\n                                    self.gamma / torch.sqrt(self.running_var + self.eps)))\n                    else:\n                        bias = reshape_to_bias(\n                            self.beta - self.running_mean * (\n                                    self.gamma / torch.sqrt(self.running_var + self.eps)))  # b融batch\n                    weight = self.weight * reshape_to_weight(\n                        self.gamma / torch.sqrt(self.running_var + self.eps))  # w融running\n            else:\n                bias = self.bias\n                weight = self.weight\n        # 测试态\n        else:\n            # print(self.running_mean, self.running_var)\n            if self.bn:\n                # BN融合\n                if self.bias is not None:\n                    bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n                            self.gamma / torch.sqrt(self.running_var + self.eps)))\n                else:\n                    bias = 
reshape_to_bias(\n                        self.beta - self.running_mean * self.gamma / torch.sqrt(\n                            self.running_var + self.eps))  # b融running\n                weight = self.weight * reshape_to_weight(\n                    self.gamma / torch.sqrt(self.running_var + self.eps))  # w融running\n            else:\n                bias = self.bias\n                weight = self.weight\n        # 量化A和bn融合后的W\n        q_weight = self.weight_quantizer(weight)\n        q_bias = self.bias_quantizer(bias)\n\n        if self.quantizer_output == True:  # 输出量化参数txt文档\n\n            # 创建的quantizer_output输出文件夹\n            if not os.path.isdir('./quantizer_output'):\n                os.makedirs('./quantizer_output')\n\n            if not os.path.isdir('./quantizer_output/q_weight_out'):\n                os.makedirs('./quantizer_output/q_weight_out')\n            if not os.path.isdir('./quantizer_output/w_scale_out'):\n                os.makedirs('./quantizer_output/w_scale_out')\n            if not os.path.isdir('./quantizer_output/q_weight_max'):\n                os.makedirs('./quantizer_output/q_weight_max')\n            if not os.path.isdir('./quantizer_output/max_weight_count'):\n                os.makedirs('./quantizer_output/max_weight_count')\n            #######################输出当前层的权重量化因子\n            weight_scale = self.weight_quantizer.get_scale()\n            np.savetxt(('./quantizer_output/w_scale_out/%f.txt' % time.time()), weight_scale, delimiter='\\n')\n            #######################输出当前层的量化权重\n            q_weight_txt = self.weight_quantizer.get_quantize_value(weight)\n            q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)\n            q_weight_max = [np.max(q_weight_txt)]\n            # q_weight_max = np.argmax(q_weight_txt)\n            max_weight_count = [np.sum(abs(q_weight_txt) >= 127)]  # 统计该层溢出的数目\n            np.savetxt(('./quantizer_output/max_weight_count/%f.txt' % time.time()), max_weight_count)\n            np.savetxt(('./quantizer_output/q_weight_max/%f.txt' % time.time()), q_weight_max)\n            np.savetxt(('./quantizer_output/q_weight_out/%f.txt' % time.time()), q_weight_txt, delimiter='\\n')\n            # io.savemat('save.mat',{'q_weight_txt':q_weight_txt})\n\n            #######################创建输出偏置txt的文件夹\n            if not os.path.isdir('./quantizer_output/q_bias_out'):\n                os.makedirs('./quantizer_output/q_bias_out')\n            if not os.path.isdir('./quantizer_output/b_scale_out'):\n                os.makedirs('./quantizer_output/b_scale_out')\n            #######################输出当前层偏置的量化因子\n            bias_scale = self.bias_quantizer.get_scale()\n            np.savetxt(('./quantizer_output/b_scale_out/%f.txt' % time.time()), bias_scale, delimiter='\\n')\n            #######################输出当前层的量化偏置\n            q_bias_txt = self.bias_quantizer.get_quantize_value(bias)\n            q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)\n            np.savetxt(('./quantizer_output/q_bias_out/%f.txt' % time.time()), q_bias_txt, delimiter='\\n')\n\n        # 量化卷积\n        output = F.conv2d(\n            input=input,\n            weight=q_weight,\n            bias=q_bias,  # 注意，这里加bias，做完整的conv+bn\n            stride=self.stride,\n            padding=self.padding,\n            dilation=self.dilation,\n            groups=self.groups\n        )\n        if self.activate == 'leaky':\n            output = F.leaky_relu(output, 0.1 if not self.maxabsscaler else 0.25, inplace=True)\n        elif 
self.activate == 'relu6':\n            output = F.relu6(output, inplace=True)\n        elif self.activate == 'h_swish':\n            output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)\n        elif self.activate == 'relu':\n            output = F.relu(output, inplace=True)\n        elif self.activate == 'mish':\n            output = output * F.softplus(output).tanh()\n        elif self.activate == 'linear':\n            # return output\n            pass\n        else:\n            print(self.activate + \"%s is not supported !\")\n\n        if self.quantizer_output == True:\n\n            if not os.path.isdir('./quantizer_output/q_activation_out'):\n                os.makedirs('./quantizer_output/q_activation_out')\n            if not os.path.isdir('./quantizer_output/a_scale_out'):\n                os.makedirs('./quantizer_output/a_scale_out')\n            if not os.path.isdir('./quantizer_output/q_activation_max'):\n                os.makedirs('./quantizer_output/q_activation_max')\n            if not os.path.isdir('./quantizer_output/max_activation_count'):\n                os.makedirs('./quantizer_output/max_activation_count')\n            ##################输出当前激活的量化因子\n            activation_scale = self.activation_quantizer.get_scale()\n            np.savetxt(('./quantizer_output/a_scale_out/%f.txt' % time.time()), activation_scale, delimiter='\\n')\n            ##################输出当前层的量化激活\n            q_activation_txt = self.activation_quantizer.get_quantize_value(output)\n            q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)\n            q_activation_max = [np.max(q_activation_txt)]  # 统计该层的最大值(即查看是否有溢出)\n            max_activation_count = [np.sum(abs(q_activation_txt) >= 127)]  # 统计该层溢出的数目\n            # q_weight_max = np.argmax(q_weight_txt)\n            np.savetxt(('./quantizer_output/max_activation_count/%f.txt' % time.time()),\n                       max_activation_count)\n            np.savetxt(('./quantizer_output/q_activation_max/%f.txt' % time.time()), q_activation_max)\n            np.savetxt(('./quantizer_output/q_activation_out/%f.txt' % time.time()), q_activation_txt,\n                       delimiter='\\n')\n\n        output = self.activation_quantizer(output)\n        return output\n\n    def BN_fuse(self):\n        if self.bn:\n            # BN融合\n            if self.bias is not None:\n                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n                        self.gamma / torch.sqrt(self.running_var + self.eps)))\n            else:\n                bias = reshape_to_bias(\n                    self.beta - self.running_mean * self.gamma / torch.sqrt(\n                        self.running_var + self.eps))  # b融running\n            weight = self.weight * reshape_to_weight(\n                self.gamma / torch.sqrt(self.running_var + self.eps))  # w融running\n        else:\n            bias = self.bias\n            weight = self.weight\n        return weight, bias\n"
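\n\n# Editorial usage sketch (hypothetical shapes and arguments; not part of the original file):\n# conv = TPSQ_BNFold_QuantizedConv2d_For_FPGA(3, 16, kernel_size=3, padding=1,\n#                                             bn=1, a_bits=8, w_bits=8, steps=100)\n# conv.train()\n# y = conv(torch.randn(4, 3, 32, 32))  # the first pass also warm-up searches the scales\n# w_fused, b_fused = conv.BN_fuse()    # BN-folded weight/bias, e.g. for export\n"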
  },
  {
    "path": "utils/quantized/quantized_dorefa.py",
    "content": "# Author:LiPu\nimport time\nimport numpy as np\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Function\n\n\nclass Round(Function):\n\n    @staticmethod\n    def forward(self, input):\n        sign = torch.sign(input)\n        output = sign * torch.floor(torch.abs(input) + 0.5)\n        return output\n\n    @staticmethod\n    def backward(self, grad_output):\n        grad_input = grad_output.clone()\n        return grad_input\n\n\n# ********************* A(特征)量化 ***********************\nclass activation_quantize(nn.Module):\n    def __init__(self, a_bits):\n        super().__init__()\n        self.a_bits = a_bits\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    def get_quantize_value(self, input):\n        output = torch.clamp(input * 0.1, 0, 1)  # 特征A截断前先进行缩放（* 0.1），以减小截断误差\n        scale = float(2 ** self.a_bits - 1)\n        output = output * scale\n        output = self.round(output)\n        return output\n\n        ################获得量化因子所对应的移位数\n\n    def get_scale(self):\n        #############移位修正\n        # scale = float(2 ** self.a_bits - 1)\n        # move_scale = math.log2(scale)\n        scale = np.array(self.a_bits).reshape(1, -1)\n        return scale\n\n    def forward(self, input):\n        if self.a_bits == 32:\n            output = input\n        elif self.a_bits == 1:\n            print('！Binary quantization is not supported ！')\n            assert self.a_bits != 1\n        else:\n            output = torch.clamp(input * 0.1, 0, 1)  # 特征A截断前先进行缩放（* 0.1），以减小截断误差\n            scale = float(2 ** self.a_bits - 1)\n            output = output * scale\n            output = self.round(output)\n            output = output / scale\n        return output\n\n\n# ********************* W(模型参数)量化 ***********************\nclass weight_quantize(nn.Module):\n    def __init__(self, w_bits):\n        super().__init__()\n        self.w_bits = w_bits\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    def get_quantize_value(self, input):\n        output = torch.tanh(input)\n        output = output / 2 / torch.max(torch.abs(output)) + 0.5  # 归一化-[0,1]\n        scale = float(2 ** self.w_bits - 1)\n        output = output * scale\n        output = self.round(output)\n        # output = 2 * output - 1\n        return output\n\n        ################获得量化因子所对应的移位数\n\n    def get_scale(self):\n        #############移位修正\n        # scale = float(2 ** self.w_bits - 1)\n        # scale = math.log2(scale)\n        scale = np.array(self.w_bits).reshape(1, -1)\n        return scale\n\n    def forward(self, input):\n        if self.w_bits == 32:\n            output = input\n        elif self.w_bits == 1:\n            print('！Binary quantization is not supported ！')\n            assert self.w_bits != 1\n        else:\n            output = torch.tanh(input)\n            output = output / 2 / torch.max(torch.abs(output)) + 0.5  # 归一化-[0,1]\n            scale = float(2 ** self.w_bits - 1)\n            output = output * scale\n            output = self.round(output)\n            output = output / scale\n            output = 2 * output - 1\n        return output\n\n    def get_weights(self, input):\n        if self.w_bits == 32:\n            output = input\n        elif self.w_bits == 1:\n            print('！Binary quantization is not supported ！')\n            assert 
\n\n# ********************* Quantized convolution (quantize A/W, then convolve) ***********************\nclass DorefaConv2d(nn.Conv2d):\n    def __init__(\n            self,\n            in_channels,\n            out_channels,\n            kernel_size,\n            stride=1,\n            padding=0,\n            dilation=1,\n            groups=1,\n            bias=True,\n            a_bits=8,\n            w_bits=8,\n    ):\n        super().__init__(\n            in_channels=in_channels,\n            out_channels=out_channels,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            groups=groups,\n            bias=bias\n        )\n        # instantiate the A and W quantizers\n        self.activation_quantizer = activation_quantize(a_bits=a_bits)\n        self.weight_quantizer = weight_quantize(w_bits=w_bits)\n\n    def forward(self, input):\n        # quantize A and W (the 3-channel network input is left unquantized)\n        if input.shape[1] != 3:\n            input = self.activation_quantizer(input)\n        q_weight = self.weight_quantizer(self.weight)\n        # quantized convolution\n        output = F.conv2d(\n            input=input,\n            weight=q_weight,\n            bias=self.bias,\n            stride=self.stride,\n            padding=self.padding,\n            dilation=self.dilation,\n            groups=self.groups\n        )\n        return output\n
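\n\n# --- Illustrative usage (added sketch, not part of the original training code) ---\n# Builds a DorefaConv2d and runs one forward pass; the layer hyper-parameters\n# below are arbitrary assumptions for demonstration.\ndef _demo_dorefa_conv():\n    conv = DorefaConv2d(3, 16, kernel_size=3, padding=1, a_bits=8, w_bits=8)\n    x = torch.randn(1, 3, 32, 32)  # 3-channel input, so activations stay unquantized\n    y = conv(x)\n    print(y.shape)  # torch.Size([1, 16, 32, 32])\n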
\n\ndef reshape_to_activation(input):\n    return input.reshape(1, -1, 1, 1)\n\n\ndef reshape_to_weight(input):\n    return input.reshape(-1, 1, 1, 1)\n\n\ndef reshape_to_bias(input):\n    return input.reshape(-1)\n\n\nclass BNFold_DorefaConv2d(DorefaConv2d):\n\n    def __init__(\n            self,\n            in_channels,\n            out_channels,\n            kernel_size,\n            stride=1,\n            padding=0,\n            dilation=1,\n            groups=1,\n            bias=False,\n            eps=1e-5,\n            momentum=0.01,  # lowered from 0.1 to 0.01 to damp quantization jitter: batch statistics get less weight, which empirically improves QAT accuracy by about 1%\n            a_bits=8,\n            w_bits=8,\n            bn=0,\n            activate='leaky',\n            steps=0,\n            quantizer_output=False,\n            maxabsscaler=False\n    ):\n        super().__init__(\n            in_channels=in_channels,\n            out_channels=out_channels,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            groups=groups,\n            bias=bias\n        )\n        self.bn = bn\n        self.activate = activate\n        self.eps = eps\n        self.momentum = momentum\n        self.freeze_step = int(steps * 0.9)\n        self.gamma = Parameter(torch.Tensor(out_channels))\n        self.beta = Parameter(torch.Tensor(out_channels))\n        self.register_buffer('running_mean', torch.zeros(out_channels))\n        self.register_buffer('running_var', torch.zeros(out_channels))\n        self.register_buffer('batch_mean', torch.zeros(out_channels))\n        self.register_buffer('batch_var', torch.zeros(out_channels))\n        self.register_buffer('first_bn', torch.zeros(1))\n        self.register_buffer('step', torch.zeros(1))\n        self.quantizer_output = quantizer_output\n        self.maxabsscaler = maxabsscaler\n        init.normal_(self.gamma, 1, 0.5)\n        init.zeros_(self.beta)\n\n        # instantiate the A/W/bias quantizers\n        self.activation_quantizer = activation_quantize(a_bits=a_bits)\n        self.weight_quantizer = weight_quantize(w_bits=w_bits)\n        self.bias_quantizer = weight_quantize(w_bits=w_bits)\n\n    def forward(self, input):\n        # training mode\n        if self.training:\n            self.step += 1\n            if self.bn:\n                # plain convolution first, to collect BN statistics from the activations\n                output = F.conv2d(\n                    input=input,\n                    weight=self.weight,\n                    bias=self.bias,\n                    stride=self.stride,\n                    padding=self.padding,\n                    dilation=self.dilation,\n                    groups=self.groups\n                )\n                # update BN statistics (batch and running)\n                dims = [dim for dim in range(4) if dim != 1]\n                self.batch_mean = torch.mean(output, dim=dims)\n                self.batch_var = torch.var(output, dim=dims)\n\n                with torch.no_grad():\n                    if self.first_bn == 0 and torch.equal(self.running_mean, torch.zeros_like(\n                            self.running_mean)) and torch.equal(self.running_var, torch.zeros_like(self.running_var)):\n                        self.first_bn.add_(1)\n                        self.running_mean.add_(self.batch_mean)\n                        self.running_var.add_(self.batch_var)\n                    else:\n                        self.running_mean.mul_(1 - self.momentum).add_(self.momentum * self.batch_mean)\n                        self.running_var.mul_(1 - self.momentum).add_(self.momentum * self.batch_var)\n                # BN folding\n                if self.step < self.freeze_step:\n                    if self.bias is not None:\n                        bias = reshape_to_bias(\n                            self.beta + (self.bias - self.batch_mean) * (\n                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))\n                    else:\n                        bias = reshape_to_bias(\n                            self.beta - self.batch_mean * (\n                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))  # fold batch stats into b\n                    weight = self.weight * reshape_to_weight(\n                        self.gamma / torch.sqrt(self.batch_var + self.eps))  # fold batch stats into w\n                else:\n                    if self.bias is not None:\n                        bias = reshape_to_bias(\n                            self.beta + (self.bias - self.running_mean) * (\n                                    self.gamma / torch.sqrt(self.running_var + self.eps)))\n                    else:\n                        bias = reshape_to_bias(\n                            self.beta - self.running_mean * (\n                                    self.gamma / torch.sqrt(self.running_var + self.eps)))  # fold running stats into b\n                    weight = self.weight * reshape_to_weight(\n                        self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into w\n\n            else:\n                bias = self.bias\n                weight = self.weight\n        # eval mode\n        else:\n            # BN folding\n            if self.bn:\n                if self.bias is not None:\n                    bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n                            self.gamma / torch.sqrt(self.running_var + 
self.eps)))\n                else:\n                    bias = reshape_to_bias(\n                        self.beta - self.running_mean * (\n                                self.gamma / torch.sqrt(self.running_var + self.eps)))  # fold running stats into b\n                weight = self.weight * reshape_to_weight(\n                    self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into w\n            else:\n                bias = self.bias\n                weight = self.weight\n        # quantize A and the BN-folded W\n        q_weight = self.weight_quantizer(weight)\n        q_bias = self.bias_quantizer(bias)\n\n        if self.quantizer_output:  # dump quantization parameters to txt files\n\n            # create the quantizer_output folders\n            if not os.path.isdir('./quantizer_output'):\n                os.makedirs('./quantizer_output')\n\n            if not os.path.isdir('./quantizer_output/q_weight_out'):\n                os.makedirs('./quantizer_output/q_weight_out')\n            if not os.path.isdir('./quantizer_output/w_scale_out'):\n                os.makedirs('./quantizer_output/w_scale_out')\n            if not os.path.isdir('./quantizer_output/q_weight_max'):\n                os.makedirs('./quantizer_output/q_weight_max')\n            if not os.path.isdir('./quantizer_output/max_weight_count'):\n                os.makedirs('./quantizer_output/max_weight_count')\n            ####################### dump this layer's weight quantization scale\n            weight_scale = self.weight_quantizer.get_scale()\n            np.savetxt(('./quantizer_output/w_scale_out/scale %f.txt' % time.time()), weight_scale, delimiter='\\n')\n            ####################### dump this layer's quantized weights\n            q_weight_txt = self.weight_quantizer.get_quantize_value(weight)\n            q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)\n            q_weight_max = [np.max(q_weight_txt)]\n            max_weight_count = [np.sum(abs(q_weight_txt) >= 255)]  # count overflowing values in this layer\n            np.savetxt(('./quantizer_output/max_weight_count/max_weight_count %f.txt' % time.time()), max_weight_count)\n            np.savetxt(('./quantizer_output/q_weight_max/max_weight %f.txt' % time.time()), q_weight_max)\n            np.savetxt(('./quantizer_output/q_weight_out/weight %f.txt' % time.time()), q_weight_txt, delimiter='\\n')\n\n            ####################### folders for the bias dumps\n            if not os.path.isdir('./quantizer_output/q_bias_out'):\n                os.makedirs('./quantizer_output/q_bias_out')\n            if not os.path.isdir('./quantizer_output/b_scale_out'):\n                os.makedirs('./quantizer_output/b_scale_out')\n            ####################### dump this layer's bias quantization scale\n            bias_scale = self.bias_quantizer.get_scale()\n            np.savetxt(('./quantizer_output/b_scale_out/scale %f.txt' % time.time()), bias_scale, delimiter='\\n')\n            ####################### dump this layer's quantized bias\n            q_bias_txt = self.bias_quantizer.get_quantize_value(bias)\n            q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)\n            np.savetxt(('./quantizer_output/q_bias_out/bias %f.txt' % time.time()), q_bias_txt, delimiter='\\n')\n\n        # quantized convolution\n        if self.training:  # training mode\n            output = F.conv2d(\n                input=input,\n                weight=q_weight,\n                bias=q_bias,\n                stride=self.stride,\n                padding=self.padding,\n                
dilation=self.dilation,\n                groups=self.groups\n            )\n\n        else:  # eval mode\n            output = F.conv2d(\n                input=input,\n                weight=q_weight,\n                bias=q_bias,  # note: bias included here, i.e. a full conv+bn\n                stride=self.stride,\n                padding=self.padding,\n                dilation=self.dilation,\n                groups=self.groups\n            )\n        if self.activate == 'leaky':\n            output = F.leaky_relu(output, 0.125 if not self.maxabsscaler else 0.25, inplace=True)\n        elif self.activate == 'relu6':\n            output = F.relu6(output, inplace=True)\n        elif self.activate == 'h_swish':\n            output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)\n        elif self.activate == 'relu':\n            output = F.relu(output, inplace=True)\n        elif self.activate == 'mish':\n            output = output * F.softplus(output).tanh()\n        elif self.activate == 'linear':\n            return output\n        else:\n            print(self.activate + \" is not supported !\")\n\n        if self.quantizer_output:\n\n            if not os.path.isdir('./quantizer_output/q_activation_out'):\n                os.makedirs('./quantizer_output/q_activation_out')\n            if not os.path.isdir('./quantizer_output/a_scale_out'):\n                os.makedirs('./quantizer_output/a_scale_out')\n            if not os.path.isdir('./quantizer_output/q_activation_max'):\n                os.makedirs('./quantizer_output/q_activation_max')\n            if not os.path.isdir('./quantizer_output/max_activation_count'):\n                os.makedirs('./quantizer_output/max_activation_count')\n            ################## dump this layer's activation quantization scale\n            activation_scale = self.activation_quantizer.get_scale()\n            np.savetxt(('./quantizer_output/a_scale_out/scale %f.txt' % time.time()), activation_scale, delimiter='\\n')\n            ################## dump this layer's quantized activations\n            q_activation_txt = self.activation_quantizer.get_quantize_value(output)\n            q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)\n            q_activation_max = [np.max(q_activation_txt)]  # layer max (to check for overflow)\n            max_activation_count = [np.sum(abs(q_activation_txt) >= 255)]  # count overflowing values in this layer\n            np.savetxt(('./quantizer_output/max_activation_count/max_activation_count %f.txt' % time.time()),\n                       max_activation_count)\n            np.savetxt(('./quantizer_output/q_activation_max/max_activation %f.txt' % time.time()), q_activation_max)\n            np.savetxt(('./quantizer_output/q_activation_out/activation %f.txt' % time.time()), q_activation_txt,\n                       delimiter='\\n')\n\n        output = self.activation_quantizer(output)\n        return output\n\n    def BN_fuse(self):\n        if self.bn:\n            # BN folding\n            if self.bias is not None:\n                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n                        self.gamma / torch.sqrt(self.running_var + self.eps)))\n            else:\n                bias = reshape_to_bias(\n                    self.beta - self.running_mean * self.gamma / torch.sqrt(\n                        self.running_var + self.eps))  # fold running stats into b\n            weight = self.weight * reshape_to_weight(\n                self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into w\n        else:\n            bias = self.bias\n            weight = self.weight\n        return weight, bias\n
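\n\n# --- Illustrative usage (added sketch, not part of the original training code) ---\n# Checks that BN_fuse() returns a folded (weight, bias) pair usable in a plain\n# conv; shapes and sizes below are arbitrary assumptions for demonstration.\ndef _demo_bn_fuse():\n    layer = BNFold_DorefaConv2d(8, 16, kernel_size=3, padding=1, bn=1)\n    layer.eval()\n    layer.running_var.fill_(1.0)  # buffers start at zero; give the demo a valid variance\n    weight, bias = layer.BN_fuse()\n    x = torch.randn(1, 8, 16, 16)\n    y = F.conv2d(x, weight, bias, padding=1)\n    print(y.shape)  # torch.Size([1, 16, 16, 16])\n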
\n\nclass DorefaLinear(nn.Linear):\n    def __init__(self, in_features, out_features, bias=True, a_bits=2, w_bits=2):\n        super().__init__(in_features=in_features, out_features=out_features, bias=bias)\n        self.activation_quantizer = activation_quantize(a_bits=a_bits)\n        self.weight_quantizer = weight_quantize(w_bits=w_bits)\n\n    def forward(self, input):\n        # quantize A and W\n        q_input = self.activation_quantizer(input)\n        q_weight = self.weight_quantizer(self.weight)\n        # quantized fully-connected layer\n        output = F.linear(input=q_input, weight=q_weight, bias=self.bias)\n        return output\n"
  },
  {
    "path": "utils/quantized/quantized_google.py",
    "content": "import copy\nimport math\nimport time\nimport numpy as np\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Function\n\n\n# ********************* range_trackers(范围统计器，统计量化前范围) *********************\nclass RangeTracker(nn.Module):\n    def __init__(self, q_level):\n        super().__init__()\n        self.q_level = q_level\n\n    def update_range(self, min_val, max_val):\n        raise NotImplementedError\n\n    @torch.no_grad()\n    def forward(self, input):\n        if self.q_level == 'L':  # A,min_max_shape=(1, 1, 1, 1),layer级\n            min_val = torch.min(input)\n            max_val = torch.max(input)\n        elif self.q_level == 'C':  # W,min_max_shape=(N, 1, 1, 1),channel级\n            min_val = torch.min(torch.min(torch.min(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]\n            max_val = torch.max(torch.max(torch.max(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]\n        self.update_range(min_val, max_val)\n\n\nclass GlobalRangeTracker(RangeTracker):  # W,min_max_shape=(N, 1, 1, 1),channel级,取本次和之前相比的min_max —— (N, C, W, H)\n    def __init__(self, q_level, out_channels):\n        super().__init__(q_level)\n        if self.q_level == 'L':\n            self.register_buffer('min_val', torch.zeros(1))\n            self.register_buffer('max_val', torch.zeros(1))\n        elif self.q_level == 'C':\n            self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))\n            self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))\n        self.register_buffer('first_w', torch.zeros(1))\n\n    def update_range(self, min_val, max_val):\n        temp_minval = self.min_val\n        temp_maxval = self.max_val\n        if self.first_w == 0:\n            self.first_w.add_(1)\n            self.min_val.add_(min_val)\n            self.max_val.add_(max_val)\n        else:\n            self.min_val.add_(-temp_minval).add_(torch.min(temp_minval, min_val))\n            self.max_val.add_(-temp_maxval).add_(torch.max(temp_maxval, max_val))\n\n\nclass AveragedRangeTracker(RangeTracker):  # A,min_max_shape=(1, 1, 1, 1),layer级,取running_min_max —— (N, C, W, H)\n    def __init__(self, q_level, out_channels, momentum=0.1):\n        super().__init__(q_level)\n        self.momentum = momentum\n        if self.q_level == 'L':\n            self.register_buffer('min_val', torch.zeros(1))\n            self.register_buffer('max_val', torch.zeros(1))\n        elif self.q_level == 'C':\n            self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))\n            self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))\n        self.register_buffer('first_a', torch.zeros(1))\n\n    def update_range(self, min_val, max_val):\n        if self.first_a == 0:\n            self.first_a.add_(1)\n            self.min_val.add_(min_val)\n            self.max_val.add_(max_val)\n        else:\n            self.min_val.mul_(1 - self.momentum).add_(min_val * self.momentum)\n            self.max_val.mul_(1 - self.momentum).add_(max_val * self.momentum)\n\n\n# ********************* quantizers（量化器，量化） *********************\nclass Round(Function):\n\n    @staticmethod\n    def forward(self, input):\n        sign = torch.sign(input)\n        output = sign * torch.floor(torch.abs(input) + 0.5)\n        return output\n\n    @staticmethod\n    def backward(self, grad_output):\n        grad_input 
\n\nclass Quantizer(nn.Module):\n    def __init__(self, bits, range_tracker, out_channels, Scale_freeze_step, sign=True):\n        super().__init__()\n        self.bits = bits\n        self.range_tracker = range_tracker\n        self.register_buffer('step', torch.zeros(1))\n        self.Scale_freeze_step = Scale_freeze_step\n        self.sign = sign\n        if out_channels == -1:\n            self.register_buffer('scale', torch.zeros(1))  # quantization scale factor\n            self.register_buffer('zero_point', torch.zeros(1))  # quantization zero point\n        else:\n            self.register_buffer('scale', torch.zeros(out_channels, 1, 1, 1))  # quantization scale factor\n            self.register_buffer('zero_point', torch.zeros(out_channels, 1, 1, 1))  # quantization zero point\n\n    def update_params(self):\n        raise NotImplementedError\n\n    # quantize\n    def quantize(self, input):\n        output = input / self.scale + self.zero_point\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # clamp\n    def clamp(self, input):\n        if self.sign:\n            min_val = torch.tensor(-(1 << (self.bits - 1)))\n            max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        else:\n            min_val = torch.tensor(0)\n            max_val = torch.tensor((1 << self.bits) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # dequantize\n    def dequantize(self, input):\n        output = (input - self.zero_point) * self.scale\n        return output\n\n    def forward(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('Binary quantization is not supported!')\n            assert self.bits != 1\n        else:\n            if self.training and self.step < self.Scale_freeze_step:\n                self.range_tracker(input)\n                self.update_params()\n            output = self.quantize(input)  # quantize\n            output = self.round(output)\n            output = self.clamp(output)  # clamp\n            output = self.dequantize(output)  # dequantize\n            self.step += 1\n        return output\n\n    def get_quantize_value(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('Binary quantization is not supported!')\n            assert self.bits != 1\n        else:\n            output = self.quantize(input)  # quantize\n            output = self.round(output)\n            output = self.clamp(output)  # clamp\n        return output\n\n    ################ shift count corresponding to the quantization scale\n    def get_scale(self):\n        ############# shift correction\n        move_scale = math.log2(self.scale)\n        move_scale = np.array(move_scale).reshape(1, -1)\n        return move_scale\n\n\n# symmetric quantization\nclass SymmetricQuantizer(Quantizer):\n\n    def update_params(self):\n        if self.sign:\n            min_val = torch.tensor(-(1 << (self.bits - 1)))\n            max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        else:\n            min_val = torch.tensor(0)\n            max_val = torch.tensor((1 << self.bits) - 1)\n\n        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # post-quantization range\n\n        float_max = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val))  # pre-quantization range\n        floor_float_range = 2 ** float_max.log2().floor()\n        ceil_float_range = 2 ** float_max.log2().ceil()\n        if abs(ceil_float_range - float_max) < 
abs(floor_float_range - float_max):\n            float_range = ceil_float_range\n        else:\n            float_range = floor_float_range\n        self.scale = float_range / quantized_range  # quantization scale factor (snapped to a power of two)\n        self.zero_point = torch.zeros_like(self.scale)  # quantization zero point\n\n\n# asymmetric quantization\nclass AsymmetricQuantizer(Quantizer):\n\n    def update_params(self):\n        if self.sign:\n            min_val = torch.tensor(-(1 << (self.bits - 1)))\n            max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        else:\n            min_val = torch.tensor(0)\n            max_val = torch.tensor((1 << self.bits) - 1)\n        quantized_range = max_val - min_val  # post-quantization range\n\n        float_range = self.range_tracker.max_val - self.range_tracker.min_val  # pre-quantization range\n        ceil_float_range = 2 ** float_range.log2().ceil()\n        floor_float_range = 2 ** float_range.log2().floor()\n        if abs(ceil_float_range - float_range) < abs(floor_float_range - float_range):\n            float_range = ceil_float_range\n        else:\n            float_range = floor_float_range\n        self.scale = float_range / quantized_range  # quantization scale factor (snapped to a power of two)\n        self.zero_point = torch.round(max_val - self.range_tracker.max_val / self.scale)  # quantization zero point\n
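\n\n# --- Illustrative usage (added sketch, not part of the original training code) ---\n# One calibration step with a SymmetricQuantizer: the tracker observes the tensor,\n# update_params() snaps the scale to a power of two, and forward fake-quantizes.\n# Bit-width and tensor shape are arbitrary assumptions.\ndef _demo_symmetric_quantizer():\n    q = SymmetricQuantizer(bits=8, range_tracker=AveragedRangeTracker(q_level='L', out_channels=-1),\n                           out_channels=-1, Scale_freeze_step=100)\n    q.train()\n    x = torch.randn(1, 4, 8, 8)\n    y = q(x)  # tracker and update_params run because step < Scale_freeze_step\n    print(q.scale, (y - x).abs().max())  # power-of-two scale, small fake-quantization error\n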
\n\ndef reshape_to_activation(input):\n    return input.reshape(1, -1, 1, 1)\n\n\ndef reshape_to_weight(input):\n    return input.reshape(-1, 1, 1, 1)\n\n\ndef reshape_to_bias(input):\n    return input.reshape(-1)\n\n\n# ********************* BN-folded quantized convolution (fold BN, then quantize A/W and convolve) *********************\nclass BNFold_QuantizedConv2d_For_FPGA(nn.Conv2d):\n    def __init__(\n            self,\n            in_channels,\n            out_channels,\n            kernel_size,\n            stride=1,\n            padding=0,\n            dilation=1,\n            groups=1,\n            bias=False,\n            eps=1e-5,\n            momentum=0.01,  # lowered from 0.1 to 0.01 to damp quantization jitter: batch statistics get less weight, which empirically improves QAT accuracy by about 1%\n            a_bits=8,\n            w_bits=8,\n            q_type=0,\n            bn=0,\n            activate='leaky',\n            steps=0,\n            quantizer_output=False,\n            reorder=False, TM=32, TN=32,\n            name='', layer_idx=-1, maxabsscaler=False\n    ):\n        super().__init__(\n            in_channels=in_channels,\n            out_channels=out_channels,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            groups=groups,\n            bias=bias\n        )\n        self.bn = bn\n        self.activate = activate\n        self.eps = eps\n        self.momentum = momentum\n        self.BN_freeze_step = int(steps * 0.9)\n        self.Scale_freeze_step = int(steps * 0.1)\n        self.gamma = Parameter(torch.Tensor(out_channels))\n        self.beta = Parameter(torch.Tensor(out_channels))\n        self.register_buffer('running_mean', torch.zeros(out_channels))\n        self.register_buffer('running_var', torch.zeros(out_channels))\n        self.register_buffer('batch_mean', torch.zeros(out_channels))\n        self.register_buffer('batch_var', torch.zeros(out_channels))\n        self.register_buffer('first_bn', torch.zeros(1))\n        self.register_buffer('step', torch.zeros(1))\n        self.quantizer_output = quantizer_output\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n        self.name = name\n        self.layer_idx = layer_idx\n        self.w_bits = w_bits\n        self.a_bits = a_bits\n        self.maxabsscaler = maxabsscaler\n        init.normal_(self.gamma, 1, 0.5)\n        init.zeros_(self.beta)\n\n        # instantiate the quantizers (activations and weights, both tracked at layer level here)\n        if q_type == 0:\n            self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L',\n                                                                                                           out_channels=-1),\n                                                           out_channels=-1, Scale_freeze_step=self.Scale_freeze_step)\n            self.weight_quantizer = SymmetricQuantizer(bits=w_bits,\n                                                       range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),\n                                                       out_channels=-1, Scale_freeze_step=self.Scale_freeze_step)\n            self.bias_quantizer = SymmetricQuantizer(bits=w_bits,\n                                                     range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),\n                                                     out_channels=-1, Scale_freeze_step=self.Scale_freeze_step)\n        else:\n            self.activation_quantizer = AsymmetricQuantizer(bits=a_bits,\n                                                            range_tracker=AveragedRangeTracker(q_level='L',\n                                                                                               out_channels=-1),\n                                                            out_channels=-1, Scale_freeze_step=self.Scale_freeze_step,\n                                                            sign=False)\n            self.weight_quantizer = AsymmetricQuantizer(bits=w_bits,\n                                                        range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),\n                                                        out_channels=-1, Scale_freeze_step=self.Scale_freeze_step,\n                                                        sign=False)\n            self.bias_quantizer = AsymmetricQuantizer(bits=w_bits,\n                                                      range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),\n                                                      out_channels=-1, Scale_freeze_step=self.Scale_freeze_step,\n                                                      sign=False)\n\n    def forward(self, input):\n        # training mode\n        if self.training:\n            self.step += 1\n            if self.bn:\n                # plain convolution first, to collect BN statistics from the activations\n                output = F.conv2d(\n                    input=input,\n                    weight=self.weight,\n                    bias=self.bias,\n                    stride=self.stride,\n                    padding=self.padding,\n                    dilation=self.dilation,\n                    groups=self.groups\n                )\n                # update BN statistics (batch and running)\n                dims = [dim for dim in range(4) if dim != 1]\n                self.batch_mean = torch.mean(output, dim=dims)\n                self.batch_var = torch.var(output, dim=dims)\n                with torch.no_grad():\n                    if self.first_bn == 0 and torch.equal(self.running_mean, torch.zeros_like(\n                            self.running_mean)) and torch.equal(self.running_var, torch.zeros_like(self.running_var)):\n                        self.first_bn.add_(1)\n                        self.running_mean.add_(self.batch_mean)\n                        self.running_var.add_(self.batch_var)\n                    else:\n
                        self.running_mean.mul_(1 - self.momentum).add_(self.batch_mean * self.momentum)\n                        self.running_var.mul_(1 - self.momentum).add_(self.batch_var * self.momentum)\n                # BN folding\n                if self.step < self.BN_freeze_step:\n                    if self.bias is not None:\n                        bias = reshape_to_bias(\n                            self.beta + (self.bias - self.batch_mean) * (\n                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))\n                    else:\n                        bias = reshape_to_bias(\n                            self.beta - self.batch_mean * (\n                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))  # fold batch stats into b\n                    weight = self.weight * reshape_to_weight(\n                        self.gamma / torch.sqrt(self.batch_var + self.eps))  # fold batch stats into w\n                else:\n                    if self.bias is not None:\n                        bias = reshape_to_bias(\n                            self.beta + (self.bias - self.running_mean) * (\n                                    self.gamma / torch.sqrt(self.running_var + self.eps)))\n                    else:\n                        bias = reshape_to_bias(\n                            self.beta - self.running_mean * (\n                                    self.gamma / torch.sqrt(self.running_var + self.eps)))  # fold running stats into b\n                    weight = self.weight * reshape_to_weight(\n                        self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into w\n            else:\n                bias = self.bias\n                weight = self.weight\n        # eval mode\n        else:\n            if self.bn:\n                # BN folding\n                if self.bias is not None:\n                    bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n                            self.gamma / torch.sqrt(self.running_var + self.eps)))\n                else:\n                    bias = reshape_to_bias(\n                        self.beta - self.running_mean * self.gamma / torch.sqrt(\n                            self.running_var + self.eps))  # fold running stats into b\n                weight = self.weight * reshape_to_weight(\n                    self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into w\n            else:\n                bias = self.bias\n                weight = self.weight\n        # quantize A and the BN-folded W\n        q_weight = self.weight_quantizer(weight)\n        q_bias = self.bias_quantizer(bias)\n\n        if self.quantizer_output:  # dump quantization parameters to txt files\n\n            # create the quantizer_output folders\n            if not os.path.isdir('./quantizer_output'):\n                os.makedirs('./quantizer_output')\n\n            if not os.path.isdir('./quantizer_output/q_weight_out'):\n                os.makedirs('./quantizer_output/q_weight_out')\n            if not os.path.isdir('./quantizer_output/w_scale_out'):\n                os.makedirs('./quantizer_output/w_scale_out')\n            if not os.path.isdir('./quantizer_output/q_weight_max'):\n                os.makedirs('./quantizer_output/q_weight_max')\n            if not os.path.isdir('./quantizer_output/max_weight_count'):\n                os.makedirs('./quantizer_output/max_weight_count')\n\n            if not os.path.isdir('./quantizer_output/q_weight_reorder'):\n                os.makedirs('./quantizer_output/q_weight_reorder')\n            if not 
os.path.isdir('./quantizer_output/q_bias_reorder'):\n                os.makedirs('./quantizer_output/q_bias_reorder')\n\n            if self.layer_idx == -1:\n\n                ####################### dump this layer's weight quantization scale\n                weight_scale = - self.weight_quantizer.get_scale()\n                np.savetxt(('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name), weight_scale, delimiter='\\n')\n                ####################### dump this layer's quantized weights\n                q_weight_txt = self.weight_quantizer.get_quantize_value(weight)\n\n                ############# weight reordering\n                w_para = q_weight_txt  # tensor to reorder\n                if self.reorder:\n                    shape_output = w_para.shape[0]\n                    shape_input = w_para.shape[1]\n                    num_TN = int(shape_input / self.TN)\n                    remainder_TN = shape_input % self.TN\n                    num_TM = int(shape_output / self.TM)\n                    remainder_TM = shape_output % self.TM\n                    first = True\n                    reorder_w_para = None\n                    if self.activate == 'linear':\n                        print('layer-linear reorder!')\n                        for k in range(num_TN):\n                            temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                            temp = temp.permute(2, 0, 1).contiguous().view(-1)\n                            if first:\n                                reorder_w_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n                    else:\n                        for j in range(num_TM):\n                            if shape_input == 3 or shape_input == 1:  # first layer\n                                print('The first layer~~~~~~~~~~~~')\n                                temp = w_para[j * self.TM:(j + 1) * self.TM,\n                                       num_TN * self.TN:num_TN * self.TN + remainder_TN, :,\n                                       :]\n                                temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                                fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)\n                                fill[:, 0:remainder_TN, :] = temp\n                                temp = fill.permute(2, 0, 1).contiguous().view(-1)\n                                if first:  # allocate the output array\n                                    reorder_w_para = temp.clone().cpu().data.numpy()\n                                    first = False\n                                else:\n                                    reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n                            else:\n                                for k in range(num_TN):\n                                    temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]\n                                    # merge into the TM*TN*(K^2) tensor layout of Fig. 10(a) in the paper\n                                    temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                                    # convert to the reordered layout of Fig. 10(b)\n                                    temp = temp.permute(2, 0, 1).contiguous().view(-1)\n
                                    if first:\n                                        reorder_w_para = temp.clone().cpu().data.numpy()\n                                        first = False\n                                    else:\n                                        reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n\n                    w_para_flatten = reorder_w_para\n                    ##### sanity check on the reordered size\n                    '''if w_para_flatten.size == w_para.shape[0] * w_para.shape[1] * w_para.shape[2] * w_para.shape[3]:\n                        print(\"weights convert correctly!\")\n                    else:\n                        print(\"weights convert mismatchingly!\")'''\n\n                    q_weight_reorder = w_para_flatten\n                    q_weight_reorder = np.array(q_weight_reorder).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name), q_weight_reorder,\n                               delimiter='\\n')\n                ################ end of weight reordering\n\n                q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)\n                q_weight_max = [np.max(q_weight_txt)]\n                max_weight_count = [np.sum(abs(q_weight_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count overflowing values in this layer\n                np.savetxt(('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name), max_weight_count)\n                np.savetxt(('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name), q_weight_max)\n                np.savetxt(('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name), q_weight_txt,\n                           delimiter='\\n')\n\n                ####################### folders for the bias dumps\n                if not os.path.isdir('./quantizer_output/q_bias_out'):\n                    os.makedirs('./quantizer_output/q_bias_out')\n                if not os.path.isdir('./quantizer_output/b_scale_out'):\n                    os.makedirs('./quantizer_output/b_scale_out')\n                ####################### dump this layer's bias quantization scale\n                bias_scale = - self.bias_quantizer.get_scale()\n                np.savetxt(('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name), bias_scale, delimiter='\\n')\n                ####################### dump this layer's quantized bias\n                q_bias_txt = self.bias_quantizer.get_quantize_value(bias)\n                q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)\n                np.savetxt(('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name), q_bias_txt, delimiter='\\n')\n\n                ############# bias reordering\n                if self.reorder:\n                    b_para = np.zeros(2048, dtype=int)\n                    b_para[0:q_bias_txt.size] = q_bias_txt.reshape(-1)  # flatten first: q_bias_txt has shape (1, N)\n                    np.savetxt(('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name), b_para,\n                               delimiter='\\n')\n                    ###### write the reordered weights and biases as one binary file\n                    bias_weight_reorder = np.append(b_para, q_weight_reorder)\n                    wb_flat = bias_weight_reorder.astype(np.int8)\n                    with open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, \"wb\") as writer:\n                        writer.write(wb_flat)\n
                ################ end of bias reordering\n\n            elif int(self.name[1:4]) == self.layer_idx:\n                ####################### dump this layer's weight quantization scale\n                weight_scale = - self.weight_quantizer.get_scale()\n                np.savetxt(('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name), weight_scale, delimiter='\\n')\n                ####################### dump this layer's quantized weights\n                q_weight_txt = self.weight_quantizer.get_quantize_value(weight)\n\n                ############# weight reordering\n                w_para = q_weight_txt  # tensor to reorder\n                if self.reorder:\n                    shape_output = w_para.shape[0]\n                    shape_input = w_para.shape[1]\n                    num_TN = int(shape_input / self.TN)\n                    remainder_TN = shape_input % self.TN\n                    num_TM = int(shape_output / self.TM)\n                    remainder_TM = shape_output % self.TM\n                    first = True\n                    reorder_w_para = None\n                    if self.activate == 'linear':\n                        print('layer-linear reorder!')\n                        for k in range(num_TN):\n                            temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                            temp = temp.permute(2, 0, 1).contiguous().view(-1)\n                            if first:\n                                reorder_w_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n                    else:\n                        for j in range(num_TM):\n                            if shape_input == 3 or shape_input == 1:  # first layer\n                                print('The first layer~~~~~~~~~~~~')\n                                temp = w_para[j * self.TM:(j + 1) * self.TM,\n                                       num_TN * self.TN:num_TN * self.TN + remainder_TN, :,\n                                       :]\n                                temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                                fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)\n                                fill[:, 0:remainder_TN, :] = temp\n                                temp = fill.permute(2, 0, 1).contiguous().view(-1)\n                                if first:  # allocate the output array\n                                    reorder_w_para = temp.clone().cpu().data.numpy()\n                                    first = False\n                                else:\n                                    reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n                            else:\n                                for k in range(num_TN):\n                                    temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]\n                                    # merge into the TM*TN*(K^2) tensor layout of Fig. 10(a) in the paper\n                                    temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n                                    # convert to the reordered layout of Fig. 10(b)\n                                    temp = temp.permute(2, 0, 1).contiguous().view(-1)\n
                                    if first:\n                                        reorder_w_para = temp.clone().cpu().data.numpy()\n                                        first = False\n                                    else:\n                                        reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n\n                    w_para_flatten = reorder_w_para\n                    ##### sanity check on the reordered size\n                    '''if w_para_flatten.size == w_para.shape[0] * w_para.shape[1] * w_para.shape[2] * w_para.shape[3]:\n                        print(\"weights convert correctly!\")\n                    else:\n                        print(\"weights convert mismatchingly!\")'''\n\n                    q_weight_reorder = w_para_flatten\n                    q_weight_reorder = np.array(q_weight_reorder).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name), q_weight_reorder,\n                               delimiter='\\n')\n                ################ end of weight reordering\n\n                q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)\n                q_weight_max = [np.max(q_weight_txt)]\n                max_weight_count = [np.sum(abs(q_weight_txt) >= 127)]  # count overflowing values in this layer\n                np.savetxt(('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name), max_weight_count)\n                np.savetxt(('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name), q_weight_max)\n                np.savetxt(('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name), q_weight_txt,\n                           delimiter='\\n')\n\n                ####################### folders for the bias dumps\n                if not os.path.isdir('./quantizer_output/q_bias_out'):\n                    os.makedirs('./quantizer_output/q_bias_out')\n                if not os.path.isdir('./quantizer_output/b_scale_out'):\n                    os.makedirs('./quantizer_output/b_scale_out')\n                ####################### dump this layer's bias quantization scale\n                bias_scale = - self.bias_quantizer.get_scale()\n                np.savetxt(('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name), bias_scale, delimiter='\\n')\n                ####################### dump this layer's quantized bias\n                q_bias_txt = self.bias_quantizer.get_quantize_value(bias)\n                q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)\n                np.savetxt(('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name), q_bias_txt, delimiter='\\n')\n\n                ############# bias reordering\n                if self.reorder:\n                    b_para = np.zeros(2048, dtype=int)\n                    b_para[0:q_bias_txt.size] = q_bias_txt.reshape(-1)  # flatten first: q_bias_txt has shape (1, N)\n                    np.savetxt(('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name), b_para,\n                               delimiter='\\n')\n                    ###### write the reordered weights and biases as one binary file\n                    bias_weight_reorder = np.append(b_para, q_weight_reorder)\n                    wb_flat = bias_weight_reorder.astype(np.int8)\n                    with open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, \"wb\") as writer:\n                        writer.write(wb_flat)\n
                ################ end of bias reordering\n\n        # quantized convolution\n        output = F.conv2d(\n            input=input,\n            weight=q_weight,\n            bias=q_bias,\n            stride=self.stride,\n            padding=self.padding,\n            dilation=self.dilation,\n            groups=self.groups\n        )\n        if self.activate == 'leaky':\n            output = F.leaky_relu(output, 0.1 if not self.maxabsscaler else 0.25, inplace=True)\n        elif self.activate == 'relu6':\n            output = F.relu6(output, inplace=True)\n        elif self.activate == 'h_swish':\n            output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)\n        elif self.activate == 'relu':\n            output = F.relu(output, inplace=True)\n        elif self.activate == 'mish':\n            output = output * F.softplus(output).tanh()\n        elif self.activate == 'linear':\n            pass\n        else:\n            print(self.activate + \" is not supported !\")\n\n        if self.quantizer_output:\n\n            if not os.path.isdir('./quantizer_output/q_activation_out'):\n                os.makedirs('./quantizer_output/q_activation_out')\n            if not os.path.isdir('./quantizer_output/a_scale_out'):\n                os.makedirs('./quantizer_output/a_scale_out')\n            if not os.path.isdir('./quantizer_output/q_activation_max'):\n                os.makedirs('./quantizer_output/q_activation_max')\n            if not os.path.isdir('./quantizer_output/max_activation_count'):\n                os.makedirs('./quantizer_output/max_activation_count')\n            if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                os.makedirs('./quantizer_output/q_activation_reorder')\n\n            if self.layer_idx == -1:\n                ################## dump this layer's activation quantization scale\n                activation_scale = - self.activation_quantizer.get_scale()\n                np.savetxt(('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name), activation_scale,\n                           delimiter='\\n')\n                ################## dump this layer's quantized activations\n                q_activation_txt = self.activation_quantizer.get_quantize_value(output)\n\n                a_para = q_activation_txt\n                ############# feature-map (activation) reordering\n                if self.reorder:\n                    shape_input = a_para.shape[1]\n                    num_TN = int(shape_input / self.TN)\n                    remainder_TN = shape_input % self.TN\n                    first = True\n                    reorder_a_para = None\n                    if self.activate == 'linear':\n                        print('layer-linear reorder!')\n                        temp = a_para[:, 0:remainder_TN, :, :]\n                        temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                        temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                        if first:\n                            reorder_a_para = temp.clone().cpu().data.numpy()\n                            first = False\n                        else:\n                            reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n                    else:\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n
                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                    a_para_flatten = reorder_a_para\n                    ##### sanity check on the reordered size\n                    '''if a_para_flatten.size == a_para.shape[0] * a_para.shape[1] * a_para.shape[2] * a_para.shape[3]:\n                        print(\"activation convert correctly!\")\n                    else:\n                        print(\"activation convert mismatchingly!\")'''\n\n                    q_activation_reorder = a_para_flatten\n                    q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name),\n                               q_activation_reorder, delimiter='\\n')\n                    ### save the reordered activations as a binary file\n                    activation_flat = q_activation_reorder.astype(np.int8)\n                    with open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, \"wb\") as writer:\n                        writer.write(activation_flat)\n                ########## end of feature-map reordering\n\n                q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)\n                q_activation_max = [np.max(q_activation_txt)]  # layer max (to check for overflow)\n                max_activation_count = [np.sum(abs(q_activation_txt) >= (1 << (self.a_bits - 1)) - 1)]  # count overflowing values in this layer\n                np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),\n                           max_activation_count)\n                np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)\n                np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,\n                           delimiter='\\n')\n\n            elif int(self.name[1:4]) == self.layer_idx:\n\n                ################## dump this layer's activation quantization scale\n                activation_scale = - self.activation_quantizer.get_scale()\n                np.savetxt(('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name), activation_scale,\n                           delimiter='\\n')\n                ################## dump this layer's quantized activations\n                q_activation_txt = self.activation_quantizer.get_quantize_value(output)\n\n                a_para = q_activation_txt\n                ############# feature-map (activation) reordering\n                if self.reorder:\n                    shape_input = a_para.shape[1]\n                    num_TN = int(shape_input / self.TN)\n                    remainder_TN = shape_input % self.TN\n                    first = True\n                    reorder_a_para = None\n                    if self.activate == 'linear':\n                        print('layer-linear reorder!')\n                        temp = a_para[:, 0:remainder_TN, :, :]\n                        temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                        temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                        if first:\n
            reorder_a_para = temp.clone().cpu().data.numpy()\n                            first = False\n                        else:\n                            reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n                    else:\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                    a_para_flatten = reorder_a_para\n                    #####验证重排序结果的正确性\n                    '''if a_para_flatten.size == a_para.shape[0] * a_para.shape[1] * a_para.shape[2] * a_para.shape[3]:\n                        print(\"activation convert correctly!\")\n                    else:\n                        print(\"activation convert mismatchingly!\")'''\n\n                    q_activation_reorder = a_para_flatten\n                    q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name),\n                               q_activation_reorder, delimiter='\\n')\n                    ###保存重排序的二进制文件\n                    activation_flat = q_activation_reorder.astype(np.int8)\n                    writer = open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, \"wb\")\n                    writer.write(activation_flat)\n                    writer.close()\n                ##########特征图重排序结束\n\n                q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)\n                q_activation_max = [np.max(q_activation_txt)]  # 统计该层的最大值(即查看是否有溢出)\n                max_activation_count = [np.sum(abs(q_activation_txt) >= 127)]  # 统计该层溢出的数目\n                # q_weight_max = np.argmax(q_weight_txt)\n                np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),\n                           max_activation_count)\n                np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)\n                np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,\n                           delimiter='\\n')\n\n        output = self.activation_quantizer(output)\n        return output\n\n    def BN_fuse(self):\n        if self.bn:\n            # BN融合\n            if self.bias is not None:\n                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n                        self.gamma / torch.sqrt(self.running_var + self.eps)))\n            else:\n                bias = reshape_to_bias(\n                    self.beta - self.running_mean * self.gamma / torch.sqrt(\n                        self.running_var + self.eps))  # b融running\n            weight = self.weight * reshape_to_weight(\n                self.gamma / torch.sqrt(self.running_var + self.eps))  # w融running\n        else:\n            bias = self.bias\n            weight = self.weight\n        return weight, bias\n\n\nclass QuantizedShortcut_max(nn.Module):  # weighted sum of 2 or 
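\n    # NOTE (added): x, the incoming feature a, and the sum x + a share one power-of-two\n    # scale derived from the max over all three tracked ranges, so the residual add can\n    # stay entirely in the integer domain; QuantizedShortcut_min below instead keeps a\n    # separate input_scale for the addends and re-derives the output scale from the sum.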
\n    def __init__(self, layers, weight=False, bits=8,\n                 quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):\n        super(QuantizedShortcut_max, self).__init__()\n        self.layers = layers  # layer indices\n        self.weight = weight  # apply weights boolean\n        self.n = len(layers) + 1  # number of layers\n        self.bits = bits\n        self.range_tracker_x = AveragedRangeTracker(q_level='L', out_channels=-1)\n        self.range_tracker_a = AveragedRangeTracker(q_level='L', out_channels=-1)\n        self.range_tracker_sum = AveragedRangeTracker(q_level='L', out_channels=-1)\n        self.register_buffer('scale', torch.zeros(1))  # quantization scale factor\n\n        self.quantizer_output = quantizer_output\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n        self.name = name\n        self.layer_idx = layer_idx\n\n        if weight:\n            self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)  # layer weights\n\n    # quantize\n    def quantize(self, input):\n        output = input / self.scale\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # saturate to the integer range\n    def clamp(self, input):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # dequantize\n    def dequantize(self, input):\n        output = (input) * self.scale\n        return output\n\n    def forward(self, x, outputs):\n        # Weights\n        if self.weight:\n            w = torch.sigmoid(self.w) * (2 / self.n)  # sigmoid weights (0-1)\n            x = x * w[0]\n\n        # Fusion\n        nx = x.shape[1]  # input channels\n        for i in range(self.n - 1):\n            a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]]  # feature to add\n            na = a.shape[1]  # feature channels\n            if self.training == True:\n                # track the ranges of both inputs and of the sum to derive one shared scale\n                self.range_tracker_x(x)\n                self.range_tracker_a(a)\n                if nx == na:  # same shape\n                    self.range_tracker_sum(x + a)\n                elif nx > na:  # slice input\n                    self.range_tracker_sum(x[:, :na] + a)  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n                else:  # slice feature\n                    self.range_tracker_sum(x + a[:, :nx])\n                float_max_val = max(self.range_tracker_sum.max_val, self.range_tracker_x.max_val,\n                                    self.range_tracker_a.max_val)\n                float_min_val = min(self.range_tracker_sum.min_val, self.range_tracker_x.min_val,\n                                    self.range_tracker_a.min_val)\n                quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))\n                quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # integer range after quantization\n                float_max = torch.max(torch.abs(float_min_val),\n                                      torch.abs(float_max_val))  # float range before quantization\n                floor_float_range = 2 ** float_max.log2().floor()\n                ceil_float_range = 2 ** float_max.log2().ceil()\n                # snap to the nearer power of two so the scale reduces to a pure bit-shift\n                if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):\n                    
float_range = ceil_float_range\n                else:\n                    float_range = floor_float_range\n                self.scale = float_range / quantized_range  # 量化比例因子\n\n            # 量化因子数据输出\n            if self.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/q_activation_out'):\n                    os.makedirs('./quantizer_output/q_activation_out')\n                if not os.path.isdir('./quantizer_output/a_scale_out'):\n                    os.makedirs('./quantizer_output/a_scale_out')\n                if not os.path.isdir('./quantizer_output/q_activation_max'):\n                    os.makedirs('./quantizer_output/q_activation_max')\n                if not os.path.isdir('./quantizer_output/max_activation_count'):\n                    os.makedirs('./quantizer_output/max_activation_count')\n                if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                    os.makedirs('./quantizer_output/q_activation_reorder')\n\n                if self.layer_idx == -1:\n\n                    move_scale = math.log2(self.scale)\n                    shortcut_scale = -np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,\n                               delimiter='\\n')\n\n                elif int(self.name[1:4]) == self.layer_idx:\n\n                    move_scale = math.log2(self.scale)\n                    shortcut_scale = -np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,\n                               delimiter='\\n')\n\n            # 量化x\n            x = self.quantize(x)  # 量化\n            x = self.round(x)\n            x = self.clamp(x)  # 截断\n            x = self.dequantize(x)  # 反量化\n\n            # 量化a\n            a = self.quantize(a)  # 量化\n            a = self.round(a)\n            a = self.clamp(a)  # 截断\n            a = self.dequantize(a)  # 反量化\n\n            # Adjust channels\n            if nx == na:  # same shape\n                x = x + a\n            elif nx > na:  # slice input\n                x[:, :na] = x[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n            else:  # slice feature\n                x = x + a[:, :nx]\n            # 量化和\n            x = self.quantize(x)  # 量化\n            x = self.round(x)\n            x = self.clamp(x)  # 截断\n\n            # 特征图量化数据输出\n            if self.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/q_activation_out'):\n                    os.makedirs('./quantizer_output/q_activation_out')\n                if not os.path.isdir('./quantizer_output/a_scale_out'):\n                    os.makedirs('./quantizer_output/a_scale_out')\n                if not os.path.isdir('./quantizer_output/q_activation_max'):\n                    os.makedirs('./quantizer_output/q_activation_max')\n                if not os.path.isdir('./quantizer_output/max_activation_count'):\n                    os.makedirs('./quantizer_output/max_activation_count')\n                if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                    os.makedirs('./quantizer_output/q_activation_reorder')\n\n                if self.layer_idx == -1:\n\n                    q_x_shortcut = x\n\n                    if self.reorder == True:\n                        a_para = q_x_shortcut\n                        # 重排序参数\n              
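# NOTE (added): each TN-channel tile below is transposed so its TN channels end up\n                        # innermost when flattened (H*W-major, channel-minor), matching the TM/TN\n                        # tiled weight reorder used elsewhere in this repo.\n              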
          # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        remainder_TN = shape_input % self.TN\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########shortcut重排序结束\n\n                    Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,\n                               delimiter='\\n')\n\n                elif int(self.name[1:4]) == self.layer_idx:\n\n                    q_x_shortcut = x\n\n                    if self.reorder == True:\n                        a_para = q_x_shortcut\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        remainder_TN = shape_input % self.TN\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n      
                  activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########shortcut重排序结束\n                    Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,\n                               delimiter='\\n')\n\n            x = self.dequantize(x)  # 反量化\n        return x\n\n\nclass QuantizedShortcut_min(nn.Module):  # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070\n    def __init__(self, layers, weight=False, bits=8,\n                 quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):\n        super(QuantizedShortcut_min, self).__init__()\n        self.layers = layers  # layer indices\n        self.weight = weight  # apply weights boolean\n        self.n = len(layers) + 1  # number of layers\n        self.bits = bits\n        self.range_tracker_x = AveragedRangeTracker(q_level='L', out_channels=-1)\n        self.range_tracker_a = AveragedRangeTracker(q_level='L', out_channels=-1)\n        self.range_tracker_sum = AveragedRangeTracker(q_level='L', out_channels=-1)\n        self.register_buffer('input_scale', torch.zeros(1))  # 量化比例因子\n        self.register_buffer('scale', torch.zeros(1))  # 量化比例因子\n\n        self.quantizer_output = quantizer_output\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n        self.name = name\n        self.layer_idx = layer_idx\n\n        if weight:\n            self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)  # layer weights\n\n    # 量化\n    def quantize(self, input, featrure_in=False):\n        if featrure_in:\n            output = input / self.input_scale\n        else:\n            output = input / self.scale\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # 截断\n    def clamp(self, input):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # 反量化\n    def dequantize(self, input, featrure_in=False):\n        if featrure_in:\n            output = (input) * self.input_scale\n        else:\n            output = (input) * self.scale\n        return output\n\n    def forward(self, x, outputs):\n        # Weights\n        if self.weight:\n            w = torch.sigmoid(self.w) * (2 / self.n)  # sigmoid weights (0-1)\n            x = x * w[0]\n\n        # Fusion\n        nx = x.shape[1]  # input channels\n        for i in range(self.n - 1):\n            a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]]  # feature to add\n            na = a.shape[1]  # feature channels\n            if self.training == True:\n                # 得到输入两个feature和一个输出的scale\n                self.range_tracker_a(a)\n                self.range_tracker_x(x)\n                float_max_val = min(self.range_tracker_x.max_val, self.range_tracker_a.max_val)\n                float_min_val = max(self.range_tracker_x.min_val, self.range_tracker_a.min_val)\n                quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))\n                quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 
1)\n                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # 量化后范围\n                float_max = torch.max(torch.abs(float_min_val),\n                                      torch.abs(float_max_val))  # 量化前范围\n                floor_float_range = 2 ** float_max.log2().floor()\n                ceil_float_range = 2 ** float_max.log2().ceil()\n                if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):\n                    float_range = ceil_float_range\n                else:\n                    float_range = floor_float_range\n                self.input_scale = float_range / quantized_range  # 量化比例因子\n\n            # 量化x\n            x = self.quantize(x, featrure_in=True)  # 量化\n            x = self.round(x)\n            x = self.dequantize(x, featrure_in=True)  # 反量化\n\n            # 量化a\n            a = self.quantize(a, featrure_in=True)  # 量化\n            a = self.round(a)\n            a = self.dequantize(a, featrure_in=True)  # 反量化\n\n            # Adjust channels\n            if nx == na:  # same shape\n                x = x + a\n            elif nx > na:  # slice input\n                x[:, :na] = x[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n            else:  # slice feature\n                x = x + a[:, :nx]\n            # 量化和\n            if self.training == True:\n                # 得到输入两个feature和一个输出的scale\n                self.range_tracker_sum(x)\n                float_max_val = self.range_tracker_sum.max_val\n                float_min_val = self.range_tracker_sum.min_val\n                quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))\n                quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # 量化后范围\n                float_max = torch.max(torch.abs(float_min_val),\n                                      torch.abs(float_max_val))  # 量化前范围\n                floor_float_range = 2 ** float_max.log2().floor()\n                ceil_float_range = 2 ** float_max.log2().ceil()\n                if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):\n                    float_range = ceil_float_range\n                else:\n                    float_range = floor_float_range\n                self.scale = float_range / quantized_range  # 量化比例因子\n            x = self.quantize(x)  # 量化\n            x = self.round(x)\n            x = self.clamp(x)  # 截断\n            # 量化因子数据输出\n            if self.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/q_activation_out'):\n                    os.makedirs('./quantizer_output/q_activation_out')\n                if not os.path.isdir('./quantizer_output/a_scale_out'):\n                    os.makedirs('./quantizer_output/a_scale_out')\n                if not os.path.isdir('./quantizer_output/q_activation_max'):\n                    os.makedirs('./quantizer_output/q_activation_max')\n                if not os.path.isdir('./quantizer_output/max_activation_count'):\n                    os.makedirs('./quantizer_output/max_activation_count')\n                if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                    os.makedirs('./quantizer_output/q_activation_reorder')\n\n                if self.layer_idx == -1:\n\n                    move_scale = math.log2(self.scale)\n                    shortcut_scale = - np.array(move_scale).reshape(1, 
-1)\n                    np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,\n                               delimiter='\\n')\n\n                elif int(self.name[1:4]) == self.layer_idx:\n\n                    move_scale = math.log2(self.scale)\n                    shortcut_scale = - np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,\n                               delimiter='\\n')\n            # 特征图量化数据输出\n            if self.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/q_activation_out'):\n                    os.makedirs('./quantizer_output/q_activation_out')\n                if not os.path.isdir('./quantizer_output/a_scale_out'):\n                    os.makedirs('./quantizer_output/a_scale_out')\n                if not os.path.isdir('./quantizer_output/q_activation_max'):\n                    os.makedirs('./quantizer_output/q_activation_max')\n                if not os.path.isdir('./quantizer_output/max_activation_count'):\n                    os.makedirs('./quantizer_output/max_activation_count')\n                if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                    os.makedirs('./quantizer_output/q_activation_reorder')\n\n                if self.layer_idx == -1:\n\n                    q_x_shortcut = x\n\n                    if self.reorder == True:\n                        a_para = q_x_shortcut\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        remainder_TN = shape_input % self.TN\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########shortcut重排序结束\n\n                    Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,\n                               delimiter='\\n')\n\n                
elif int(self.name[1:4]) == self.layer_idx:\n\n                    q_x_shortcut = x\n\n                    if self.reorder == True:\n                        a_para = q_x_shortcut\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        remainder_TN = shape_input % self.TN\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########shortcut重排序结束\n                    Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,\n                               delimiter='\\n')\n\n            x = self.dequantize(x)  # 反量化\n        return x\n\n\nclass QuantizedFeatureConcat(nn.Module):\n    def __init__(self, layers, groups, bits=8,\n                 quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):\n        super(QuantizedFeatureConcat, self).__init__()\n        self.layers = layers  # layer indices\n        self.groups = groups\n        self.multiple = len(layers) > 1  # multiple layers flag\n        self.register_buffer('scale', torch.zeros(1))  # 量化比例因子\n        self.register_buffer('float_max_list', torch.zeros(len(layers)))\n        self.bits = bits\n        self.momentum = 0.1\n        self.quantizer_output = quantizer_output\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n        self.name = name\n        self.layer_idx = layer_idx\n        # 量化\n\n    def quantize(self, input):\n        output = input / self.scale\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # 截断\n    def clamp(self, input):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # 反量化\n    def dequantize(self, input):\n        output = (input) * self.scale\n        return output\n\n    
def forward(self, x, outputs):\n        if self.multiple:\n            if self.training == True:\n                quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))\n                quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # 量化后范围\n                j = 0\n                for i in self.layers:\n                    temp = outputs[i].detach()\n                    if self.float_max_list[j] == 0:\n                        self.float_max_list[j].add_(\n                            torch.max(torch.max(temp), torch.abs(torch.min(temp))))\n                    else:\n                        self.float_max_list[j].mul_(1 - self.momentum).add_(\n                            torch.max(torch.max(temp), torch.abs(torch.min(temp))) * self.momentum)\n                    j = j + 1\n\n                    del temp\n                    torch.cuda.empty_cache()\n                float_max = max(self.float_max_list).unsqueeze(0)  # 量化前范围\n                floor_float_range = 2 ** float_max.log2().floor()\n                ceil_float_range = 2 ** float_max.log2().ceil()\n                if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):\n                    float_range = ceil_float_range\n                else:\n                    float_range = floor_float_range\n                self.scale = float_range / quantized_range  # 量化比例因子\n\n            if self.quantizer_output == True:\n\n                if self.layer_idx == -1:\n                    q_a_concat = copy.deepcopy(outputs)\n\n                    move_scale = math.log2(self.scale)\n                    concat_scale = -np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/concat_scale_%s.txt' % self.name), concat_scale,\n                               delimiter='\\n')\n\n                    for i in self.layers:\n                        q_a_concat[i] = self.quantize(q_a_concat[i])  # 量化\n                        q_a_concat[i] = self.round(q_a_concat[i])\n                        q_a_concat[i] = self.clamp(q_a_concat[i])  # 截断\n                    Q_concat = torch.cat([q_a_concat[i] for i in self.layers], 1)\n\n                    if self.reorder == True:\n                        a_para = Q_concat\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        
np.savetxt(('./quantizer_output/q_activation_reorder/r_concat_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_concat_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########concat重排序结束\n\n                    Q_concat = np.array(Q_concat.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/a_concat_%s.txt' % self.name), Q_concat,\n                               delimiter='\\n')\n                elif int(self.name[1:4]) == self.layer_idx:\n                    q_a_concat = copy.deepcopy(outputs)\n\n                    move_scale = math.log2(self.scale)\n                    concat_scale = -np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/concat_scale_%s.txt' % self.name), concat_scale,\n                               delimiter='\\n')\n\n                    for i in self.layers:\n                        q_a_concat[i] = self.quantize(q_a_concat[i])  # 量化\n                        q_a_concat[i] = self.round(q_a_concat[i])\n                        q_a_concat[i] = self.clamp(q_a_concat[i])  # 截断\n                    Q_concat = torch.cat([q_a_concat[i] for i in self.layers], 1)\n\n                    if self.reorder == True:\n                        a_para = Q_concat\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_concat_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_concat_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########concat重排序结束\n                    Q_concat = np.array(Q_concat.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/a_concat_%s.txt' % self.name), Q_concat,\n                               delimiter='\\n')\n\n            # 量化\n       
     for i in self.layers:\n                outputs[i] = self.quantize(outputs[i])  # 量化\n                outputs[i] = self.round(outputs[i])\n                outputs[i] = self.clamp(outputs[i])  # 截断\n                outputs[i] = self.dequantize(outputs[i])  # 反量化\n            return torch.cat([outputs[i] for i in self.layers], 1)\n        else:\n            if self.groups:\n                return x[:, (x.shape[1] // 2):]\n            else:\n                return outputs[self.layers[0]]\n"
  },
  {
    "path": "utils/quantized/quantized_lowbit.py",
    "content": "# Author:LiPu\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Function\nimport torch.nn.functional as F\n\n\n# 定义前向传播，反向传播三值化函数\nclass Ternarize(Function):\n    '''\n    Binarize the input activations and calculate the mean across channel dimension.\n    '''\n\n    # 使用静态方法定义三值激活类\n    @staticmethod\n    def forward(self, input):\n        self.save_for_backward(input)\n        output = input.new(input.size())\n        output[input > 0.5] = 1\n\n        # 由于暂时不知道pytorch如何进行与运算，用此段代码实现\n        # output[input>=-0.5 and input<=0.5]\n        temp = torch.add((input >= -0.5), (input <= 0.5))\n        temp[temp == 2] = 1\n        temp[temp == 1] = 0\n        output[temp] = 0\n\n        output[input < -0.5] = -1\n        return output\n\n    @staticmethod\n    def backward(self, grad_output):\n        input, = self.saved_tensors\n        grad_input = grad_output.clone()\n        grad_input[input.ge(1)] = 0\n        grad_input[input.le(-1)] = 0\n        return grad_input\n\n\n# 定义前向传播，反向传播二值化函数\nclass Binarize(Function):\n    @staticmethod\n    def forward(self, input):\n        self.save_for_backward(input)\n        output = input.new(input.size())\n        output[input >= 0] = 1\n        output[input < 0] = 0\n        return output\n\n    @staticmethod\n    def backward(self, grad_output):\n        input, = self.saved_tensors\n        grad_input = grad_output.clone()\n        grad_input[input.ge(1)] = 0\n        grad_input[input.le(-1)] = 0\n        return grad_input\n\n\nbinarize = Binarize.apply\n\nternarize = Ternarize.apply\n\n\n# 重载LeakyRelu\nclass BinaryLeakyReLU(nn.LeakyReLU):\n    def __init__(self):\n        super(BinaryLeakyReLU, self).__init__()\n\n    def forward(self, input):\n        output = EQ(input)\n        return output\n\n\n# 对线性层权重做量化，必须有reset_parameters函数\nclass BinaryLinear(nn.Linear):\n\n    def forward(self, input):\n        binary_weight = ternarize(self.weight)\n        if self.bias is None:\n            return F.linear(input, binary_weight)\n        else:\n            return F.linear(input, binary_weight, self.bias)\n\n    def reset_parameters(self):\n        # Glorot initialization\n        in_features, out_features = self.weight.size()\n        stdv = math.sqrt(1.5 / (in_features + out_features))\n        self.weight.data.uniform_(-stdv, stdv)\n        if self.bias is not None:\n            self.bias.data.zero_()\n\n        self.weight.lr_scale = 1. / stdv\n\n\n# BWN量化\nclass BWNConv2d(nn.Conv2d):\n\n    def forward(self, input):\n        bw = binarize(self.weight)\n        alpha = torch.div(self.weight.norm(1), torch.numel(self.weight))\n        output = alpha * (F.conv2d(input, bw, self.bias, self.stride,\n                                   self.padding, self.dilation, self.groups))\n        return output\n\n    def reset_parameters(self):\n        # Glorot initialization\n        in_features = self.in_channels\n        out_features = self.out_channels\n        for k in self.kernel_size:\n            in_features *= k\n            out_features *= k\n        stdv = math.sqrt(1.5 / (in_features + out_features))\n        self.weight.data.uniform_(-stdv, stdv)\n        if self.bias is not None:\n            self.bias.data.zero_()\n\n        self.weight.lr_scale = 1. 
/ stdv\n\n\n# BNN量化\nclass BinaryConv2d(nn.Conv2d):\n\n    def forward(self, input):\n        # bw = (self.weight - torch.mean(self.weight)) / torch.sqrt(torch.std(self.weight))\n        bw = binarize(self.weight)\n        return F.conv2d(input, bw, self.bias, self.stride,\n                        self.padding, self.dilation, self.groups)\n\n    def reset_parameters(self):\n        # Glorot initialization\n        in_features = self.in_channels\n        out_features = self.out_channels\n        for k in self.kernel_size:\n            in_features *= k\n            out_features *= k\n        stdv = math.sqrt(1.5 / (in_features + out_features))\n        self.weight.data.uniform_(-stdv, stdv)\n        if self.bias is not None:\n            self.bias.data.zero_()\n\n        self.weight.lr_scale = 1. / stdv\n"
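\n\n\n# Minimal smoke test (an illustrative sketch added for clarity; the layer classes\n# above are real, the input shapes below are arbitrary). Run this file directly to\n# check that the quantized layers execute; no backward pass is exercised here.\nif __name__ == '__main__':\n    with torch.no_grad():\n        x = torch.randn(1, 3, 8, 8)\n        conv = BinaryConv2d(3, 16, kernel_size=3, padding=1)\n        y = conv(x)\n        # binarize() maps values to {0, 1}; ternarize() maps values to {-1, 0, 1}\n        print(y.shape, binarize(conv.weight).unique(), ternarize(conv.weight).unique())\n"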
  },
  {
    "path": "utils/quantized/quantized_ptq.py",
    "content": "# Author:LiPu\nimport math\nimport time\nimport numpy as np\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Function\n\n\n# ********************* range_trackers(范围统计器，统计量化前范围) *********************\nclass RangeTracker(nn.Module):\n    def __init__(self, q_level):\n        super().__init__()\n        self.q_level = q_level\n\n    def update_range(self, min_val, max_val):\n        raise NotImplementedError\n\n    @torch.no_grad()\n    def forward(self, input):\n        if self.q_level == 'L':  # A,min_max_shape=(1, 1, 1, 1),layer级\n            min_val = torch.min(input)\n            max_val = torch.max(input)\n        elif self.q_level == 'C':  # W,min_max_shape=(N, 1, 1, 1),channel级\n            min_val = torch.min(torch.min(torch.min(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]\n            max_val = torch.max(torch.max(torch.max(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]\n        self.update_range(min_val, max_val)\n\n\nclass GlobalRangeTracker(RangeTracker):  # W,min_max_shape=(N, 1, 1, 1),channel级,取本次和之前相比的min_max —— (N, C, W, H)\n    def __init__(self, q_level, out_channels):\n        super().__init__(q_level)\n        if self.q_level == 'L':\n            self.register_buffer('min_val', torch.zeros(1))\n            self.register_buffer('max_val', torch.zeros(1))\n        elif self.q_level == 'C':\n            self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))\n            self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))\n        self.register_buffer('first_w', torch.zeros(1))\n\n    def update_range(self, min_val, max_val):\n        temp_minval = self.min_val\n        temp_maxval = self.max_val\n        if self.first_w == 0:\n            self.first_w.add_(1)\n            self.min_val.add_(min_val)\n            self.max_val.add_(max_val)\n        else:\n            self.min_val.add_(-temp_minval).add_(torch.min(temp_minval, min_val))\n            self.max_val.add_(-temp_maxval).add_(torch.max(temp_maxval, max_val))\n\n\nclass AveragedRangeTracker(RangeTracker):  # A,min_max_shape=(1, 1, 1, 1),layer级,取running_min_max —— (N, C, W, H)\n    def __init__(self, q_level, out_channels, momentum=0.1):\n        super().__init__(q_level)\n        self.momentum = momentum\n        if self.q_level == 'L':\n            self.register_buffer('min_val', torch.zeros(1))\n            self.register_buffer('max_val', torch.zeros(1))\n        elif self.q_level == 'C':\n            self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))\n            self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))\n        self.register_buffer('first_a', torch.zeros(1))\n\n    def update_range(self, min_val, max_val):\n        if self.first_a == 0:\n            self.first_a.add_(1)\n            self.min_val.add_(min_val)\n            self.max_val.add_(max_val)\n        else:\n            self.min_val.mul_(1 - self.momentum).add_(min_val * self.momentum)\n            self.max_val.mul_(1 - self.momentum).add_(max_val * self.momentum)\n\n\n# ********************* quantizers（量化器，量化） *********************\nclass Round(Function):\n\n    @staticmethod\n    def forward(self, input):\n        sign = torch.sign(input)\n        output = sign * torch.floor(torch.abs(input) + 0.5)\n        return output\n\n\nclass Quantizer(nn.Module):\n    def __init__(self, bits, range_tracker, out_channels, FPGA, 
\n    def __init__(self, bits, range_tracker, out_channels, FPGA, sign=True):\n        super().__init__()\n        self.bits = bits\n        self.range_tracker = range_tracker\n        self.FPGA = FPGA\n        self.sign = sign\n        if out_channels == -1:\n            self.register_buffer('scale', torch.zeros(1))  # quantization scale factor\n            self.register_buffer('zero_point', torch.zeros(1))  # quantization zero point\n        else:\n            self.register_buffer('scale', torch.zeros(out_channels, 1, 1, 1))  # quantization scale factor\n            self.register_buffer('zero_point', torch.zeros(out_channels, 1, 1, 1))  # quantization zero point\n\n    def update_params(self):\n        raise NotImplementedError\n\n    # quantize\n    def quantize(self, input):\n        output = input / self.scale + self.zero_point\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # saturate to the integer range\n    def clamp(self, input):\n        if self.sign:\n            min_val = torch.tensor(-(1 << (self.bits - 1)))\n            max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        else:\n            min_val = torch.tensor(0)\n            max_val = torch.tensor((1 << self.bits) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # dequantize\n    def dequantize(self, input):\n        output = (input - self.zero_point) * self.scale\n        return output\n\n    def forward(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('Binary quantization is not supported!')\n            assert self.bits != 1\n        else:\n            if self.training == True:\n                self.range_tracker(input)\n                self.update_params()\n            output = self.quantize(input)  # quantize\n            output = self.round(output)\n            output = self.clamp(output)  # clamp\n            output = self.dequantize(output)  # dequantize\n        return output\n\n    def get_quantize_value(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('Binary quantization is not supported!')\n            assert self.bits != 1\n        else:\n            output = self.quantize(input)  # quantize\n            output = self.round(output)\n            output = self.clamp(output)  # clamp\n        return output\n\n    ################ shift amount corresponding to the quantization scale\n    def get_scale(self):\n        ############# the scale is a power of two, so log2 gives the bit-shift\n        move_scale = math.log2(self.scale)\n        move_scale = np.array(move_scale).reshape(1, -1)\n        return move_scale\n\n\n# symmetric quantization\nclass SymmetricQuantizer(Quantizer):\n\n    def update_params(self):\n        if self.sign:\n            min_val = torch.tensor(-(1 << (self.bits - 1)))\n            max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        else:\n            min_val = torch.tensor(0)\n            max_val = torch.tensor((1 << self.bits) - 1)\n        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # integer range after quantization\n        if self.FPGA == False:\n            float_range = torch.max(torch.abs(self.range_tracker.min_val),\n                                    torch.abs(self.range_tracker.max_val))  # float range before quantization\n        else:\n            float_max = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val))  # float range before quantization\n            floor_float_range = 2 ** float_max.log2().floor()\n            ceil_float_range = 2 ** float_max.log2().ceil()\n            if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):\n                float_range = ceil_float_range\n            else:\n 
               float_range = floor_float_range\n        self.scale = float_range / quantized_range  # 量化比例因子\n        self.zero_point = torch.zeros_like(self.scale)  # 量化零点\n\n\n# 非对称量化\nclass AsymmetricQuantizer(Quantizer):\n\n    def update_params(self):\n        if self.sign:\n            min_val = torch.tensor(-(1 << (self.bits - 1)))\n            max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        else:\n            min_val = torch.tensor(0)\n            max_val = torch.tensor((1 << self.bits) - 1)\n        quantized_range = max_val - min_val  # 量化后范围\n        if self.FPGA == False:\n            float_range = self.range_tracker.max_val - self.range_tracker.min_val  # 量化前范围\n        else:\n            float_range = self.range_tracker.max_val - self.range_tracker.min_val  # 量化前范围\n            ceil_float_range = 2 ** float_range.log2().ceil()\n            floor_float_range = 2 ** float_range.log2().floor()\n            if abs(ceil_float_range - float_range) < abs(floor_float_range - float_range):\n                float_range = ceil_float_range\n            else:\n                float_range = floor_float_range\n        self.scale = float_range / quantized_range  # 量化比例因子\n        self.zero_point = torch.round(max_val - self.range_tracker.max_val / self.scale)  # 量化零点\n\n\n# ********************* 量化卷积（同时量化A/W，并做卷积） *********************\nclass PTQuantizedConv2d(nn.Conv2d):\n    def __init__(\n            self,\n            in_channels,\n            out_channels,\n            kernel_size,\n            stride=1,\n            padding=0,\n            dilation=1,\n            groups=1,\n            bias=True,\n            a_bits=8,\n            w_bits=8,\n            q_type=0):\n        super().__init__(\n            in_channels=in_channels,\n            out_channels=out_channels,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            groups=groups,\n            bias=bias\n        )\n        # 实例化量化器（A-layer级，W-channel级）\n        if q_type == 0:\n            self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L',\n                                                                                                           out_channels=-1),\n                                                           out_channels=-1, FPGA=False)\n            self.weight_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C',\n                                                                                                     out_channels=out_channels),\n                                                       out_channels=out_channels, FPGA=False)\n        else:\n            self.activation_quantizer = AsymmetricQuantizer(bits=a_bits,\n                                                            range_tracker=AveragedRangeTracker(q_level='L',\n                                                                                               out_channels=-1),\n                                                            out_channels=-1, FPGA=False, sign=False)\n            self.weight_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C',\n                                                                                                      out_channels=out_channels),\n                                                        out_channels=out_channels, FPGA=False, sign=False)\n\n    def forward(self, input):\n      
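# NOTE (added): fake-quantization round trip with e.g. scale = 0.5, zero_point = 0,\n        # 8-bit signed: 1.3 -> 1.3 / 0.5 = 2.6 -> round -> 3 -> clamp to [-128, 127] -> 3 * 0.5 = 1.5,\n        # i.e. the activation snaps to the nearest representable multiple of the scale.\n      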
  # 量化A和W\n        if input.shape[1] != 3:\n            input = self.activation_quantizer(input)\n        q_weight = self.weight_quantizer(self.weight)\n        # 量化卷积\n        output = F.conv2d(\n            input=input,\n            weight=q_weight,\n            bias=self.bias,\n            stride=self.stride,\n            padding=self.padding,\n            dilation=self.dilation,\n            groups=self.groups\n        )\n        return output\n\n\ndef reshape_to_activation(input):\n    return input.reshape(1, -1, 1, 1)\n\n\ndef reshape_to_weight(input):\n    return input.reshape(-1, 1, 1, 1)\n\n\ndef reshape_to_bias(input):\n    return input.reshape(-1)\n\n\n# ********************* bn融合_量化卷积（bn融合后，同时量化A/W，并做卷积） *********************\n\n\nclass BNFold_PTQuantizedConv2d_For_FPGA(PTQuantizedConv2d):\n    def __init__(\n            self,\n            in_channels,\n            out_channels,\n            kernel_size,\n            stride=1,\n            padding=0,\n            dilation=1,\n            groups=1,\n            bias=False,\n            eps=1e-5,\n            momentum=0.01,  # 考虑量化带来的抖动影响,对momentum进行调整(0.1 ——> 0.01),削弱batch统计参数占比，一定程度抑制抖动。经实验量化训练效果更好,acc提升1%左右\n            a_bits=8,\n            w_bits=8,\n            q_type=0,\n            bn=0,\n            activate='leaky',\n            quantizer_output=False,\n            reorder=False, TM=32, TN=32,\n            name='', layer_idx=-1,\n            maxabsscaler=False\n    ):\n        super().__init__(\n            in_channels=in_channels,\n            out_channels=out_channels,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            groups=groups,\n            bias=bias\n        )\n        self.bn = bn\n        self.activate = activate\n        self.eps = eps\n        self.momentum = momentum\n        self.gamma = Parameter(torch.Tensor(out_channels))\n        self.beta = Parameter(torch.Tensor(out_channels))\n        self.register_buffer('running_mean', torch.zeros(out_channels))\n        self.register_buffer('running_var', torch.zeros(out_channels))\n        self.register_buffer('batch_mean', torch.zeros(out_channels))\n        self.register_buffer('batch_var', torch.zeros(out_channels))\n        self.register_buffer('first_bn', torch.zeros(1))\n        self.quantizer_output = quantizer_output\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n        self.name = name\n        self.layer_idx = layer_idx\n        self.maxabsscaler = maxabsscaler\n        self.a_bits = a_bits\n        self.w_bits = w_bits\n        # 实例化量化器（A-layer级，W-channel级）\n        if q_type == 0:\n            self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L',\n                                                                                                           out_channels=-1),\n                                                           out_channels=-1, FPGA=True)\n            self.weight_quantizer = SymmetricQuantizer(bits=w_bits,\n                                                       range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),\n                                                       out_channels=-1, FPGA=True)\n            self.bias_quantizer = SymmetricQuantizer(bits=w_bits,\n                                                     range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),\n                                                     out_channels=-1, 
\n
\n
# ********************* BN-fold quantized convolution (fold BN, then quantize A/W and convolve) *********************\n
\n
\n
class BNFold_PTQuantizedConv2d_For_FPGA(PTQuantizedConv2d):\n
    def __init__(\n
            self,\n
            in_channels,\n
            out_channels,\n
            kernel_size,\n
            stride=1,\n
            padding=0,\n
            dilation=1,\n
            groups=1,\n
            bias=False,\n
            eps=1e-5,\n
            momentum=0.01,  # lowered from 0.1 to damp quantization jitter: batch statistics get less weight, which suppresses oscillation; empirically ~1% better accuracy in quantized training\n
            a_bits=8,\n
            w_bits=8,\n
            q_type=0,\n
            bn=0,\n
            activate='leaky',\n
            quantizer_output=False,\n
            reorder=False, TM=32, TN=32,\n
            name='', layer_idx=-1,\n
            maxabsscaler=False\n
    ):\n
        super().__init__(\n
            in_channels=in_channels,\n
            out_channels=out_channels,\n
            kernel_size=kernel_size,\n
            stride=stride,\n
            padding=padding,\n
            dilation=dilation,\n
            groups=groups,\n
            bias=bias\n
        )\n
        self.bn = bn\n
        self.activate = activate\n
        self.eps = eps\n
        self.momentum = momentum\n
        self.gamma = Parameter(torch.Tensor(out_channels))\n
        self.beta = Parameter(torch.Tensor(out_channels))\n
        self.register_buffer('running_mean', torch.zeros(out_channels))\n
        self.register_buffer('running_var', torch.zeros(out_channels))\n
        self.register_buffer('batch_mean', torch.zeros(out_channels))\n
        self.register_buffer('batch_var', torch.zeros(out_channels))\n
        self.register_buffer('first_bn', torch.zeros(1))\n
        self.quantizer_output = quantizer_output\n
        self.reorder = reorder\n
        self.TM = TM\n
        self.TN = TN\n
        self.name = name\n
        self.layer_idx = layer_idx\n
        self.maxabsscaler = maxabsscaler\n
        self.a_bits = a_bits\n
        self.w_bits = w_bits\n
        # Instantiate the quantizers (A at layer granularity, W at channel granularity)\n
        if q_type == 0:\n
            self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L', out_channels=-1), out_channels=-1, FPGA=True)\n
            self.weight_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1), out_channels=-1, FPGA=True)\n
            self.bias_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1), out_channels=-1, FPGA=True)\n
        else:\n
            self.activation_quantizer = AsymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L', out_channels=-1), out_channels=-1, FPGA=True, sign=False)\n
            self.weight_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1), out_channels=-1, FPGA=True, sign=False)\n
            self.bias_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1), out_channels=-1, FPGA=True, sign=False)\n
\n
    @staticmethod\n
    def _ensure_dir(path):\n
        if not os.path.isdir(path):\n
            os.makedirs(path)\n
\n
    def _reorder_weight(self, w_para):\n
        # Rearrange (OC, IC, K, K) weights into the TM x TN tiled stream expected by the FPGA kernel.\n
        shape_output = w_para.shape[0]\n
        shape_input = w_para.shape[1]\n
        num_TN = shape_input // self.TN\n
        remainder_TN = shape_input % self.TN\n
        num_TM = shape_output // self.TM\n
        remainder_TM = shape_output % self.TM\n
        first = True\n
        reorder_w_para = None\n
        if self.activate == 'linear':\n
            print('layer-linear reorder!')\n
            for k in range(num_TN):\n
                temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]\n
                temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n
                temp = temp.permute(2, 0, 1).contiguous().view(-1)\n
                if first:  # allocate the output array\n
                    reorder_w_para = temp.clone().cpu().data.numpy()\n
                    first = False\n
                else:\n
                    reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n
        else:\n
            for j in range(num_TM):\n
                if shape_input == 3 or shape_input == 1:  # first layer: zero-pad the channel remainder up to TN\n
                    print('The first layer~~~~~~~~~~~~')\n
                    temp = w_para[j * self.TM:(j + 1) * self.TM, num_TN * self.TN:num_TN * self.TN + remainder_TN, :, :]\n
                    temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n
                    fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)\n
                    fill[:, 0:remainder_TN, :] = temp\n
                    temp = fill.permute(2, 0, 1).contiguous().view(-1)\n
                    if first:  # allocate the output array\n
                        reorder_w_para = temp.clone().cpu().data.numpy()\n
                        first = False\n
                    else:\n
                        reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n
                else:\n
                    for k in range(num_TN):\n
                        temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]\n
                        # pack into the TM x TN x K^2 tile of Fig. 10(a) of the reference paper\n
                        temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n
                        # serialize into the reordered layout of Fig. 10(b)\n
                        temp = temp.permute(2, 0, 1).contiguous().view(-1)\n
                        if first:\n
                            reorder_w_para = temp.clone().cpu().data.numpy()\n
                            first = False\n
                        else:\n
                            reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n
        # sanity check (kept from the original, disabled): reorder_w_para.size should equal w_para.numel()\n
        return reorder_w_para\n
\n
    def _dump_weight_and_bias(self, weight, bias):\n
        ####################### dump this layer's weight quantization scale (as a shift count)\n
        weight_scale = -self.weight_quantizer.get_scale()\n
        np.savetxt('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name, weight_scale, delimiter='\\n')\n
        ####################### dump this layer's quantized weights\n
        q_weight_txt = self.weight_quantizer.get_quantize_value(weight)\n
        ############# weight reorder\n
        q_weight_reorder = None\n
        if self.reorder:\n
            q_weight_reorder = np.array(self._reorder_weight(q_weight_txt)).reshape(1, -1)\n
            np.savetxt('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name, q_weight_reorder, delimiter='\\n')\n
        ################ end of weight reorder\n
        q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)\n
        q_weight_max = [np.max(q_weight_txt)]\n
        max_weight_count = [np.sum(abs(q_weight_txt) >= (1 << (self.w_bits - 1)) - 1)]  # number of saturated weights in this layer\n
        np.savetxt('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name, max_weight_count)\n
        np.savetxt('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name, q_weight_max)\n
        np.savetxt('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name, q_weight_txt, delimiter='\\n')\n
        ####################### bias output directories\n
        self._ensure_dir('./quantizer_output/q_bias_out')\n
        self._ensure_dir('./quantizer_output/b_scale_out')\n
        ####################### dump this layer's bias quantization scale\n
        bias_scale = -self.bias_quantizer.get_scale()\n
        np.savetxt('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name, bias_scale, delimiter='\\n')\n
        ####################### dump this layer's quantized bias\n
        q_bias_txt = self.bias_quantizer.get_quantize_value(bias)\n
        q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)\n
        np.savetxt('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name, q_bias_txt, delimiter='\\n')\n
        ############# bias reorder: pad to a fixed 2048-entry block, then store bias + weights together\n
        if self.reorder:\n
            b_para = np.zeros(2048, dtype=int)\n
            b_para[0:q_bias_txt.size] = q_bias_txt\n
            np.savetxt('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name, b_para, delimiter='\\n')\n
            ###### save the reordered bias + weights as one int8 binary file\n
            wb_flat = np.append(b_para, q_weight_reorder).astype(np.int8)\n
            with open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, 'wb') as writer:\n
                writer.write(wb_flat)\n
        ################ end of bias reorder\n
\n
    def _dump_activation(self, output):\n
        ################## dump this layer's activation quantization scale\n
        activation_scale = -self.activation_quantizer.get_scale()\n
        np.savetxt('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name, activation_scale, delimiter='\\n')\n
        ################## dump this layer's quantized activations\n
        q_activation_txt = self.activation_quantizer.get_quantize_value(output)\n
        ############# feature-map reorder\n
        if self.reorder:\n
            a_para = q_activation_txt\n
            shape_input = a_para.shape[1]\n
            num_TN = shape_input // self.TN\n
            remainder_TN = shape_input % self.TN\n
            first = True\n
            reorder_a_para = None\n
            if self.activate == 'linear':\n
                print('layer-linear reorder!')\n
                temp = a_para[:, 0:remainder_TN, :, :]\n
                temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n
                temp = temp.permute(1, 2, 0).contiguous().view(-1)\n
                reorder_a_para = temp.clone().cpu().data.numpy()\n
            else:\n
                for k in range(num_TN):\n
                    temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n
                    temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n
                    temp = temp.permute(1, 2, 0).contiguous().view(-1)\n
                    if first:\n
                        reorder_a_para = temp.clone().cpu().data.numpy()\n
                        first = False\n
                    else:\n
                        reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n
            # sanity check (kept from the original, disabled): reorder_a_para.size should equal a_para.numel()\n
            q_activation_reorder = np.array(reorder_a_para).reshape(1, -1)\n
            np.savetxt('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name, q_activation_reorder, delimiter='\\n')\n
            ### save the reordered stream as an int8 binary file\n
            activation_flat = q_activation_reorder.astype(np.int8)\n
            with open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, 'wb') as writer:\n
                writer.write(activation_flat)\n
        ########## end of feature-map reorder\n
        q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)\n
        q_activation_max = [np.max(q_activation_txt)]  # max value in this layer (overflow check)\n
        max_activation_count = [np.sum(abs(q_activation_txt) >= (1 << (self.w_bits - 1)) - 1)]  # number of saturated activations in this layer\n
        np.savetxt('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name, max_activation_count)\n
        np.savetxt('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name, q_activation_max)\n
        np.savetxt('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name, q_activation_txt, delimiter='\\n')\n
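\n
    # When quantizer_output is set, forward() mirrors everything it computes\n
    # into ./quantizer_output/: shift-style scale factors (w_scale_out/,\n
    # b_scale_out/, a_scale_out/), integer tensors (q_weight_out/, q_bias_out/,\n
    # q_activation_out/), saturation statistics (q_weight_max/,\n
    # max_weight_count/, q_activation_max/, max_activation_count/) and the\n
    # FPGA-ordered streams plus int8 blobs (q_weight_reorder/, q_bias_reorder/,\n
    # q_activation_reorder/). Layers are filtered by layer_idx (-1 = all).\n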
\n
    def forward(self, input):\n
        if self.bn:\n
            # Fold BN into the conv parameters\n
            if self.bias is not None:\n
                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n
                        self.gamma / torch.sqrt(self.running_var + self.eps)))\n
            else:\n
                bias = reshape_to_bias(\n
                    self.beta - self.running_mean * self.gamma / torch.sqrt(\n
                        self.running_var + self.eps))  # fold running stats into bias\n
            weight = self.weight * reshape_to_weight(\n
                self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into weight\n
        else:\n
            bias = self.bias\n
            weight = self.weight\n
\n
        # Quantize A and the BN-folded W\n
        q_weight = self.weight_quantizer(weight)\n
        q_bias = self.bias_quantizer(bias)\n
\n
        if self.quantizer_output:  # dump quantization parameters to txt files\n
            # create the quantizer_output directory tree\n
            for sub in ('', 'q_weight_out', 'w_scale_out', 'q_weight_max', 'max_weight_count', 'q_weight_reorder', 'q_bias_reorder'):\n
                self._ensure_dir(os.path.join('./quantizer_output', sub))\n
            # dump every layer (layer_idx == -1) or only the requested one\n
            if self.layer_idx == -1 or int(self.name[1:4]) == self.layer_idx:\n
                self._dump_weight_and_bias(weight, bias)\n
\n
        # convolution with quantized operands\n
        output = F.conv2d(\n
            input=input,\n
            weight=q_weight,\n
            bias=q_bias,  # note: bias included, so this is the full conv + BN\n
            stride=self.stride,\n
            padding=self.padding,\n
            dilation=self.dilation,\n
            groups=self.groups\n
        )\n
        if self.activate == 'leaky':\n
            output = F.leaky_relu(output, 0.125 if not self.maxabsscaler else 0.25, inplace=True)\n
        elif self.activate == 'relu6':\n
            output = F.relu6(output, inplace=True)\n
        elif self.activate == 'h_swish':\n
            output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)\n
        elif self.activate == 'relu':\n
            output = F.relu(output, inplace=True)\n
        elif self.activate == 'mish':\n
            output = output * F.softplus(output).tanh()\n
        elif self.activate == 'linear':\n
            pass\n
        else:\n
            print('%s is not supported !' % self.activate)\n
\n
        if self.quantizer_output:\n
            for sub in ('q_activation_out', 'a_scale_out', 'q_activation_max', 'max_activation_count', 'q_activation_reorder'):\n
                self._ensure_dir(os.path.join('./quantizer_output', sub))\n
            if self.layer_idx == -1 or int(self.name[1:4]) == self.layer_idx:\n
                self._dump_activation(output)\n
\n
        output = self.activation_quantizer(output)\n
        return output\n
\n
    def BN_fuse(self):\n
        if self.bn:\n
            # Fold BN (read-only variant, e.g. for export)\n
            if self.bias is not None:\n
                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n
                        self.gamma / torch.sqrt(self.running_var + self.eps)))\n
            else:\n
                bias = reshape_to_bias(\n
                    self.beta - self.running_mean * self.gamma / torch.sqrt(\n
                        self.running_var + self.eps))  # fold running stats into bias\n
            weight = self.weight * reshape_to_weight(\n
                self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into weight\n
        else:\n
            bias = self.bias\n
            weight = self.weight\n
        return weight, bias
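\n
\n
\n
# --- Minimal usage sketch (added; illustrative only, assumes benign BN statistics). ---\n
# Builds one folded layer, fills the otherwise uninitialized BN parameters and\n
# checks that BN_fuse() returns tensors of the shapes forward() relies on.\n
def _bnfold_layer_smoke_test():\n
    layer = BNFold_PTQuantizedConv2d_For_FPGA(3, 16, 3, padding=1, bn=1, activate='linear')\n
    layer.gamma.data.fill_(1.0)\n
    layer.beta.data.zero_()\n
    layer.running_var.fill_(1.0)  # running_mean buffer is already zero\n
    weight, bias = layer.BN_fuse()\n
    assert weight.shape == layer.weight.shape\n
    assert bias.shape == (16,)"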
  },
  {
    "path": "utils/quantized/quantized_ptq_cos.py",
    "content": "# Author:LiPu\nimport math\nimport numpy as np\nimport os\nimport copy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Function\n\n\n# ********************* quantizers（量化器，量化） *********************\nclass Round(Function):\n\n    @staticmethod\n    def forward(self, input):\n        sign = torch.sign(input)\n        output = sign * torch.floor(torch.abs(input) + 0.5)\n        return output\n\n\nclass Quantizer(nn.Module):\n    def __init__(self, bits, out_channels):\n        super().__init__()\n        self.bits = bits\n        if out_channels == -1:\n            self.register_buffer('scale', torch.zeros(1))  # 量化比例因子\n            self.register_buffer('float_range', torch.zeros(1))\n        else:\n            self.register_buffer('scale', torch.zeros(out_channels, 1, 1, 1))  # 量化比例因子\n            self.register_buffer('float_range', torch.zeros(out_channels, 1, 1, 1))\n        self.scale_list = [0 for i in range(bits + 7)]\n\n    def update_params(self, step):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # 量化后范围\n        temp = self.float_range\n        self.float_range.add_(-temp).add_(2 ** step)\n        self.scale = self.float_range / quantized_range  # 量化比例因子\n\n    # 量化\n    def quantize(self, input):\n        output = input / self.scale\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # 截断\n    def clamp(self, input):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # 反量化\n    def dequantize(self, input):\n        output = (input) * self.scale\n        return output\n\n    def forward(self, input):\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('！Binary quantization is not supported ！')\n            assert self.bits != 1\n        else:\n            if self.training == True:\n                max_metrics = -1\n                max_step = 0\n                for i in range(self.bits + 7):\n                    self.update_params(i - 5)\n                    output = self.quantize(input)  # 量化\n                    output = self.round(output)\n                    output = self.clamp(output)  # 截断\n                    output = self.dequantize(output)  # 反量化\n                    cosine_similarity = torch.cosine_similarity(input.view(-1), output.view(-1), dim=0)\n                    if cosine_similarity > max_metrics:\n                        max_metrics = cosine_similarity\n                        max_step = i\n                    torch.cuda.empty_cache()\n                self.scale_list[max_step] += 1\n                Global_max_step = self.scale_list.index(max(self.scale_list)) - 5\n                self.update_params(Global_max_step)\n\n            output = self.quantize(input)  # 量化\n            output = self.round(output)\n            output = self.clamp(output)  # 截断\n            output = self.dequantize(output)  # 反量化\n            return output\n\n    def get_quantize_value(self, input):\n\n        if self.bits == 32:\n            output = input\n        elif self.bits == 1:\n            print('！Binary quantization is not supported ！')\n            assert 
\n
\n
def reshape_to_activation(input):\n
    return input.reshape(1, -1, 1, 1)\n
\n
\n
def reshape_to_weight(input):\n
    return input.reshape(-1, 1, 1, 1)\n
\n
\n
def reshape_to_bias(input):\n
    return input.reshape(-1)\n
\n
\n
# ********************* BN-fold quantized convolution (fold BN, then quantize A/W and convolve) *********************\n
\n
\n
class BNFold_COSPTQuantizedConv2d_For_FPGA(nn.Conv2d):\n
    def __init__(\n
            self,\n
            in_channels,\n
            out_channels,\n
            kernel_size,\n
            stride=1,\n
            padding=0,\n
            dilation=1,\n
            groups=1,\n
            bias=False,\n
            eps=1e-5,\n
            momentum=0.1,\n
            a_bits=8,\n
            w_bits=8,\n
            bn=0,\n
            activate='leaky',\n
            quantizer_output=False,\n
            reorder=False, TM=32, TN=32,\n
            name='', layer_idx=-1, maxabsscaler=False\n
    ):\n
        super().__init__(\n
            in_channels=in_channels,\n
            out_channels=out_channels,\n
            kernel_size=kernel_size,\n
            stride=stride,\n
            padding=padding,\n
            dilation=dilation,\n
            groups=groups,\n
            bias=bias,\n
        )\n
\n
        self.bn = bn\n
        if not bias:\n
            # always keep a bias Parameter: BN folding writes into it\n
            self.bias = Parameter(torch.zeros(out_channels))\n
        self.activate = activate\n
        self.eps = eps\n
        self.momentum = momentum\n
        self.gamma = Parameter(torch.Tensor(out_channels))\n
        self.beta = Parameter(torch.Tensor(out_channels))\n
        self.register_buffer('running_mean', torch.zeros(out_channels))\n
        self.register_buffer('running_var', torch.zeros(out_channels))\n
        self.register_buffer('q_bias', torch.zeros(out_channels))\n
        self.register_buffer('q_weight', torch.zeros(self.weight.shape))\n
        self.efficency = 0\n
        self.deviation = 0\n
        self.stop = False\n
        self.quantized = False\n
        self.quantizer_output = quantizer_output\n
        self.reorder = reorder\n
        self.TM = TM\n
        self.TN = TN\n
        self.name = name\n
        self.layer_idx = layer_idx\n
        self.maxabsscaler = maxabsscaler\n
        self.a_bits = a_bits\n
        self.w_bits = w_bits\n
        # Instantiate the quantizers (A at layer granularity, W at channel granularity)\n
        self.activation_quantizer = Quantizer(bits=a_bits, out_channels=-1)\n
        self.weight_quantizer = Quantizer(bits=w_bits, out_channels=-1)\n
        self.bias_quantizer = Quantizer(bits=w_bits, out_channels=-1)\n
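\n
    # Bias correction, run once per calibration batch while self.stop is False:\n
    # forward() compares conv(x_q, w_q, b_q) against conv(x_q, w_float, b_float)\n
    # and nudges the float bias by `rate` times the per-channel mean error,\n
    #     bias <- bias - rate * mean_{n,h,w}(quant_out - float_out),\n
    # then re-quantizes it. The loop stops once the SNR-like `efficency`\n
    # statistic saturates (or the error collapses to zero).\n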
\n
    @staticmethod\n
    def _ensure_dir(path):\n
        if not os.path.isdir(path):\n
            os.makedirs(path)\n
\n
    def _reorder_weight(self, w_para):\n
        # Rearrange (OC, IC, K, K) weights into the TM x TN tiled stream expected by the FPGA kernel.\n
        shape_output = w_para.shape[0]\n
        shape_input = w_para.shape[1]\n
        num_TN = shape_input // self.TN\n
        remainder_TN = shape_input % self.TN\n
        num_TM = shape_output // self.TM\n
        remainder_TM = shape_output % self.TM\n
        first = True\n
        reorder_w_para = None\n
        if self.activate == 'linear':\n
            print('layer-linear reorder!')\n
            for k in range(num_TN):\n
                temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]\n
                temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n
                temp = temp.permute(2, 0, 1).contiguous().view(-1)\n
                if first:  # allocate the output array\n
                    reorder_w_para = temp.clone().cpu().data.numpy()\n
                    first = False\n
                else:\n
                    reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n
        else:\n
            for j in range(num_TM):\n
                if shape_input == 3 or shape_input == 1:  # first layer: zero-pad the channel remainder up to TN\n
                    print('The first layer~~~~~~~~~~~~')\n
                    temp = w_para[j * self.TM:(j + 1) * self.TM, num_TN * self.TN:num_TN * self.TN + remainder_TN, :, :]\n
                    temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n
                    fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)\n
                    fill[:, 0:remainder_TN, :] = temp\n
                    temp = fill.permute(2, 0, 1).contiguous().view(-1)\n
                    if first:  # allocate the output array\n
                        reorder_w_para = temp.clone().cpu().data.numpy()\n
                        first = False\n
                    else:\n
                        reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n
                else:\n
                    for k in range(num_TN):\n
                        temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]\n
                        # pack into the TM x TN x K^2 tile of Fig. 10(a) of the reference paper\n
                        temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])\n
                        # serialize into the reordered layout of Fig. 10(b)\n
                        temp = temp.permute(2, 0, 1).contiguous().view(-1)\n
                        if first:\n
                            reorder_w_para = temp.clone().cpu().data.numpy()\n
                            first = False\n
                        else:\n
                            reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())\n
        # sanity check (kept from the original, disabled): reorder_w_para.size should equal w_para.numel()\n
        return reorder_w_para\n
\n
    def _dump_weight_and_bias(self, weight, bias):\n
        ####################### dump this layer's weight quantization scale (as a shift count)\n
        weight_scale = -self.weight_quantizer.get_scale()\n
        np.savetxt('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name, weight_scale, delimiter='\\n')\n
        ####################### dump this layer's quantized weights\n
        q_weight_txt = self.weight_quantizer.get_quantize_value(weight)\n
        ############# weight reorder\n
        q_weight_reorder = None\n
        if self.reorder:\n
            q_weight_reorder = np.array(self._reorder_weight(q_weight_txt)).reshape(1, -1)\n
            np.savetxt('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name, q_weight_reorder, delimiter='\\n')\n
        ################ end of weight reorder\n
        q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)\n
        q_weight_max = [np.max(q_weight_txt)]\n
        max_weight_count = [np.sum(abs(q_weight_txt) >= (1 << (self.w_bits - 1)) - 1)]  # number of saturated weights in this layer\n
        np.savetxt('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name, max_weight_count)\n
        np.savetxt('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name, q_weight_max)\n
        np.savetxt('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name, q_weight_txt, delimiter='\\n')\n
        ####################### bias output directories\n
        self._ensure_dir('./quantizer_output/q_bias_out')\n
        self._ensure_dir('./quantizer_output/b_scale_out')\n
        ####################### dump this layer's bias quantization scale\n
        bias_scale = -self.bias_quantizer.get_scale()\n
        np.savetxt('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name, bias_scale, delimiter='\\n')\n
        ####################### dump this layer's quantized bias\n
        q_bias_txt = self.bias_quantizer.get_quantize_value(bias)\n
        q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)\n
        np.savetxt('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name, q_bias_txt, delimiter='\\n')\n
        ############# bias reorder: pad to a fixed 2048-entry block, then store bias + weights together\n
        if self.reorder:\n
            b_para = np.zeros(2048, dtype=int)\n
            b_para[0:q_bias_txt.size] = q_bias_txt\n
            np.savetxt('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name, b_para, delimiter='\\n')\n
            ###### save the reordered bias + weights as one int8 binary file\n
            wb_flat = np.append(b_para, q_weight_reorder).astype(np.int8)\n
            with open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, 'wb') as writer:\n
                writer.write(wb_flat)\n
        ################ end of bias reorder\n
\n
    def _dump_activation(self, output):\n
        ################## dump this layer's activation quantization scale\n
        activation_scale = -self.activation_quantizer.get_scale()\n
        np.savetxt('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name, activation_scale, delimiter='\\n')\n
        ################## dump this layer's quantized activations\n
        q_activation_txt = self.activation_quantizer.get_quantize_value(output)\n
        ############# feature-map reorder\n
        if self.reorder:\n
            a_para = q_activation_txt\n
            shape_input = a_para.shape[1]\n
            num_TN = shape_input // self.TN\n
            remainder_TN = shape_input % self.TN\n
            first = True\n
            reorder_a_para = None\n
            if self.activate == 'linear':\n
                print('layer-linear reorder!')\n
                temp = a_para[:, 0:remainder_TN, :, :]\n
                temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n
                temp = temp.permute(1, 2, 0).contiguous().view(-1)\n
                reorder_a_para = temp.clone().cpu().data.numpy()\n
            else:\n
                for k in range(num_TN):\n
                    temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n
                    temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n
                    temp = temp.permute(1, 2, 0).contiguous().view(-1)\n
                    if first:\n
                        reorder_a_para = temp.clone().cpu().data.numpy()\n
                        first = False\n
                    else:\n
                        reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n
            # sanity check (kept from the original, disabled): reorder_a_para.size should equal a_para.numel()\n
            q_activation_reorder = np.array(reorder_a_para).reshape(1, -1)\n
            np.savetxt('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name, q_activation_reorder, delimiter='\\n')\n
            ### save the reordered stream as an int8 binary file\n
            activation_flat = q_activation_reorder.astype(np.int8)\n
            with open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, 'wb') as writer:\n
                writer.write(activation_flat)\n
        ########## end of feature-map reorder\n
        q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)\n
        q_activation_max = [np.max(q_activation_txt)]  # max value in this layer (overflow check)\n
        max_activation_count = [np.sum(abs(q_activation_txt) >= (1 << (self.w_bits - 1)) - 1)]  # number of saturated activations in this layer\n
        np.savetxt('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name, max_activation_count)\n
        np.savetxt('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name, q_activation_max)\n
        np.savetxt('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name, q_activation_txt, delimiter='\\n')\n
\n
    def forward(self, input):\n
        if not self.quantized:\n
            if self.bn:\n
                # Fold BN into the conv parameters once, in place\n
                if self.bias is not None:\n
                    self.bias.data = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n
                            self.gamma / torch.sqrt(self.running_var + self.eps)))\n
                else:\n
                    self.bias.data = reshape_to_bias(\n
                        self.beta - self.running_mean * self.gamma / torch.sqrt(\n
                            self.running_var + self.eps))  # fold running stats into bias\n
                self.weight.data = self.weight * reshape_to_weight(\n
                    self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into weight\n
            # Quantize A and the BN-folded W\n
            self.q_weight = self.weight_quantizer(self.weight)\n
            self.q_bias = self.bias_quantizer(self.bias)\n
            self.quantized = True\n
        if self.training:\n
            if isinstance(input, list):\n
                quant_input = input[0]\n
                float_input = input[1]\n
            else:\n
                quant_input = input\n
                float_input = input\n
\n
            # float convolution (reference path)\n
            float_output = F.conv2d(\n
                input=float_input,\n
                weight=self.weight,\n
                bias=self.bias,\n
                stride=self.stride,\n
                padding=self.padding,\n
                dilation=self.dilation,\n
                groups=self.groups\n
            )\n
\n
            # bias correction\n
            if not self.stop:\n
                # quantized convolution\n
                output = F.conv2d(\n
                    input=quant_input,\n
                    weight=self.q_weight,\n
                    bias=self.q_bias,\n
                    stride=self.stride,\n
                    padding=self.padding,\n
                    dilation=self.dilation,\n
                    groups=self.groups\n
                )\n
\n
                # compensation convolution: float weights on the quantized input\n
                correct_output = F.conv2d(\n
                    input=quant_input,\n
                    weight=self.weight,\n
                    bias=self.bias,\n
                    stride=self.stride,\n
                    padding=self.padding,\n
                    dilation=self.dilation,\n
                    groups=self.groups\n
                )\n
                rate = 0.05\n
                error = torch.add(output, correct_output, alpha=-1).data\n
                noise = error.pow(2).mean()\n
                if noise > 0:\n
                    eff = 1.25 * correct_output.pow(2).mean().div(noise).log10().detach().cpu().numpy()\n
                    dev = math.fabs(eff - self.efficency)\n
                    if dev > 0:\n
                        self.efficency = (self.efficency * 4 + eff) * 0.2\n
                        self.deviation = (self.deviation * 4 + dev) * 0.2\n
                        if self.efficency > 4.0:\n
                            rate = rate * 0.5\n
                        if self.efficency > 4.3 or (self.deviation / self.efficency) < 0.05 or math.fabs(\n
                                dev - self.deviation / dev) < 0.05:\n
                            self.stop = True\n
                    else:\n
                        self.stop = True\n
                else:\n
                    self.stop = True\n
                if not self.stop:\n
                    error = error.mean(dim=[0, 2, 3])\n
                    self.bias.data = torch.sub(self.bias.data, error, alpha=rate)\n
                    self.q_bias = self.bias_quantizer(self.bias)\n
                torch.cuda.empty_cache()\n
            output = F.conv2d(\n
                input=quant_input,\n
                weight=self.q_weight,\n
                bias=self.q_bias,\n
                stride=self.stride,\n
                padding=self.padding,\n
                dilation=self.dilation,\n
                groups=self.groups\n
            )\n
        else:\n
            output = F.conv2d(\n
                input=input,\n
                weight=self.q_weight,\n
                bias=self.q_bias,\n
                stride=self.stride,\n
                padding=self.padding,\n
                dilation=self.dilation,\n
                groups=self.groups\n
            )\n
        if self.quantizer_output:  # dump quantization parameters to txt files\n
            # create the quantizer_output directory tree\n
            for sub in ('', 'q_weight_out', 'w_scale_out', 'q_weight_max', 'max_weight_count', 'q_weight_reorder', 'q_bias_reorder'):\n
                self._ensure_dir(os.path.join('./quantizer_output', sub))\n
            # dump every layer (layer_idx == -1) or only the requested one;\n
            # the fold above already wrote the fused values into self.weight / self.bias\n
            if self.layer_idx == -1 or int(self.name[1:4]) == self.layer_idx:\n
                self._dump_weight_and_bias(self.weight, self.bias)\n
\n
        if self.activate == 'leaky':\n
            output = F.leaky_relu(output, 0.1 if not self.maxabsscaler else 0.25, inplace=True)\n
            if self.training:\n
                float_output = F.leaky_relu(float_output, 0.1 if not self.maxabsscaler else 0.25, inplace=True)\n
        elif self.activate == 'relu6':\n
            output = F.relu6(output, inplace=True)\n
            if self.training:\n
                float_output = F.relu6(float_output, inplace=True)\n
        elif self.activate == 'h_swish':\n
            output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)\n
            if self.training:\n
                float_output = float_output * (F.relu6(float_output + 3.0, inplace=True) / 6.0)\n
        elif self.activate == 'relu':\n
            output = F.relu(output, inplace=True)\n
            if self.training:\n
                float_output = F.relu(float_output, inplace=True)\n
        elif self.activate == 'mish':\n
            output = output * F.softplus(output).tanh()\n
            if self.training:\n
                float_output = float_output * F.softplus(float_output).tanh()\n
        elif self.activate == 'linear':\n
            pass\n
        else:\n
            print('%s is not supported !' % self.activate)\n
\n
        if self.quantizer_output:\n
            for sub in ('q_activation_out', 'a_scale_out', 'q_activation_max', 'max_activation_count', 'q_activation_reorder'):\n
                self._ensure_dir(os.path.join('./quantizer_output', sub))\n
            if self.layer_idx == -1 or int(self.name[1:4]) == self.layer_idx:\n
                self._dump_activation(output)\n
\n
        output = self.activation_quantizer(output)\n
        if self.training and self.activate != 'linear':\n
            return [output, float_output]\n
        else:\n
            return output\n
\n
    def BN_fuse(self):\n
        if self.bn:\n
            # Fold BN (read-only variant, e.g. for export)\n
            if self.bias is not None:\n
                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n
                        self.gamma / torch.sqrt(self.running_var + self.eps)))\n
            else:\n
                bias = reshape_to_bias(\n
                    self.beta - self.running_mean * self.gamma / torch.sqrt(\n
                        self.running_var + self.eps))  # fold running stats into bias\n
            weight = self.weight * reshape_to_weight(\n
                self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into weight\n
        else:\n
            bias = self.bias\n
            weight = self.weight\n
        return weight, bias\n
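\n
\n
# --- Illustrative sketch (added): the TM x TN weight reorder, stated standalone. ---\n
# _reorder_weight above cuts an (OC, IC, K, K) tensor into TM x TN blocks over\n
# (OC, IC), flattens each block to TM x TN x K^2 and serializes it with the\n
# K^2 axis outermost. This reference covers the common case where OC and IC\n
# are exact multiples of TM and TN; it is not called by the classes here.\n
def _reorder_weight_reference(w, TM=32, TN=32):\n
    OC, IC, KH, KW = w.shape\n
    chunks = []\n
    for j in range(OC // TM):\n
        for k in range(IC // TN):\n
            tile = w[j * TM:(j + 1) * TM, k * TN:(k + 1) * TN].reshape(TM, TN, KH * KW)\n
            chunks.append(tile.permute(2, 0, 1).contiguous().view(-1))\n
    return torch.cat(chunks)\n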
np.argmax(q_weight_txt)\n                np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),\n                           max_activation_count)\n                np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)\n                np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,\n                           delimiter='\\n')\n\n        output = self.activation_quantizer(output)\n        if self.training and self.activate != 'linear':\n            return [output, float_output]\n        else:\n            return output\n\n    def BN_fuse(self):\n        if self.bn:\n            # BN融合\n            if self.bias is not None:\n                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (\n                        self.gamma / torch.sqrt(self.running_var + self.eps)))\n            else:\n                bias = reshape_to_bias(\n                    self.beta - self.running_mean * self.gamma / torch.sqrt(\n                        self.running_var + self.eps))  # b融running\n            weight = self.weight * reshape_to_weight(\n                self.gamma / torch.sqrt(self.running_var + self.eps))  # w融running\n        else:\n            bias = self.bias\n            weight = self.weight\n        return weight, bias\n\n\nclass COSPTQuantizedShortcut_min(nn.Module):  # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070\n    def __init__(self, layers, weight=False, bits=8,\n                 quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):\n        super(COSPTQuantizedShortcut_min, self).__init__()\n        self.layers = layers  # layer indices\n        self.weight = weight  # apply weights boolean\n        self.n = len(layers) + 1  # number of layers\n        self.bits = bits\n\n        self.register_buffer('scale_x', torch.zeros(1))  # 量化比例因子\n        self.register_buffer('float_range_x', torch.zeros(1))\n        self.scale_list_x = [0 for i in range(bits)]\n\n        self.register_buffer('scale_a', torch.zeros(1))  # 量化比例因子\n        self.register_buffer('float_range_a', torch.zeros(1))\n        self.scale_list_a = [0 for i in range(bits)]\n\n        self.register_buffer('scale_sum', torch.zeros(1))  # 量化比例因子\n        self.register_buffer('float_range_sum', torch.zeros(1))\n        self.scale_list_sum = [0 for i in range(bits)]\n\n        self.quantizer_output = quantizer_output\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n        self.name = name\n        self.layer_idx = layer_idx\n\n        if weight:\n            self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)  # layer weights\n\n    # 量化\n    def quantize(self, input, type):\n        if type == \"a\":\n            output = input / self.scale_a\n        elif type == \"x\":\n            output = input / self.scale_x\n        elif type == \"sum\":\n            output = input / self.scale_sum\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # 截断\n    def clamp(self, input):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # 反量化\n    def dequantize(self, input, type):\n        if type == \"a\":\n            output = (input) * self.scale_a\n        elif type == \"x\":\n            output = 
(input) * self.scale_x\n        elif type == \"sum\":\n            output = (input) * self.scale_sum\n        return output\n\n    # 更新参数\n    def update_params(self, step, type):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # 量化后范围\n        if type == \"a\":\n            temp = self.float_range_a\n            self.float_range_a.add_(-temp).add_(2 ** step)\n            self.scale_a = self.float_range_a / quantized_range  # 量化比例因子\n        elif type == \"x\":\n            temp = self.float_range_x\n            self.float_range_x.add_(-temp).add_(2 ** step)\n            self.scale_x = self.float_range_x / quantized_range  # 量化比例因子\n        elif type == \"sum\":\n            temp = self.float_range_sum\n            self.float_range_sum.add_(-temp).add_(2 ** step)\n            self.scale_sum = self.float_range_sum / quantized_range  # 量化比例因子\n\n    def forward(self, x, outputs):\n        if self.training:\n            float = x[1]\n            x = x[0]\n        # Weights\n        if self.weight:\n            w = torch.sigmoid(self.w) * (2 / self.n)  # sigmoid weights (0-1)\n            x = x * w[0]\n        # Fusion\n        nx = x.shape[1]  # input channels\n        for i in range(self.n - 1):\n            if self.training:\n                a = outputs[self.layers[i]][0] * w[i + 1] if self.weight else outputs[self.layers[i]][\n                    0]  # feature to add\n            else:\n                a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]]  # feature to add\n            na = a.shape[1]  # feature channels\n            if self.training == True:\n                # 得到输入两个feature和输出的scale\n                max_metrics = -1\n                max_step = 0\n                for i in range(self.bits):\n                    self.update_params(i, type=\"a\")\n                    output = self.quantize(a, type=\"a\")  # 量化\n                    output = self.round(output)\n                    output = self.clamp(output)  # 截断\n                    output = self.dequantize(output, type=\"a\")  # 反量化\n                    cosine_similarity = torch.cosine_similarity(a.view(-1), output.view(-1), dim=0)\n                    if cosine_similarity > max_metrics:\n                        max_metrics = cosine_similarity\n                        max_step = i\n                self.scale_list_a[max_step] += 1\n                Global_max_step = self.scale_list_a.index(max(self.scale_list_a))\n                self.update_params(Global_max_step, type=\"a\")\n\n                max_metrics = -1\n                max_step = 0\n                for i in range(self.bits):\n                    self.update_params(i, type=\"x\")\n                    output = self.quantize(x, type=\"x\")  # 量化\n                    output = self.round(output)\n                    output = self.clamp(output)  # 截断\n                    output = self.dequantize(output, type=\"x\")  # 反量化\n                    cosine_similarity = torch.cosine_similarity(x.view(-1), output.view(-1), dim=0)\n                    if cosine_similarity > max_metrics:\n                        max_metrics = cosine_similarity\n                        max_step = i\n                self.scale_list_x[max_step] += 1\n                Global_max_step = self.scale_list_x.index(max(self.scale_list_x))\n                self.update_params(Global_max_step, type=\"x\")\n\n                float_max_val = 
min(self.float_range_a, self.float_range_x)\n                self.update_params(float_max_val.log2(), type=\"a\")\n                self.update_params(float_max_val.log2(), type=\"x\")\n\n            # 量化x\n            x = self.quantize(x, type=\"x\")  # 量化\n            x = self.round(x)\n            x = self.dequantize(x, type=\"x\")  # 反量化\n\n            # 量化a\n            a = self.quantize(a, type=\"a\")  # 量化\n            a = self.round(a)\n            a = self.dequantize(a, type=\"a\")  # 反量化\n\n            # Adjust channels\n            if nx == na:  # same shape\n                x = x + a\n            elif nx > na:  # slice input\n                x[:, :na] = x[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n            else:  # slice feature\n                x = x + a[:, :nx]\n            # 量化和\n            if self.training == True:\n                max_metrics = -1\n                max_step = 0\n                for i in range(self.bits):\n                    self.update_params(i, type=\"sum\")\n                    output = self.quantize(x, type=\"sum\")  # 量化\n                    output = self.round(output)\n                    output = self.clamp(output)  # 截断\n                    output = self.dequantize(output, type=\"sum\")  # 反量化\n                    cosine_similarity = torch.cosine_similarity(x.view(-1), output.view(-1), dim=0)\n                    if cosine_similarity > max_metrics:\n                        max_metrics = cosine_similarity\n                        max_step = i\n                self.scale_list_sum[max_step] += 1\n                Global_max_step = self.scale_list_sum.index(max(self.scale_list_sum))\n                self.update_params(Global_max_step, type=\"sum\")\n            x = self.quantize(x, type=\"sum\")  # 量化\n            x = self.round(x)\n            x = self.clamp(x)  # 截断\n            # 量化因子数据输出\n            if self.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/q_activation_out'):\n                    os.makedirs('./quantizer_output/q_activation_out')\n                if not os.path.isdir('./quantizer_output/a_scale_out'):\n                    os.makedirs('./quantizer_output/a_scale_out')\n                if not os.path.isdir('./quantizer_output/q_activation_max'):\n                    os.makedirs('./quantizer_output/q_activation_max')\n                if not os.path.isdir('./quantizer_output/max_activation_count'):\n                    os.makedirs('./quantizer_output/max_activation_count')\n                if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                    os.makedirs('./quantizer_output/q_activation_reorder')\n\n                if self.layer_idx == -1:\n\n                    move_scale = math.log2(self.scale_sum)\n                    shortcut_scale = - np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,\n                               delimiter='\\n')\n\n                elif int(self.name[1:4]) == self.layer_idx:\n\n                    move_scale = math.log2(self.scale_sum)\n                    shortcut_scale = - np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,\n                               delimiter='\\n')\n            # 特征图量化数据输出\n            if self.quantizer_output == True:\n                if not 
os.path.isdir('./quantizer_output/q_activation_out'):\n                    os.makedirs('./quantizer_output/q_activation_out')\n                if not os.path.isdir('./quantizer_output/a_scale_out'):\n                    os.makedirs('./quantizer_output/a_scale_out')\n                if not os.path.isdir('./quantizer_output/q_activation_max'):\n                    os.makedirs('./quantizer_output/q_activation_max')\n                if not os.path.isdir('./quantizer_output/max_activation_count'):\n                    os.makedirs('./quantizer_output/max_activation_count')\n                if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                    os.makedirs('./quantizer_output/q_activation_reorder')\n\n                if self.layer_idx == -1:\n\n                    q_x_shortcut = x\n\n                    if self.reorder == True:\n                        a_para = q_x_shortcut\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        remainder_TN = shape_input % self.TN\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########shortcut重排序结束\n\n                    Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,\n                               delimiter='\\n')\n\n                elif int(self.name[1:4]) == self.layer_idx:\n\n                    q_x_shortcut = x\n\n                    if self.reorder == True:\n                        a_para = q_x_shortcut\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        remainder_TN = shape_input % self.TN\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k 
* self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########shortcut重排序结束\n                    Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,\n                               delimiter='\\n')\n\n            x = self.dequantize(x, type=\"sum\")  # 反量化\n\n        if self.training:\n            # float compute\n            # Weights\n            if self.weight:\n                w = torch.sigmoid(self.w) * (2 / self.n)  # sigmoid weights (0-1)\n                float = float * w[0]\n\n            # Fusion\n            nx = float.shape[1]  # input channels\n            for i in range(self.n - 1):\n                a = outputs[self.layers[i]][1] * w[i + 1] if self.weight else outputs[self.layers[i]][\n                    1]  # feature to add\n                na = a.shape[1]  # feature channels\n\n                # Adjust channels\n                if nx == na:  # same shape\n                    float = float + a\n                elif nx > na:  # slice input\n                    float[:, :na] = float[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n                else:  # slice feature\n                    float = float + a[:, :nx]\n\n            return [x, float]\n        else:\n            return x\n\n\nclass COSPTQuantizedShortcut_max(nn.Module):  # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070\n    def __init__(self, layers, weight=False, bits=8,\n                 quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):\n        super(COSPTQuantizedShortcut_max, self).__init__()\n        self.layers = layers  # layer indices\n        self.weight = weight  # apply weights boolean\n        self.n = len(layers) + 1  # number of layers\n        self.bits = bits\n\n        self.register_buffer('scale_x', torch.zeros(1))  # 量化比例因子\n        self.register_buffer('float_range_x', torch.zeros(1))\n\n        self.register_buffer('scale_a', torch.zeros(1))  # 量化比例因子\n        self.register_buffer('float_range_a', torch.zeros(1))\n\n        self.register_buffer('scale_sum', torch.zeros(1))  # 量化比例因子\n        self.register_buffer('float_range_sum', torch.zeros(1))\n        self.scale_list = [0 for i in range(bits)]\n\n        
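# Added note (not in the original source): unlike COSPTQuantizedShortcut_min above,\n        # which searches a separate power-of-two step for x and a and then ties both to\n        # the smaller float range, this _max variant scores one shared step by summing\n        # the cosine similarities of x, a and their sum, so a single scale_list\n        # histogram is enough.\n        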
self.quantizer_output = quantizer_output\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n        self.name = name\n        self.layer_idx = layer_idx\n\n        if weight:\n            self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)  # layer weights\n\n    # 量化\n    def quantize(self, input, type):\n        if type == \"a\":\n            output = input / self.scale_a\n        elif type == \"x\":\n            output = input / self.scale_x\n        elif type == \"sum\":\n            output = input / self.scale_sum\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # 截断\n    def clamp(self, input):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # 反量化\n    def dequantize(self, input, type):\n        if type == \"a\":\n            output = (input) * self.scale_a\n        elif type == \"x\":\n            output = (input) * self.scale_x\n        elif type == \"sum\":\n            output = (input) * self.scale_sum\n        return output\n\n    # 更新参数\n    def update_params(self, step, type):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # 量化后范围\n        if type == \"a\":\n            temp = self.float_range_a\n            self.float_range_a.add_(-temp).add_(2 ** step)\n            self.scale_a = self.float_range_a / quantized_range  # 量化比例因子\n        elif type == \"x\":\n            temp = self.float_range_x\n            self.float_range_x.add_(-temp).add_(2 ** step)\n            self.scale_x = self.float_range_x / quantized_range  # 量化比例因子\n        elif type == \"sum\":\n            temp = self.float_range_sum\n            self.float_range_sum.add_(-temp).add_(2 ** step)\n            self.scale_sum = self.float_range_sum / quantized_range  # 量化比例因子\n\n    def forward(self, x, outputs):\n        if self.training:\n            float = x[1]\n            x = x[0]\n        # Weights\n        if self.weight:\n            w = torch.sigmoid(self.w) * (2 / self.n)  # sigmoid weights (0-1)\n            x = x * w[0]\n\n        # Fusion\n        nx = x.shape[1]  # input channels\n        for i in range(self.n - 1):\n            if self.training:\n                a = outputs[self.layers[i]][0] * w[i + 1] if self.weight else outputs[self.layers[i]][\n                    0]  # feature to add\n            else:\n                a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]]  # feature to add\n            na = a.shape[1]  # feature channels\n            if self.training == True:\n                # 得到输入两个feature和输出的scale\n                max_metrics = -1\n                max_step = 0\n                for i in range(self.bits):\n                    cosine_similarity = 0\n                    self.update_params(i, type=\"a\")\n                    output = self.quantize(a, type=\"a\")  # 量化\n                    output = self.round(output)\n                    output = self.clamp(output)  # 截断\n                    output = self.dequantize(output, type=\"a\")  # 反量化\n                    cosine_similarity = cosine_similarity + torch.cosine_similarity(a.view(-1), output.view(-1), dim=0)\n\n                    self.update_params(i, type=\"x\")\n                    output = 
self.quantize(x, type=\"x\")  # quantize\n                    output = self.round(output)\n                    output = self.clamp(output)  # clamp\n                    output = self.dequantize(output, type=\"x\")  # dequantize\n                    cosine_similarity = cosine_similarity + torch.cosine_similarity(x.view(-1), output.view(-1), dim=0)\n                    # Adjust channels\n                    if nx == na:  # same shape\n                        temp_x = x + a\n                    elif nx > na:  # slice input\n                        temp_x = x.clone()  # fix: temp_x was referenced before assignment on this branch\n                        temp_x[:, :na] = x[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n                    else:  # slice feature\n                        temp_x = x + a[:, :nx]\n\n                    self.update_params(i, type=\"sum\")\n                    output = self.quantize(temp_x, type=\"sum\")  # quantize\n                    output = self.round(output)\n                    output = self.clamp(output)  # clamp\n                    output = self.dequantize(output, type=\"sum\")  # dequantize\n                    cosine_similarity = cosine_similarity + torch.cosine_similarity(temp_x.view(-1), output.view(-1),\n                                                                                    dim=0)\n                    del temp_x\n\n                    if cosine_similarity > max_metrics:\n                        max_metrics = cosine_similarity\n                        max_step = i\n                self.scale_list[max_step] += 1\n                Global_max_step = self.scale_list.index(max(self.scale_list))\n                self.update_params(Global_max_step, type=\"x\")\n                self.update_params(Global_max_step, type=\"a\")\n                self.update_params(Global_max_step, type=\"sum\")\n\n            # quantize x\n            x = self.quantize(x, type=\"x\")  # quantize\n            x = self.round(x)\n            x = self.dequantize(x, type=\"x\")  # dequantize\n\n            # quantize a\n            a = self.quantize(a, type=\"a\")  # quantize\n            a = self.round(a)\n            a = self.dequantize(a, type=\"a\")  # dequantize\n\n            # Adjust channels\n            if nx == na:  # same shape\n                x = x + a\n            elif nx > na:  # slice input\n                x[:, :na] = x[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n            else:  # slice feature\n                x = x + a[:, :nx]\n            # quantize the sum\n            x = self.quantize(x, type=\"sum\")  # quantize\n            x = self.round(x)\n            x = self.clamp(x)  # clamp\n            # export quantization scale factors\n            if self.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/q_activation_out'):\n                    os.makedirs('./quantizer_output/q_activation_out')\n                if not os.path.isdir('./quantizer_output/a_scale_out'):\n                    os.makedirs('./quantizer_output/a_scale_out')\n                if not os.path.isdir('./quantizer_output/q_activation_max'):\n                    os.makedirs('./quantizer_output/q_activation_max')\n                if not os.path.isdir('./quantizer_output/max_activation_count'):\n                    os.makedirs('./quantizer_output/max_activation_count')\n                if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                    os.makedirs('./quantizer_output/q_activation_reorder')\n\n                if self.layer_idx == -1:\n\n                    move_scale = math.log2(self.scale_sum)\n                    shortcut_scale = - np.array(move_scale).reshape(1, -1)\n                    
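# Added note (assumption): with bits=8 the quantized range is 128, so scale_sum is an\n                    # exact power of two and -log2(scale_sum) below is an integer; it is presumably\n                    # consumed as a fixed-point shift amount by the target hardware toolchain.\n                    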
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,\n                               delimiter='\\n')\n\n                elif int(self.name[1:4]) == self.layer_idx:\n\n                    move_scale = math.log2(self.scale_sum)\n                    shortcut_scale = - np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,\n                               delimiter='\\n')\n            # 特征图量化数据输出\n            if self.quantizer_output == True:\n                if not os.path.isdir('./quantizer_output/q_activation_out'):\n                    os.makedirs('./quantizer_output/q_activation_out')\n                if not os.path.isdir('./quantizer_output/a_scale_out'):\n                    os.makedirs('./quantizer_output/a_scale_out')\n                if not os.path.isdir('./quantizer_output/q_activation_max'):\n                    os.makedirs('./quantizer_output/q_activation_max')\n                if not os.path.isdir('./quantizer_output/max_activation_count'):\n                    os.makedirs('./quantizer_output/max_activation_count')\n                if not os.path.isdir('./quantizer_output/q_activation_reorder'):\n                    os.makedirs('./quantizer_output/q_activation_reorder')\n\n                if self.layer_idx == -1:\n\n                    q_x_shortcut = x\n\n                    if self.reorder == True:\n                        a_para = q_x_shortcut\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        remainder_TN = shape_input % self.TN\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########shortcut重排序结束\n\n                    Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,\n                               delimiter='\\n')\n\n                elif 
int(self.name[1:4]) == self.layer_idx:\n\n                    q_x_shortcut = x\n\n                    if self.reorder == True:\n                        a_para = q_x_shortcut\n                        # 重排序参数\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        remainder_TN = shape_input % self.TN\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########shortcut重排序结束\n                    Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,\n                               delimiter='\\n')\n\n            x = self.dequantize(x, type=\"sum\")  # 反量化\n        if self.training:\n            # float compute\n            # Weights\n            if self.weight:\n                w = torch.sigmoid(self.w) * (2 / self.n)  # sigmoid weights (0-1)\n                float = float * w[0]\n\n            # Fusion\n            nx = float.shape[1]  # input channels\n            for i in range(self.n - 1):\n                a = outputs[self.layers[i]][1] * w[i + 1] if self.weight else outputs[self.layers[i]][\n                    1]  # feature to add\n                na = a.shape[1]  # feature channels\n\n                # Adjust channels\n                if nx == na:  # same shape\n                    float = float + a\n                elif nx > na:  # slice input\n                    float[:, :na] = float[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\n                else:  # slice feature\n                    float = float + a[:, :nx]\n\n            return [x, float]\n        else:\n            return x\n\n\nclass COSPTQuantizedFeatureConcat(nn.Module):\n    def __init__(self, layers, groups, bits=8,\n                 quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):\n        super(COSPTQuantizedFeatureConcat, self).__init__()\n        self.layers = layers  # layer indices\n        self.groups = groups\n        
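# Added note: every input to the concat shares one activation scale; the forward\n        # pass keeps an EMA (momentum 0.1) of each input's absolute max in\n        # float_max_list and rounds the largest one to the nearest power of two\n        # before deriving the scale.\n        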
self.multiple = len(layers) > 1  # multiple layers flag\n        self.register_buffer('scale', torch.zeros(1))  # quantization scale factor\n        self.register_buffer('float_max_list', torch.zeros(len(layers)))\n        self.bits = bits\n        self.momentum = 0.1\n        self.quantizer_output = quantizer_output\n        self.reorder = reorder\n        self.TM = TM\n        self.TN = TN\n        self.name = name\n        self.layer_idx = layer_idx\n        # quantize\n\n    def quantize(self, input):\n        output = input / self.scale\n        return output\n\n    def round(self, input):\n        output = Round.apply(input)\n        return output\n\n    # clamp\n    def clamp(self, input):\n        min_val = torch.tensor(-(1 << (self.bits - 1)))\n        max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n        output = torch.clamp(input, min_val, max_val)\n        return output\n\n    # dequantize\n    def dequantize(self, input):\n        output = (input) * self.scale\n        return output\n\n    def forward(self, x, outputs):\n        if self.training:\n            float = x[1]\n            x = x[0]\n        if self.multiple:\n            if self.training == True:\n                quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))\n                quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)\n                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # quantized (integer) range\n                j = 0\n                for i in self.layers:\n                    temp = outputs[i][0].detach()\n                    if self.float_max_list[j] == 0:\n                        self.float_max_list[j].add_(\n                            torch.max(torch.max(temp), torch.abs(torch.min(temp))))\n                    else:\n                        self.float_max_list[j].mul_(1 - self.momentum).add_(\n                            torch.max(torch.max(temp), torch.abs(torch.min(temp))) * self.momentum)\n                    j = j + 1\n\n                    del temp\n                    torch.cuda.empty_cache()\n                float_max = max(self.float_max_list).unsqueeze(0)  # float range before quantization\n                floor_float_range = 2 ** float_max.log2().floor()\n                ceil_float_range = 2 ** float_max.log2().ceil()\n                if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):\n                    float_range = ceil_float_range\n                else:\n                    float_range = floor_float_range\n                self.scale = float_range / quantized_range  # quantization scale factor\n\n            if self.quantizer_output == True:\n\n                if self.layer_idx == -1:\n                    q_a_concat = copy.deepcopy(outputs)  # fix: copy the whole outputs list (outputs[0] is a single tensor)\n\n                    move_scale = math.log2(self.scale)\n                    concat_scale = -np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/concat_scale_%s.txt' % self.name), concat_scale,\n                               delimiter='\\n')\n\n                    for i in self.layers:\n                        q_a_concat[i] = self.quantize(q_a_concat[i])  # quantize\n                        q_a_concat[i] = self.round(q_a_concat[i])\n                        q_a_concat[i] = self.clamp(q_a_concat[i])  # clamp\n                    Q_concat = torch.cat([q_a_concat[i] for i in self.layers], 1)\n\n                    if self.reorder == True:\n                        a_para = Q_concat\n                        # reorder parameters\n                        # print(\"use activation reorder!\")\n                        
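# Added note (assumption): the loop below tiles channels in groups of self.TN and\n                        # flattens each tile in (H, W, C) order; this layout presumably matches\n                        # the accelerator's feature-map tiling (TM x TN).\n                        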
shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n                        a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_concat_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ### save the reordered result as a binary file\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_concat_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ########## end of concat reorder\n\n                    Q_concat = np.array(Q_concat.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/a_concat_%s.txt' % self.name), Q_concat,\n                               delimiter='\\n')\n                elif int(self.name[1:4]) == self.layer_idx:\n                    q_a_concat = copy.deepcopy(outputs)  # fix: copy the whole outputs list (outputs[0] is a single tensor)\n\n                    move_scale = math.log2(self.scale)\n                    concat_scale = -np.array(move_scale).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/a_scale_out/concat_scale_%s.txt' % self.name), concat_scale,\n                               delimiter='\\n')\n\n                    for i in self.layers:\n                        q_a_concat[i] = self.quantize(q_a_concat[i])  # quantize\n                        q_a_concat[i] = self.round(q_a_concat[i])\n                        q_a_concat[i] = self.clamp(q_a_concat[i])  # clamp\n                    Q_concat = torch.cat([q_a_concat[i] for i in self.layers], 1)\n\n                    if self.reorder == True:\n                        a_para = Q_concat\n                        # reorder parameters\n                        # print(\"use activation reorder!\")\n                        shape_input = a_para.shape[1]\n                        num_TN = int(shape_input / self.TN)\n                        first = True\n                        reorder_a_para = None\n                        for k in range(num_TN):\n                            temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]\n                            temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])\n                            temp = temp.permute(1, 2, 0).contiguous().view(-1)\n                            if first:\n                                reorder_a_para = temp.clone().cpu().data.numpy()\n                                first = False\n                            else:\n                                reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())\n\n      
                  a_para_flatten = reorder_a_para\n                        q_activation_reorder = a_para_flatten\n                        q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)\n                        np.savetxt(('./quantizer_output/q_activation_reorder/r_concat_%s.txt' % self.name),\n                                   q_activation_reorder, delimiter='\\n')\n                        ###保存重排序的二进制文件\n                        activation_flat = q_activation_reorder.astype(np.int8)\n                        writer = open('./quantizer_output/q_activation_reorder/%s_concat_q_bin' % self.name, \"wb\")\n                        writer.write(activation_flat)\n                        writer.close()\n                    ##########concat重排序结束\n                    Q_concat = np.array(Q_concat.cpu()).reshape(1, -1)\n                    np.savetxt(('./quantizer_output/q_activation_out/a_concat_%s.txt' % self.name), Q_concat,\n                               delimiter='\\n')\n\n            # 量化\n            if self.training:\n                for i in self.layers:\n                    outputs[i][0] = self.quantize(outputs[i][0])  # 量化\n                    outputs[i][0] = self.round(outputs[i][0])\n                    outputs[i][0] = self.clamp(outputs[i][0])  # 截断\n                    outputs[i][0] = self.dequantize(outputs[i][0])  # 反量化\n                return [torch.cat([outputs[i][0] for i in self.layers], 1),\n                        torch.cat([outputs[i][1] for i in self.layers], 1)]\n            else:\n                for i in self.layers:\n                    outputs[i] = self.quantize(outputs[i])  # 量化\n                    outputs[i] = self.round(outputs[i])\n                    outputs[i] = self.clamp(outputs[i])  # 截断\n                    outputs[i] = self.dequantize(outputs[i])  # 反量化\n                return torch.cat([outputs[i] for i in self.layers], 1)\n        else:\n            if self.groups:\n                if self.training:\n                    return [x[:, (x.shape[1] // 2):], float[:, (x.shape[1] // 2):]]\n                else:\n                    return x[:, (x.shape[1] // 2):]\n            else:\n                return outputs[self.layers[0]]\n"
  },
  {
    "path": "utils/torch_utils.py",
    "content": "from copy import deepcopy\n\nimport torch.backends.cudnn as cudnn\nfrom utils.quantized.quantized_google import *\n\n\ndef init_seeds(seed=0):\n    torch.manual_seed(seed)\n\n    # Remove randomness (may be slower on Tesla GPUs) # https://pytorch.org/docs/stable/notes/randomness.html\n    if seed == 0:\n        cudnn.deterministic = True\n        cudnn.benchmark = False\n\n\ndef select_device(device='', batch_size=None):\n    # device = 'cpu' or '0' or '0,1,2,3'\n    cpu_request = device.lower() == 'cpu'\n    if device and not cpu_request:  # if device requested other than 'cpu'\n        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable\n        assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device  # check availablity\n\n    cuda = False if cpu_request else torch.cuda.is_available()\n    if cuda:\n        c = 1024 ** 2  # bytes to MB\n        ng = torch.cuda.device_count()\n        if ng > 1 and batch_size:  # check that batch_size is compatible with device_count\n            assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)\n        x = [torch.cuda.get_device_properties(i) for i in range(ng)]\n        s = 'Using CUDA '\n        for i in range(0, ng):\n            if i == 1:\n                s = ' ' * len(s)\n            print(\"%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)\" %\n                  (s, i, x[i].name, x[i].total_memory / c))\n    else:\n        print('Using CPU')\n\n    print('')  # skip a line\n    return torch.device('cuda:0' if cuda else 'cpu')\n\n\ndef time_synchronized():\n    torch.cuda.synchronize() if torch.cuda.is_available() else None\n    return time.time()\n\n\ndef initialize_weights(model):\n    for m in model.modules():\n        t = type(m)\n        if t is nn.Conv2d:\n            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n        elif t is nn.BatchNorm2d:\n            m.eps = 1e-4\n            m.momentum = 0.03\n        elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:\n            m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n    # finds layer indices matching module class 'mclass'\n    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef fuse_conv_and_bn(conv, bn):\n    # https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n    with torch.no_grad():\n        # init\n        fusedconv = torch.nn.Conv2d(conv.in_channels,\n                                    conv.out_channels,\n                                    groups=conv.groups,\n                                    kernel_size=conv.kernel_size,\n                                    stride=conv.stride,\n                                    padding=conv.padding,\n                                    bias=True)\n        # prepare filters\n        w_conv = conv.weight.clone().view(conv.out_channels, -1)\n        w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n        fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))\n\n        # prepare spatial bias\n        if conv.bias is not None:\n            b_conv = conv.bias\n        else:\n            b_conv = torch.zeros(conv.weight.size(0))\n        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n        fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n        return fusedconv\n\n\ndef model_info(model, verbose=False):\n    # Plots 
a line-by-line description of a PyTorch model\n    n_p = sum(x.numel() for x in model.parameters())  # number parameters\n    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients\n    if verbose:\n        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n        for i, (name, p) in enumerate(model.named_parameters()):\n            name = name.replace('module_list.', '')\n            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n    try:  # FLOPS\n        from thop import profile\n        macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640),), verbose=False)\n        fs = ', %.1f GFLOPS' % (macs * 2 / 1E9)  # thop returns MACs; FLOPs = 2 * MACs\n    except Exception:\n        fs = ''\n\n    print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))\n\n\ndef load_classifier(name='resnet101', n=2):\n    # Loads a pretrained model reshaped to n-class output\n    import pretrainedmodels  # https://github.com/Cadene/pretrained-models.pytorch#torchvision\n    model = pretrainedmodels.__dict__[name](num_classes=1000, pretrained='imagenet')\n\n    # Display model properties\n    for x in ['model.input_size', 'model.input_space', 'model.input_range', 'model.mean', 'model.std']:\n        print(x + ' =', eval(x))\n\n    # Reshape output to n classes\n    filters = model.last_linear.weight.shape[1]\n    model.last_linear.bias = torch.nn.Parameter(torch.zeros(n))\n    model.last_linear.weight = torch.nn.Parameter(torch.zeros(n, filters))\n    model.last_linear.out_features = n\n    return model\n\n\ndef scale_img(img, ratio=1.0, same_shape=True):  # img(16,3,256,416), r=ratio\n    # scales img(bs,3,y,x) by ratio\n    h, w = img.shape[2:]\n    s = (int(h * ratio), int(w * ratio))  # new size\n    img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize\n    if not same_shape:  # pad/crop img\n        gs = 64  # (pixels) grid size\n        h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]\n    return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean\n\n\nclass ModelEMA:\n    \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n    Keep a moving average of everything in the model state_dict (parameters and buffers).\n    This is intended to allow functionality like\n    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n    A smoothed version of the weights is necessary for some training schemes to perform well.\n    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use\n    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA\n    smoothing of weights to match results. Pay attention to the decay constant you are using\n    relative to your update count per epoch.\n    To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but\n    disable validation of the EMA weights. 
Validation will have to be done manually in a separate\n    process, or after the training stops converging.\n    This class is sensitive to where it is initialized in the sequence of model init,\n    GPU assignment and distributed training wrappers.\n    I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.\n    \"\"\"\n\n    def __init__(self, model, decay=0.9999, device=''):\n        # make a copy of the model for accumulating moving average of weights\n        self.ema = deepcopy(model)\n        self.ema.eval()\n        self.updates = 0  # number of EMA updates\n        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)\n        self.device = device  # perform ema on different device from model if set\n        if device:\n            self.ema.to(device=device)\n        for p in self.ema.parameters():\n            p.requires_grad_(False)\n\n    def update(self, model):\n        self.updates += 1\n        d = self.decay(self.updates)\n        with torch.no_grad():\n            if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel):\n                msd, esd = model.module.state_dict(), self.ema.module.state_dict()\n            else:\n                msd, esd = model.state_dict(), self.ema.state_dict()\n\n            for k, v in esd.items():\n                if v.dtype.is_floating_point:\n                    v *= d\n                    v += (1. - d) * msd[k].detach()\n\n    def update_attr(self, model):\n        # Assign attributes (which may change during training)\n        for k in model.__dict__.keys():\n            if not k.startswith('_'):\n                setattr(self.ema, k, getattr(model, k))\n"
  },
  {
    "path": "utils/utils.py",
    "content": "import glob\nimport math\nimport os\nimport random\nimport shutil\nimport subprocess\nfrom pathlib import Path\nfrom sys import platform\n\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom tqdm import tqdm\n\nimport torch.nn.functional as F\nfrom . import torch_utils  # , google_utils\n\n# Set printoptions\ntorch.set_printoptions(linewidth=320, precision=5, profile='long')\nnp.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5\nmatplotlib.rc('font', **{'size': 11})\n\n# Prevent OpenCV from multithreading (to use PyTorch DataLoader)\ncv2.setNumThreads(0)\n\n\ndef init_seeds(seed=0):\n    random.seed(seed)\n    np.random.seed(seed)\n    torch_utils.init_seeds(seed=seed)\n\n\ndef load_classes(path):\n    # Loads *.names file at 'path'\n    with open(path, 'r') as f:\n        names = f.read().split('\\n')\n    return list(filter(None, names))  # filter removes empty strings (such as last line)\n\n\ndef labels_to_class_weights(labels, nc=80):\n    # Get class weights (inverse frequency) from training labels\n    if labels[0] is None:  # no labels loaded\n        return torch.Tensor()\n\n    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO\n    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]\n    weights = np.bincount(classes, minlength=nc)  # occurences per class\n\n    # Prepend gridpoint count (for uCE trianing)\n    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image\n    # weights = np.hstack([gpi * len(labels)  - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start\n\n    weights[weights == 0] = 1  # replace empty bins with 1\n    weights = 1 / weights  # number of targets per class\n    weights /= weights.sum()  # normalize\n    return torch.from_numpy(weights)\n\n\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\n    # Produces image weights based on class mAPs\n    n = len(labels)\n    class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])\n    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)\n    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample\n    return image_weights\n\n\ndef coco_class_weights():  # frequency of each class in coco train2014\n    n = [187437, 4955, 30920, 6033, 3838, 4332, 3160, 7051, 7677, 9167, 1316, 1372, 833, 6757, 7355, 3302, 3776, 4671,\n         6769, 5706, 3908, 903, 3686, 3596, 6200, 7920, 8779, 4505, 4272, 1862, 4698, 1962, 4403, 6659, 2402, 2689,\n         4012, 4175, 3411, 17048, 5637, 14553, 3923, 5539, 4289, 10084, 7018, 4314, 3099, 4638, 4939, 5543, 2038, 4004,\n         5053, 4578, 27292, 4113, 5931, 2905, 11174, 2873, 4036, 3415, 1517, 4122, 1980, 4464, 1190, 2302, 156, 3933,\n         1877, 17630, 4337, 4624, 1075, 3468, 135, 1380]\n    weights = 1 / torch.Tensor(n)\n    weights /= weights.sum()\n    # with open('data/coco.names', 'r') as f:\n    #     for k, v in zip(f.read().splitlines(), n):\n    #         print('%20s: %g' % (k, v))\n    return weights\n\n\ndef coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)\n    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n    # b = np.loadtxt('data/coco_paper.names', dtype='str', 
delimiter='\\n')\n    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco\n    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet\n    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n    return x\n\n\ndef xyxy2xywh(x):\n    # Transform box coordinates from [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right) to [x, y, w, h] \n    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)\n    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center\n    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center\n    y[:, 2] = x[:, 2] - x[:, 0]  # width\n    y[:, 3] = x[:, 3] - x[:, 1]  # height\n    return y\n\n\ndef xywh2xyxy(x):\n    # Transform box coordinates from [x, y, w, h] to [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right)\n    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)\n    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x\n    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y\n    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x\n    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y\n    return y\n\n\n# def xywh2xyxy(box):\n#     # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2]\n#     if isinstance(box, torch.Tensor):\n#         x, y, w, h = box.t()\n#         return torch.stack((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).t()\n#     else:  # numpy\n#         x, y, w, h = box.T\n#         return np.stack((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).T\n#\n#\n# def xyxy2xywh(box):\n#     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h]\n#     if isinstance(box, torch.Tensor):\n#         x1, y1, x2, y2 = box.t()\n#         return torch.stack(((x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1)).t()\n#     else:  # numpy\n#         x1, y1, x2, y2 = box.T\n#         return np.stack(((x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1)).T\n\n\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n    # Rescale coords (xyxy) from img1_shape to img0_shape\n    if ratio_pad is None:  # calculate from img0_shape\n        gain = max(img1_shape) / max(img0_shape)  # gain  = old / new\n        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding\n    else:\n        gain = ratio_pad[0][0]\n        pad = ratio_pad[1]\n\n    coords[:, [0, 2]] -= pad[0]  # x padding\n    coords[:, [1, 3]] -= pad[1]  # y padding\n    coords[:, :4] /= gain\n    clip_coords(coords, img0_shape)\n    return coords\n\n\ndef clip_coords(boxes, img_shape):\n    # Clip bounding xyxy bounding boxes to image shape (height, width)\n    boxes[:, 0].clamp_(0, img_shape[1])  # x1\n    boxes[:, 1].clamp_(0, img_shape[0])  # y1\n    boxes[:, 2].clamp_(0, img_shape[1])  # x2\n    boxes[:, 3].clamp_(0, img_shape[0])  # y2\n\n\ndef ap_per_class(tp, conf, pred_cls, target_cls):\n    \"\"\" Compute the average precision, given the recall and precision curves.\n    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n    # Arguments\n        tp:    True positives (nparray, nx1 or nx10).\n        conf:  Objectness value from 0-1 (nparray).\n        pred_cls: Predicted object classes (nparray).\n        target_cls: True object classes (nparray).\n    # 
Returns\n        The average precision as computed in py-faster-rcnn.\n    \"\"\"\n\n    # Sort by objectness\n    i = np.argsort(-conf)\n    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n    # Find unique classes\n    unique_classes = np.unique(target_cls)\n\n    # Create Precision-Recall curve and compute AP for each class\n    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898\n    s = [len(unique_classes), tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)\n    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)\n    for ci, c in enumerate(unique_classes):\n        i = pred_cls == c\n        n_gt = (target_cls == c).sum()  # Number of ground truth objects\n        n_p = i.sum()  # Number of predicted objects\n\n        if n_p == 0 or n_gt == 0:\n            continue\n        else:\n            # Accumulate FPs and TPs\n            fpc = (1 - tp[i]).cumsum(0)\n            tpc = tp[i].cumsum(0)\n\n            # Recall\n            recall = tpc / (n_gt + 1e-16)  # recall curve\n            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases\n\n            # Precision\n            precision = tpc / (tpc + fpc)  # precision curve\n            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score\n\n            # AP from recall-precision curve\n            for j in range(tp.shape[1]):\n                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])\n\n            # Plot\n            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n            # ax.plot(recall, precision)\n            # ax.set_xlabel('Recall')\n            # ax.set_ylabel('Precision')\n            # ax.set_xlim(0, 1.01)\n            # ax.set_ylim(0, 1.01)\n            # fig.tight_layout()\n            # fig.savefig('PR_curve.png', dpi=300)\n\n    # Compute F1 score (harmonic mean of precision and recall)\n    f1 = 2 * p * r / (p + r + 1e-16)\n\n    return p, r, ap, f1, unique_classes.astype('int32')\n\n\ndef compute_ap(recall, precision):\n    \"\"\" Compute the average precision, given the recall and precision curves.\n    Source: https://github.com/rbgirshick/py-faster-rcnn.\n    # Arguments\n        recall:    The recall curve (list).\n        precision: The precision curve (list).\n    # Returns\n        The average precision as computed in py-faster-rcnn.\n    \"\"\"\n\n    # Append sentinel values to beginning and end\n    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))\n    mpre = np.concatenate(([0.], precision, [0.]))\n\n    # Compute the precision envelope\n    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))\n\n    # Integrate area under curve\n    method = 'interp'  # methods: 'continuous', 'interp'\n    if method == 'interp':\n        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)\n        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate\n    else:  # 'continuous'\n        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes\n        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve\n\n    return ap\n\n\ndef bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):\n    # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4\n    box2 = box2.t()\n\n    # Get the coordinates of bounding boxes\n    if x1y1x2y2:  # x1, y1, x2, y2 = box1\n        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n    else:  # transform from xywh to xyxy\n        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2\n        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2\n        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2\n        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2\n\n    # Intersection area\n    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \\\n            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)\n\n    # Union Area\n    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1\n    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1\n    union = (w1 * h1 + 1e-16) + w2 * h2 - inter\n\n    iou = inter / union  # iou\n    if GIoU or DIoU or CIoU:\n        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width\n        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height\n        if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf\n            c_area = cw * ch + 1e-16  # convex area\n            return iou - (c_area - union) / c_area  # GIoU\n        if DIoU or CIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\n            # convex diagonal squared\n            c2 = cw ** 2 + ch ** 2 + 1e-16\n            # centerpoint distance squared\n            rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4\n            if DIoU:\n                return iou - rho2 / c2  # DIoU\n            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\n                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n                with torch.no_grad():\n                    alpha = v / (1 - iou + v)\n                return iou - (rho2 / c2 + v * alpha)  # CIoU\n\n    return iou\n\n\ndef box_iou(box1, box2):\n    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n    \"\"\"\n    Return intersection-over-union (Jaccard index) of boxes.\n    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n    Arguments:\n        box1 (Tensor[N, 4])\n        box2 (Tensor[M, 4])\n    Returns:\n        iou (Tensor[N, M]): the NxM matrix containing the pairwise\n            IoU values for every element in boxes1 and boxes2\n    \"\"\"\n\n    def box_area(box):\n        # box = 4xn\n        return (box[2] - box[0]) * (box[3] - box[1])\n\n    area1 = box_area(box1.t())\n    area2 = box_area(box2.t())\n\n    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)\n\n\ndef wh_iou(wh1, wh2):\n    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2\n    wh1 = wh1[:, None]  # [N,1,2]\n    wh2 = wh2[None]  # [1,M,2]\n    inter = torch.min(wh1, wh2).prod(2)  # [N,M]\n    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)\n\n\nclass FocalLoss(nn.Module):\n    # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)\n    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):\n        super(FocalLoss, self).__init__()\n        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()\n        self.gamma = gamma\n        self.alpha = alpha\n        self.reduction = loss_fcn.reduction\n        self.loss_fcn.reduction = 'none'  # required to apply FL to each element\n\n    def forward(self, pred, true):\n        loss = self.loss_fcn(pred, true)\n        # p_t = torch.exp(-loss)\n        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability\n\n        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py\n        pred_prob = torch.sigmoid(pred)  # prob from logits\n        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)\n        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)\n        modulating_factor = (1.0 - p_t) ** self.gamma\n        loss *= alpha_factor * modulating_factor\n\n        if self.reduction == 'mean':\n            return loss.mean()\n        elif self.reduction == 'sum':\n            return loss.sum()\n        else:  # 'none'\n            return loss\n\n\ndef smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441\n    # return positive, negative label smoothing BCE targets\n    return 1.0 - 0.5 * eps, 0.5 * eps\n\n\ndef compute_loss(p, targets, model):  # predictions, targets, model\n    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor\n    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])\n    tcls, tbox, indices, anchor_vec = build_targets(p, targets, model)\n    h = model.hyp  # hyperparameters\n    red = 'mean'  # Loss reduction (sum or mean)\n\n    # Define criteria\n    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)\n    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)\n\n    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n    cp, cn = smooth_BCE(eps=0.0)\n\n    # focal loss\n    g = h['fl_gamma']  # focal loss gamma\n    if g > 0:\n        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n    # Compute losses\n    np, ng = 0, 0  # number grid points, targets\n    for i, pi in enumerate(p):  # layer index, layer predictions\n        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n        tobj = torch.zeros_like(pi[..., 0])  # target obj\n        np += tobj.numel()\n\n        # Compute losses\n        nb = len(b)\n        if nb:  # number of targets\n            ng += nb\n            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets\n            # ps[:, 2:4] = torch.sigmoid(ps[:, 2:4])  # wh power loss (uncomment)\n\n            # GIoU\n            pxy = torch.sigmoid(ps[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            pwh = torch.exp(ps[:, 2:4]).clamp(max=1E3) * anchor_vec[i]\n            pbox = torch.cat((pxy, pwh), 1)  # predicted box\n            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou computation\n            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss\n            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio\n\n            if model.nc > 1:  # cls loss (only if multiple classes)\n                t = torch.full_like(ps[:, 5:], cn)  # targets\n                t[range(nb), tcls[i]] = cp\n               
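 # t is a smoothed one-hot matrix: cn everywhere, cp at each target class (see smooth_BCE)\n               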
 lcls += BCEcls(ps[:, 5:], t)  # BCE\n                # lcls += CE(ps[:, 5:], tcls[i])  # CE\n\n            # Append targets to text file\n            # with open('targets.txt', 'a') as file:\n            #     [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n        lobj += BCEobj(pi[..., 4], tobj)  # obj loss\n\n    lbox *= h['giou']\n    lobj *= h['obj']\n    lcls *= h['cls']\n    if red == 'sum':\n        bs = tobj.shape[0]  # batch size\n        lobj *= 3 / (6300 * bs) * 2  # 3 / np * 2\n        if ng:\n            lcls *= 3 / ng / model.nc\n            lbox *= 3 / ng\n\n    loss = lbox + lobj + lcls\n    return loss, torch.cat((lbox, lobj, lcls, loss)).detach()\n\n\ndef compute_lost_KD(output_s, output_t, num_classes, batch_size):\n    T = 3.0\n    Lambda_ST = 0.001\n    criterion_st = torch.nn.KLDivLoss(reduction='sum')\n    output_s = torch.cat([i.view(-1, num_classes + 5) for i in output_s])\n    output_t = torch.cat([i.view(-1, num_classes + 5) for i in output_t])\n    loss_st = criterion_st(nn.functional.log_softmax(output_s / T, dim=1),\n                           nn.functional.softmax(output_t / T, dim=1)) * (T * T) / batch_size\n    return loss_st * Lambda_ST\n\n\ndef compute_lost_KD2(model, targets, output_s, output_t):\n    reg_m = 0.0\n    T = 3.0\n    Lambda_cls, Lambda_box = 0.0001, 0.001\n\n    criterion_st = torch.nn.KLDivLoss(reduction='sum')\n    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor\n    lcls, lbox = ft([0]), ft([0])\n\n    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)\n    reg_ratio, reg_num, reg_nb = 0, 0, 0\n    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions\n        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n\n        nb = len(b)\n        if nb:  # number of targets\n            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets\n            pts = pt[b, a, gj, gi]\n\n            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box\n\n            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box\n\n            l2_dis_s = (psbox - tbox[i]).pow(2).sum(1)\n            l2_dis_s_m = l2_dis_s + reg_m\n            l2_dis_t = (ptbox - tbox[i]).pow(2).sum(1)\n            l2_num = l2_dis_s_m > l2_dis_t\n            lbox += l2_dis_s[l2_num].sum()\n            reg_num += l2_num.sum().item()\n            reg_nb += nb\n\n        output_s_i = ps[..., 4:].view(-1, model.nc + 1)\n        output_t_i = pt[..., 4:].view(-1, model.nc + 1)\n        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),\n                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)\n\n    if reg_nb:\n        reg_ratio = reg_num / reg_nb\n\n    return lcls * Lambda_cls + lbox * Lambda_box, reg_ratio\n\n\ndef compute_lost_KD3(model, targets, output_s, output_t):\n    T = 3.0\n    Lambda_cls, Lambda_box = 0.0001, 0.001\n\n    criterion_st = torch.nn.KLDivLoss(reduction='sum')\n    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor\n    lcls, lbox = ft([0]), ft([0])\n\n    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)\n    for i, (ps, pt) in 
enumerate(zip(output_s, output_t)):  # layer index, layer predictions\n        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n\n        nb = len(b)\n        if nb:  # number of targets\n            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets\n            pts = pt[b, a, gj, gi]\n\n            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box\n\n            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box\n\n            l2_dis = (psbox - ptbox).pow(2).sum(1)\n            lbox += l2_dis.sum()\n\n        output_s_i = ps[..., 4:].view(-1, model.nc + 1)\n        output_t_i = pt[..., 4:].view(-1, model.nc + 1)\n        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),\n                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)\n\n    return lcls * Lambda_cls + lbox * Lambda_box\n\n\ndef compute_lost_KD4(model, targets, output_s, output_t, feature_s, feature_t, batch_size):\n    T = 3.0\n    Lambda_cls, Lambda_box, Lambda_feature = 0.001, 0.001, 0.001\n    criterion_st = torch.nn.KLDivLoss(reduction='sum')\n    criterion_stf = torch.nn.KLDivLoss(reduction='sum')\n    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor\n    lcls, lbox, lfeature = ft([0]), ft([0]), ft([0])\n    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)\n    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions\n        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n\n        nb = len(b)\n        if nb:  # number of targets\n            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets\n            pts = pt[b, a, gj, gi]\n\n            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box\n\n            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box\n\n            l2_dis = (psbox - ptbox).pow(2).sum(1)\n            lbox += l2_dis.sum()\n        # cls loss\n        output_s_i = ps[..., 4:].view(-1, model.nc + 1)\n        output_t_i = pt[..., 4:].view(-1, model.nc + 1)\n        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),\n                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)\n    # feature loss\n    if len(feature_t) != len(feature_s):\n        print(\"feature mismatch!\")\n        exit()\n    for i in range(len(feature_t)):\n        # feature_t[i] = feature_t[i].pow(2).sum(1)\n        feature_t[i] = feature_t[i].abs().sum(1).view(feature_t[i].size(0), -1)\n        # feature_s[i] = feature_s[i].pow(2).sum(1)\n        feature_s[i] = feature_s[i].abs().sum(1).view(feature_s[i].size(0), -1)\n        lfeature += criterion_stf(nn.functional.log_softmax(feature_s[i] / T, dim=1),\n                                  nn.functional.softmax(feature_t[i] / T, dim=1)) * (T * T) / batch_size\n    return lcls * Lambda_cls + lbox * Lambda_box + lfeature * Lambda_feature\n\n\ndef 
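kd_total_loss_sketch(p_s, p_t, targets, model, batch_size):\n    # Added sketch, not called anywhere in this repo: one plausible way to combine the\n    # supervised detection loss with the plain output-distillation term above during\n    # student training. Arguments mirror compute_loss / compute_lost_KD; treat this as\n    # illustrative wiring rather than the project's actual training objective.\n    loss, loss_items = compute_loss(p_s, targets, model)  # standard YOLO loss on student outputs\n    loss = loss + compute_lost_KD(p_s, p_t, model.nc, batch_size)  # soft-label KL term vs. teacher\n    return loss, loss_items\n\n\ndef 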
indices_merge(indices):\n    indices_merge = []\n\n    for i in range(len(indices)):\n        temp = list(indices[i])\n        temp[2] = temp[2] * (2 ** (5 - i))\n        temp[3] = temp[3] * (2 ** (5 - i))\n        indices_merge.append(temp)\n    return indices_merge\n\n\ndef fine_grained_imitation_feature_mask(feature_s, feature_t, indices, img_size):\n    if feature_t.size() != feature_s.size():\n        print(\"feature mismatch!\")\n        exit()\n    B, Gj, Gi = torch.Tensor(0).long().cuda(), torch.Tensor(0).long().cuda(), torch.Tensor(0).long().cuda()\n    feature_size = feature_s.size()[1]\n    scale = img_size / feature_size\n    for j in range(len(indices)):\n        if 2 ** (5 - j) < scale:\n            break\n        b, _, gj, gi = indices[j]  # image, gridy, gridx\n        gj, gi = (gj / scale).long(), (gi / scale).long()\n        for i in range(gj.size()[0]):\n            if 2 ** (5 - j) == scale:\n                break\n            b_temp = (torch.ones(int(2 ** (5 - j) / scale - 1)).long().cuda() * b[i])\n            gj_temp = torch.arange(int(gj[i].item()) + 1, int(gj[i].item() + 2 ** (5 - j) / scale)).cuda()\n            gi_temp = torch.arange(int(gi[i].item()) + 1, int(gi[i].item() + 2 ** (5 - j) / scale)).cuda()\n            b = torch.cat((b, b_temp))\n            gj = torch.cat((gj, gj_temp))\n            gi = torch.cat((gi, gi_temp))\n        B = torch.cat((B, b))\n        Gj = torch.cat((Gj, gj))\n        Gi = torch.cat((Gi, gi))\n    mask = torch.zeros(feature_s.size())\n    mask[B, Gj, Gi] = 1\n    return mask\n\n\ndef compute_lost_KD5(model, targets, output_s, output_t, feature_s, feature_t, batch_size, img_size):\n    T = 3.0\n    Lambda_cls, Lambda_box, Lambda_feature = 0.001, 0.001, 0.001\n    criterion_st = torch.nn.KLDivLoss(reduction='sum')\n    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor\n    lcls, lbox, lfeature = ft([0]), ft([0]), ft([0])\n    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)\n    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions\n        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n\n        nb = len(b)\n        if nb:  # number of targets\n            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets\n            pts = pt[b, a, gj, gi]\n\n            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box\n\n            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2,  s = 1.5  (scale_xy)\n            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box\n\n            l2_dis = (psbox - ptbox).pow(2).sum(1)\n            lbox += l2_dis.sum()\n        # cls loss\n        output_s_i = ps[..., 4:].view(-1, model.nc + 1)\n        output_t_i = pt[..., 4:].view(-1, model.nc + 1)\n        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),\n                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)\n    # feature loss\n    if len(feature_t) != len(feature_s):\n        print(\"feature mismatch!\")\n        exit()\n    merge = indices_merge(indices)\n    for i in range(len(feature_t)):\n        # feature_t[i] = feature_t[i].pow(2).sum(1)\n        feature_t[i] = feature_t[i].abs().sum(1)\n        # feature_s[i] = feature_s[i].pow(2).sum(1)\n        feature_s[i] = 
feature_s[i].abs().sum(1)\n        mask = fine_grained_imitation_feature_mask(feature_s[i], feature_t[i], merge, img_size)\n        mask = mask.to(targets.device)\n        feature_t[i] = (feature_t[i] * mask).view(batch_size, -1)\n        feature_s[i] = (feature_s[i] * mask).view(batch_size, -1)\n        lfeature += criterion_st(nn.functional.log_softmax(feature_s[i] / T, dim=1),\n                                 nn.functional.softmax(feature_t[i] / T, dim=1)) * (T * T) / batch_size\n    # print(lcls.data)\n    # print(lbox.data)\n    # print(lfeature.data)\n    return lcls * Lambda_cls + lbox * Lambda_box + lfeature * Lambda_feature\n\n\ndef fine_grained_imitation_mask(feature_s, feature_t, indices):\n    if len(feature_t) != len(feature_s):\n        print(\"feature mismatch!\")\n        exit()\n    mask = []\n    for i in range(len(feature_t)):\n        temp = torch.zeros(feature_s[i].size())\n        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n        temp[b, a, gj, gi] = 1\n        mask.append(temp)\n    return mask\n\n\n# FineGrainedmask\ndef compute_lost_KD6(model, targets, output_s, output_t, batch_size):\n    T = 3.0\n    Lambda_feature = 0.001\n    criterion_st = torch.nn.KLDivLoss(reduction='sum')\n    feature_s = list(output_s)\n    feature_t = list(output_t)\n    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)\n    mask = fine_grained_imitation_mask(feature_s, feature_t, indices)\n    # feature loss\n    for i in range(len(mask)):\n        mask[i] = mask[i].to(targets.device)\n        feature_t[i] = feature_t[i] * mask[i]\n        feature_s[i] = feature_s[i] * mask[i]\n    feature_s = torch.cat([i.view(-1, 3 * (model.nc + 5)) for i in feature_s])\n    feature_t = torch.cat([i.view(-1, 3 * (model.nc + 5)) for i in feature_t])\n    lfeature = criterion_st(nn.functional.log_softmax(feature_s / T, dim=1),\n                            nn.functional.softmax(feature_t / T, dim=1)) * (T * T) / batch_size\n    return lfeature * Lambda_feature\n\n\ndef Failure_Case_Loss_FM(masks, imgs, targets):\n    criterion = torch.nn.KLDivLoss(reduction='sum')\n    if masks is None:\n        return torch.zeros([1]).to(imgs.device)\n    PBI = 0\n    PBO = 0\n    # masks_target = []\n    for i in range(masks.size(0)):\n        mask = masks[i]\n        pbi = torch.sum(mask[0]) / (mask.shape[1] * mask.shape[2])\n        PBI = PBI + pbi\n        target = targets[targets[:, 0] == i]\n        # mask_target = torch.zeros((3, mask.shape[1], mask.shape[2])).to(mask.device)\n        for obj in target:\n            # mask is (3, H, W): scale x/w by the width (shape[2]) and y/h by the height (shape[1])\n            x, y = mask.shape[2] * float(obj[2]), mask.shape[1] * float(obj[3])\n            w, h = mask.shape[2] * float(obj[4]), mask.shape[1] * float(obj[5])\n            mask_object = torch.zeros((3, mask.shape[1], mask.shape[2])).to(mask.device)\n            mask_object[:, round(y - h / 2):round(y + h / 2), round(x - w / 2):round(x + w / 2)] = 1\n            pbo = torch.sum((mask * mask_object)) / torch.sum(mask_object)\n            PBO = PBO + pbo\n            # mask_target = mask_target + mask_object\n        # masks_target.append(mask_target.unsqueeze(0))\n    # masks_target = torch.cat(masks_target, dim=0)\n    F_loss = abs(PBI - PBO) / imgs.shape[0]\n    # return criterion(masks*imgs, masks_target*imgs)#, PBI, PBO\n\n    fence_imgs = F.log_softmax((masks * imgs).view(imgs.size(0), -1), dim=-1)\n    original_imgs = F.softmax(imgs.view(imgs.size(0), -1), dim=-1)\n    D_loss = criterion(fence_imgs, original_imgs)\n\n  
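  # F_loss aligns overall mask coverage (PBI) with coverage over labelled boxes (PBO);\n    # D_loss is a KL term keeping the masked images close to the originals.\n  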
  return F_loss + D_loss\n\n\ndef build_targets(p, targets, model):\n    # targets = [image, class, x, y, w, h]\n\n    nt = targets.shape[0]\n    tcls, tbox, indices, av = [], [], [], []\n    reject, use_all_anchors = True, True\n    gain = torch.ones(6, device=targets.device)  # normalized to gridspace gain\n\n    # m = list(model.modules())[-1]\n    # for i in range(m.nl):\n    #    anchors = m.anchors[i]\n    multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n    for i, j in enumerate(model.yolo_layers):\n        # get number of grid points and anchor vec for this yolo layer\n        anchors = model.module.module_list[j].anchor_vec if multi_gpu else model.module_list[j].anchor_vec\n\n        # iou of targets-anchors\n        gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain\n        t, a = targets * gain, []\n        gwh = t[:, 4:6]\n        if nt:\n            iou = wh_iou(anchors, gwh)  # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))\n\n            if use_all_anchors:\n                na = anchors.shape[0]  # number of anchors\n                a = torch.arange(na).view(-1, 1).repeat(1, nt).view(-1)\n                t = t.repeat(na, 1)\n            else:  # use best anchor only\n                iou, a = iou.max(0)  # best iou and anchor\n\n            # reject anchors below iou_thres (OPTIONAL, increases P, lowers R)\n            if reject:\n                j = iou.view(-1) > model.hyp['iou_t']  # iou threshold hyperparameter\n                t, a = t[j], a[j]\n\n        # Indices\n        b, c = t[:, :2].long().t()  # target image, class\n        gxy = t[:, 2:4]  # grid x, y\n        gwh = t[:, 4:6]  # grid w, h\n        gi, gj = gxy.long().t()  # grid x, y indices\n        indices.append((b, a, gj, gi))\n\n        # Box\n        gxy -= gxy.floor()  # xy\n        tbox.append(torch.cat((gxy, gwh), 1))  # xywh (grids)\n        av.append(anchors[a])  # anchor vec\n\n        # Class\n        tcls.append(c)\n        if c.shape[0]:  # if any targets\n            assert c.max() < model.nc, 'Model accepts %g classes labeled from 0-%g, however you labelled a class %g. 
' \\\n                                       'See https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data' % (\n                                           model.nc, model.nc - 1, c.max())\n\n    return tcls, tbox, indices, av\n\n\ndef non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, multi_label=True, classes=None, agnostic=False):\n    \"\"\"\n    Performs  Non-Maximum Suppression on inference results\n    Returns detections with shape:\n        nx6 (x1, y1, x2, y2, conf, cls)\n    \"\"\"\n\n    # Box constraints\n    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height\n\n    method = 'merge'\n    nc = prediction[0].shape[1] - 5  # number of classes\n    multi_label &= nc > 1  # multiple labels per box\n    output = [None] * len(prediction)\n\n    for xi, x in enumerate(prediction):  # image index, image inference\n        # Apply conf constraint\n        x = x[x[:, 4] > conf_thres]\n\n        # Apply width-height constraint\n        x = x[((x[:, 2:4] > min_wh) & (x[:, 2:4] < max_wh)).all(1)]\n\n        # If none remain process next image\n        if not x.shape[0]:\n            continue\n\n        # Compute conf\n        x[..., 5:] *= x[..., 4:5]  # conf = obj_conf * cls_conf\n\n        # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n        box = xywh2xyxy(x[:, :4])\n\n        # Detections matrix nx6 (xyxy, conf, cls)\n        if multi_label:\n            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).t()\n            x = torch.cat((box[i], x[i, j + 5].unsqueeze(1), j.float().unsqueeze(1)), 1)\n        else:  # best class only\n            conf, j = x[:, 5:].max(1)\n            x = torch.cat((box, conf.unsqueeze(1), j.float().unsqueeze(1)), 1)\n\n        # Filter by class\n        if classes:\n            x = x[(j.view(-1, 1) == torch.tensor(classes, device=j.device)).any(1)]\n\n        # Apply finite constraint\n        if not torch.isfinite(x).all():\n            x = x[torch.isfinite(x).all(1)]\n\n        # If none remain process next image\n        n = x.shape[0]  # number of boxes\n        if not n:\n            continue\n\n        # Sort by confidence\n        # if method == 'fast_batch':\n        #    x = x[x[:, 4].argsort(descending=True)]\n\n        # Batched NMS\n        c = x[:, 5] * 0 if agnostic else x[:, 5]  # classes\n        boxes, scores = x[:, :4].clone() + c.view(-1, 1) * max_wh, x[:, 4]  # boxes (offset by class), scores\n        if method == 'merge':  # Merge NMS (boxes merged using weighted mean)\n            i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\n            if 1 < n < 3E3:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n                try:\n                    # weights = (box_iou(boxes, boxes).tril_() > iou_thres) * scores.view(-1, 1)  # box weights\n                    # weights /= weights.sum(0)  # normalize\n                    # x[:, :4] = torch.mm(weights.T, x[:, :4])\n                    weights = (box_iou(boxes[i], boxes) > iou_thres) * scores[None]  # box weights\n                    x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes\n                except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139\n                    pass\n        elif method == 'vision':\n            i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\n        elif method == 'fast':  # FastNMS from https://github.com/dbolya/yolact\n            iou = box_iou(boxes, boxes).triu_(diagonal=1)  # upper triangular iou 
matrix\n            i = iou.max(0)[0] < iou_thres\n\n        output[xi] = x[i]\n    return output\n\n\ndef get_yolo_layers(model):\n    bool_vec = [x['type'] == 'yolo' for x in model.module_defs]\n    return [i for i, x in enumerate(bool_vec) if x]  # [82, 94, 106] for yolov3\n\n\ndef print_model_biases(model):\n    # prints the bias neurons preceding each yolo layer\n    print('\\nModel Bias Summary: %8s%18s%18s%18s' % ('layer', 'regression', 'objectness', 'classification'))\n    try:\n        multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n        for l in model.yolo_layers:  # print pretrained biases\n            if multi_gpu:\n                na = model.module.module_list[l].na  # number of anchors\n                b = model.module.module_list[l - 1][0].bias.view(na, -1)  # bias 3x85\n            else:\n                na = model.module_list[l].na\n                b = model.module_list[l - 1][0].bias.view(na, -1)  # bias 3x85\n            print(' ' * 20 + '%8g %18s%18s%18s' % (l, '%5.2f+/-%-5.2f' % (b[:, :4].mean(), b[:, :4].std()),\n                                                   '%5.2f+/-%-5.2f' % (b[:, 4].mean(), b[:, 4].std()),\n                                                   '%5.2f+/-%-5.2f' % (b[:, 5:].mean(), b[:, 5:].std())))\n    except:\n        pass\n\n\ndef strip_optimizer(f='weights/last.pt'):  # from utils.utils import *; strip_optimizer()\n    # Strip optimizer from *.pt files for lighter files (reduced by 2/3 size)\n    x = torch.load(f, map_location=torch.device('cpu'))\n    x['optimizer'] = None\n    torch.save(x, f)\n\n\ndef create_backbone(f='weights/last.pt'):  # from utils.utils import *; create_backbone()\n    # create a backbone from a *.pt file\n    x = torch.load(f, map_location=torch.device('cpu'))\n    x['optimizer'] = None\n    x['training_results'] = None\n    x['epoch'] = -1\n    for p in x['model'].values():\n        try:\n            p.requires_grad = True\n        except:\n            pass\n    torch.save(x, 'weights/backbone.pt')\n\n\ndef coco_class_count(path='../coco/labels/train2014/'):\n    # Histogram of occurrences per class\n    nc = 80  # number classes\n    x = np.zeros(nc, dtype='int32')\n    files = sorted(glob.glob('%s/*.*' % path))\n    for i, file in enumerate(files):\n        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)\n        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)\n        print(i, len(files))\n\n\ndef coco_only_people(path='../coco/labels/train2017/'):  # from utils.utils import *; coco_only_people()\n    # Find images with only people\n    files = sorted(glob.glob('%s/*.*' % path))\n    for i, file in enumerate(files):\n        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)\n        if all(labels[:, 0] == 0):\n            print(labels.shape[0], file)\n\n\ndef select_best_evolve(path='evolve*.txt'):  # from utils.utils import *; select_best_evolve()\n    # Find best evolved mutation\n    for file in sorted(glob.glob(path)):\n        x = np.loadtxt(file, dtype=np.float32, ndmin=2)\n        print(file, x[fitness(x).argmax()])\n\n\ndef crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()\n    # crops images into random squares up to scale fraction\n    # WARNING: overwrites images!\n    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):\n        img = cv2.imread(file)  # BGR\n        if img is not None:\n            h, w = img.shape[:2]\n\n            # create random mask\n            a = 
30  # minimum size (pixels)\n            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height\n            mask_w = mask_h  # mask width\n\n            # box\n            xmin = max(0, random.randint(0, w) - mask_w // 2)\n            ymin = max(0, random.randint(0, h) - mask_h // 2)\n            xmax = min(w, xmin + mask_w)\n            ymax = min(h, ymin + mask_h)\n\n            # crop to the box and overwrite the source image\n            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])\n\n\ndef coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):\n    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()\n    if os.path.exists('new/'):\n        shutil.rmtree('new/')  # delete output folder\n    os.makedirs('new/')  # make new output folder\n    os.makedirs('new/labels/')\n    os.makedirs('new/images/')\n    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):\n        with open(file, 'r') as f:\n            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)\n        i = labels[:, 0] == label_class\n        if any(i):\n            img_file = file.replace('labels', 'images').replace('txt', 'jpg')\n            labels[:, 0] = 0  # reset class to 0\n            with open('new/images.txt', 'a') as f:  # add image to dataset list\n                f.write(img_file + '\\n')\n            with open('new/labels/' + Path(file).name, 'a') as f:  # write label\n                for l in labels[i]:\n                    f.write('%g %.6f %.6f %.6f %.6f\\n' % tuple(l))\n            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images\n\n\ndef kmean_anchors(path='./data/coco64.txt', n=9, img_size=(320, 1024), thr=0.20, gen=1000):\n    # Creates kmeans anchors for use in *.cfg files: from utils.utils import *; _ = kmean_anchors()\n    # n: number of anchors\n    # img_size: (min, max) image size used for multi-scale training (can be same values)\n    # thr: IoU threshold hyperparameter used for training (0.0 - 1.0)\n    # gen: generations to evolve anchors using genetic algorithm\n    from utils.datasets import LoadImagesAndLabels\n\n    def print_results(k):\n        k = k[np.argsort(k.prod(1))]  # sort small to large\n        iou = wh_iou(wh, torch.Tensor(k))\n        max_iou = iou.max(1)[0]\n        bpr, aat = (max_iou > thr).float().mean(), (iou > thr).float().mean() * n  # best possible recall, anch > thr\n        print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))\n        print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %\n              (n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')\n        for i, x in enumerate(k):\n            print('%i,%i' % (round(x[0]), round(x[1])), end=',  ' if i < len(k) - 1 else '\\n')  # use in *.cfg\n        return k\n\n    def fitness(k):  # mutation fitness\n        iou = wh_iou(wh, torch.Tensor(k))  # iou\n        max_iou = iou.max(1)[0]\n        return (max_iou * (max_iou > thr).float()).mean()  # product\n\n    # Get label wh\n    wh = []\n    dataset = LoadImagesAndLabels(path, augment=True, rect=True)\n    nr = 1 if img_size[0] == img_size[1] else 10  # number augmentation repetitions\n    for s, l in zip(dataset.shapes, dataset.labels):\n        wh.append(l[:, 3:5] * (s / s.max()))  # image normalized to letterbox normalized wh\n    wh = np.concatenate(wh, 0).repeat(nr, axis=0)  # augment 10x\n    wh *= np.random.uniform(img_size[0], img_size[1], 
size=(wh.shape[0], 1))  # normalized to pixels (multi-scale)\n    wh = wh[(wh > 2.0).all(1)]  # remove below threshold boxes (< 2 pixels wh)\n\n    # Darknet yolov3.cfg anchors\n    use_darknet = False\n    if use_darknet and n == 9:\n        k = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]])\n    else:\n        # Kmeans calculation\n        from scipy.cluster.vq import kmeans\n        print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))\n        s = wh.std(0)  # sigmas for whitening\n        k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance\n        k *= s\n    wh = torch.Tensor(wh)\n    k = print_results(k)\n\n    # # Plot\n    # k, d = [None] * 20, [None] * 20\n    # for i in tqdm(range(1, 21)):\n    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance\n    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))\n    # ax = ax.ravel()\n    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')\n    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh\n    # ax[0].hist(wh[wh[:, 0]<100, 0],400)\n    # ax[1].hist(wh[wh[:, 1]<100, 1],400)\n    # fig.tight_layout()\n    # fig.savefig('wh.png', dpi=200)\n\n    # Evolve\n    npr = np.random\n    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma\n    for _ in tqdm(range(gen), desc='Evolving anchors'):\n        v = np.ones(sh)\n        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)\n            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)  # 98.6, 61.6\n        kg = (k.copy() * v).clip(min=2.0)\n        fg = fitness(kg)\n        if fg > f:\n            f, k = fg, kg.copy()\n            print_results(k)\n    k = print_results(k)\n\n    return k\n\n\ndef print_mutation(hyp, results, bucket=''):\n    # Print mutation results to evolve.txt (for use with train.py --evolve)\n    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys\n    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values\n    c = '%10.4g' * len(results) % results  # results (P, R, mAP, F1, test_loss)\n    print('\\n%s\\n%s\\nEvolved fitness: %s\\n' % (a, b, c))\n\n    if bucket:\n        os.system('gsutil cp gs://%s/evolve.txt .' 
% bucket)  # download evolve.txt\n\n    with open('evolve.txt', 'a') as f:  # append result\n        f.write(c + b + '\\n')\n    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows\n    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sort by fitness\n\n    if bucket:\n        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt\n\n\ndef apply_classifier(x, model, img, im0):\n    # applies a second stage classifier to yolo outputs\n    im0 = [im0] if isinstance(im0, np.ndarray) else im0\n    for i, d in enumerate(x):  # per image\n        if d is not None and len(d):\n            d = d.clone()\n\n            # Reshape and pad cutouts\n            b = xyxy2xywh(d[:, :4])  # boxes\n            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square\n            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad\n            d[:, :4] = xywh2xyxy(b).long()\n\n            # Rescale boxes from img_size to im0 size\n            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)\n\n            # Classes\n            pred_cls1 = d[:, 5].long()\n            ims = []\n            for j, a in enumerate(d):  # per item\n                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]\n                im = cv2.resize(cutout, (224, 224))  # BGR\n                # cv2.imwrite('test%i.jpg' % j, cutout)\n\n                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416\n                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32\n                im /= 255.0  # 0 - 255 to 0.0 - 1.0\n                ims.append(im)\n\n            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction\n            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections\n\n    return x\n\n\ndef fitness(x):\n    # Returns fitness (for use with results.txt or evolve.txt)\n    w = [0.0, 0.00, 1, 0.00]  # weights for [P, R, mAP, F1]@0.5 or [P, R, mAP@0.5, mAP@0.5:0.95]\n    return (x[:, :4] * w).sum(1)\n\n\ndef output_to_target(output, width, height):\n    \"\"\"\n    Convert a YOLO model output to target format\n\n    [batch_id, class_id, x, y, w, h, conf]\n\n    \"\"\"\n    for i in range(len(output)):\n        if isinstance(output[i], torch.Tensor):\n            output[i] = output[i].cpu().numpy()\n\n    targets = []\n    for i, o in enumerate(output):\n\n        if o is not None:\n            for pred in o:\n                box = pred[:4]\n                w = (box[2] - box[0]) / width\n                h = (box[3] - box[1]) / height\n                x = box[0] / width + w / 2\n                y = box[1] / height + h / 2\n                conf = pred[4]\n                cls = int(pred[5])\n\n                targets.append([i, cls, x, y, w, h, conf])\n\n    return np.array(targets)\n\n\n# Plotting functions ---------------------------------------------------------------------------------------------------\ndef plot_one_box(x, img, color=None, label=None, line_thickness=None):\n    # Plots one bounding box on image img\n    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness\n    color = color or [random.randint(0, 255) for _ in range(3)]\n    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))\n    cv2.rectangle(img, c1, c2, color, thickness=tl)\n    if label:\n        tf = max(tl - 1, 1)  # font thickness\n        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n        c2 = c1[0] + t_size[0], c1[1] - 
t_size[1] - 3\n        cv2.rectangle(img, c1, c2, color, -1)  # filled\n        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\n\n\ndef plot_wh_methods():  # from utils.utils import *; plot_wh_methods()\n    # Compares the two methods for width-height anchor multiplication\n    # https://github.com/ultralytics/yolov3/issues/168\n    x = np.arange(-4.0, 4.0, .1)\n    ya = np.exp(x)\n    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2\n\n    fig = plt.figure(figsize=(6, 3), dpi=150)\n    plt.plot(x, ya, '.-', label='yolo method')\n    plt.plot(x, yb ** 2, '.-', label='^2 power method')\n    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')\n    plt.xlim(left=-4, right=4)\n    plt.ylim(bottom=0, top=6)\n    plt.xlabel('input')\n    plt.ylabel('output')\n    plt.legend()\n    fig.tight_layout()\n    fig.savefig('comparison.png', dpi=200)\n\n\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16,\n                is_gray_scale=False):\n    tl = 3  # line thickness\n    tf = max(tl - 1, 1)  # font thickness\n\n    if isinstance(images, torch.Tensor):\n        images = images.cpu().numpy()\n\n    if isinstance(targets, torch.Tensor):\n        targets = targets.cpu().numpy()\n\n    # un-normalise\n    if np.max(images[0]) <= 1:\n        images *= 255\n\n    bs, _, h, w = images.shape  # batch size, _, height, width\n    bs = min(bs, max_subplots)  # limit plot images\n    ns = np.ceil(bs ** 0.5)  # number of subplots (square)\n\n    # Check if we should resize\n    scale_factor = max_size / max(h, w)\n    if scale_factor < 1:\n        h = math.ceil(scale_factor * h)\n        w = math.ceil(scale_factor * w)\n\n    # Empty array for output\n    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)\n\n    # Fix class - colour map\n    prop_cycle = plt.rcParams['axes.prop_cycle']\n    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb\n    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))\n    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]\n\n    for i, img in enumerate(images):\n        if i == max_subplots:  # if last batch has fewer images than we expect\n            break\n\n        block_x = int(w * (i // ns))\n        block_y = int(h * (i % ns))\n\n        img = img.transpose(1, 2, 0)\n        if scale_factor < 1:\n            img = cv2.resize(img, (w, h))\n            img = np.expand_dims(img, axis=-1)\n\n        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n        if len(targets) > 0:\n            image_targets = targets[targets[:, 0] == i]\n            boxes = xywh2xyxy(image_targets[:, 2:6]).T\n            classes = image_targets[:, 1].astype('int')\n            gt = image_targets.shape[1] == 6  # ground truth if no conf column\n            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)\n\n            boxes[[0, 2]] *= w\n            boxes[[0, 2]] += block_x\n            boxes[[1, 3]] *= h\n            boxes[[1, 3]] += block_y\n            for j, box in enumerate(boxes.T):\n                cls = int(classes[j])\n                color = color_lut[cls % len(color_lut)]\n                cls = names[cls] if names else cls\n                if gt or conf[j] > 0.1:  # 0.3 conf thresh\n                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])\n                    plot_one_box(box, mosaic, label=label, color=color, 
line_thickness=tl)\n\n        # Draw image filename labels\n        if paths is not None:\n            label = os.path.basename(paths[i])[:40]  # trim to 40 char\n            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n                        lineType=cv2.LINE_AA)\n\n        # Image border\n        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n    if fname is not None:\n        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))\n\n    return mosaic\n\n\ndef plot_test_txt():  # from utils.utils import *; plot_test()\n    # Plot test.txt histograms\n    x = np.loadtxt('test.txt', dtype=np.float32)\n    box = xyxy2xywh(x[:, :4])\n    cx, cy = box[:, 0], box[:, 1]\n\n    fig, ax = plt.subplots(1, 1, figsize=(6, 6))\n    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)\n    ax.set_aspect('equal')\n    fig.tight_layout()\n    plt.savefig('hist2d.png', dpi=300)\n\n    fig, ax = plt.subplots(1, 2, figsize=(12, 6))\n    ax[0].hist(cx, bins=600)\n    ax[1].hist(cy, bins=600)\n    fig.tight_layout()\n    plt.savefig('hist1d.png', dpi=200)\n\n\ndef plot_targets_txt():  # from utils.utils import *; plot_targets_txt()\n    # Plot targets.txt histograms\n    x = np.loadtxt('targets.txt', dtype=np.float32).T\n    s = ['x targets', 'y targets', 'width targets', 'height targets']\n    fig, ax = plt.subplots(2, 2, figsize=(8, 8))\n    ax = ax.ravel()\n    for i in range(4):\n        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))\n        ax[i].legend()\n        ax[i].set_title(s[i])\n    fig.tight_layout()\n    plt.savefig('targets.jpg', dpi=200)\n\n\ndef plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)\n    # Plot hyperparameter evolution results in evolve.txt\n    x = np.loadtxt('evolve.txt', ndmin=2)\n    f = fitness(x)\n    weights = (f - f.min()) ** 2  # for weighted results\n    fig = plt.figure(figsize=(12, 10))\n    matplotlib.rc('font', **{'size': 8})\n    for i, (k, v) in enumerate(hyp.items()):\n        y = x[:, i + 7]\n        # mu = (y * weights).sum() / weights.sum()  # best weighted result\n        mu = y[f.argmax()]  # best single result\n        plt.subplot(4, 5, i + 1)\n        plt.plot(mu, f.max(), 'o', markersize=10)\n        plt.plot(y, f, '.')\n        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters\n        print('%15s: %.3g' % (k, mu))\n    fig.tight_layout()\n    plt.savefig('evolve.png', dpi=200)\n\n\ndef plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()\n    # Plot training results files 'results*.txt', overlaying train and val losses\n    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'F1']  # legends\n    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles\n    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):\n        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T\n        n = results.shape[1]  # number of rows\n        x = range(start, min(stop, n) if stop else n)\n        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5))\n        ax = ax.ravel()\n        for i in range(5):\n            for j in [i, i + 5]:\n                y = results[j, x]\n                if i in [0, 1, 2]:\n                    y[y 
== 0] = np.nan  # dont show zero loss values\n                ax[i].plot(x, y, marker='.', label=s[j])\n            ax[i].set_title(t[i])\n            ax[i].legend()\n            ax[i].set_ylabel(f) if i == 0 else None  # add filename\n        fig.tight_layout()\n        fig.savefig(f.replace('.txt', '.png'), dpi=200)\n\n\ndef plot_results(start=0, stop=0, bucket='', id=()):  # from utils.utils import *; plot_results()\n    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov3#training\n    fig, ax = plt.subplots(2, 5, figsize=(12, 6))\n    ax = ax.ravel()\n    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',\n         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'F1']\n    if bucket:\n        os.system('rm -rf storage.googleapis.com')\n        files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]\n    else:\n        files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')\n    for f in sorted(files):\n        try:\n            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T\n            n = results.shape[1]  # number of rows\n            x = range(start, min(stop, n) if stop else n)\n            for i in range(10):\n                y = results[i, x]\n                if i in [0, 1, 2, 5, 6, 7]:\n                    y[y == 0] = np.nan  # dont show zero loss values\n                    # y /= y[0]  # normalize\n                ax[i].plot(x, y, marker='.', label=Path(f).stem, linewidth=2, markersize=8)\n                ax[i].set_title(s[i])\n                if i in [5, 6, 7]:  # share train and val loss y axes\n                    ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n        except:\n            print('Warning: Plotting error for %s, skipping file' % f)\n\n    fig.tight_layout()\n    ax[1].legend()\n    fig.savefig('results.png', dpi=200)\n"
  },
  {
    "path": "weights/pretrain_weights/download_yolov3_weights.sh",
    "content": "#!/bin/bash\n\n# make '/weights' directory if it does not exist and cd into it\n# mkdir -p weights && cd weights\n\n# copy darknet weight files, continue '-c' if partially downloaded\n# wget -c https://pjreddie.com/media/files/yolov3.weights\n# wget -c https://pjreddie.com/media/files/yolov3-tiny.weights\n# wget -c https://pjreddie.com/media/files/yolov3-spp.weights\n\n# yolov3 pytorch weights\n# download from Google Drive: https://drive.google.com/drive/folders/1uxgUBemJVw9wZsdpboYbzUN4bcRhsuAI\n\n# darknet53 weights (first 75 layers only)\n# wget -c https://pjreddie.com/media/files/darknet53.conv.74\n\n# yolov3-tiny weights from darknet (first 16 layers only)\n# ./darknet partial cfg/yolov3-tiny.cfg yolov3-tiny.weights yolov3-tiny.conv.15 15\n# mv yolov3-tiny.conv.15 ../\n\n# new method\npython3 -c \"from models import *;\nattempt_download('weights/yolov3.pt');\nattempt_download('weights/yolov3-spp.pt')\"\n"
  }
]