Repository: SpursLipu/YOLOv3v4-ModelCompression-MultidatasetTraining-Multibackbone
Branch: master
Commit: 0202ec6be7d6
Files: 103
Total size: 972.9 KB
Directory structure:
gitextract_flko7qgo/
├── EagleEye_normal_prune.py
├── EagleEye_regular_prune.py
├── EagleEye_slim_prune.py
├── LICENSE
├── PTQ.py
├── README.md
├── cfg/
│ ├── yolov2/
│ │ ├── yolov2-hand.cfg
│ │ ├── yolov2-tiny-hand.cfg
│ │ ├── yolov2-tiny.cfg
│ │ └── yolov2.cfg
│ ├── yolov3/
│ │ ├── yolov3-UAV.cfg
│ │ ├── yolov3-asff.cfg
│ │ ├── yolov3-bdd100k.cfg
│ │ ├── yolov3-hand.cfg
│ │ ├── yolov3-onDIOR.cfg
│ │ ├── yolov3-screw.cfg
│ │ ├── yolov3-ship.cfg
│ │ ├── yolov3-spp-matrix.cfg
│ │ ├── yolov3-spp-pan-scale.cfg
│ │ ├── yolov3-spp.cfg
│ │ ├── yolov3-spp3.cfg
│ │ ├── yolov3-visdrone.cfg
│ │ └── yolov3.cfg
│ ├── yolov3-ghostnet/
│ │ └── yolov3-ghost-coco.cfg
│ ├── yolov3-mobilenet/
│ │ ├── yolov3-mobilenet-UAV.cfg
│ │ ├── yolov3-mobilenet-coco.cfg
│ │ ├── yolov3-mobilenet-hand.cfg
│ │ ├── yolov3-mobilenet-screw.cfg
│ │ └── yolov3-mobilenet-visdrone.cfg
│ ├── yolov3-singlechannel/
│ │ └── yolov3-singlechannel.cfg
│ ├── yolov3tiny/
│ │ ├── yolov3-tiny-UAV.cfg
│ │ ├── yolov3-tiny-hand.cfg
│ │ ├── yolov3-tiny-ship-one.cfg
│ │ ├── yolov3-tiny-ship.cfg
│ │ ├── yolov3-tiny.cfg
│ │ ├── yolov3-tiny3.cfg
│ │ ├── yolov3-tiny_bdd100k.cfg
│ │ ├── yolov3-tiny_onDIOR.cfg
│ │ └── yolov3-tiny_visdrone.cfg
│ ├── yolov3tiny-efficientnetB0/
│ │ └── yolov3tiny-efficientnetB0.cfg
│ ├── yolov3tiny-mobilenet-small/
│ │ ├── yolov3tiny-mobilenet-small-UAV.cfg
│ │ ├── yolov3tiny-mobilenet-small-coco.cfg
│ │ ├── yolov3tiny-mobilenet-small-screw.cfg
│ │ └── yolov3tiny-mobilenet-small-visdrone.cfg
│ ├── yolov4/
│ │ ├── yolov4-hand.cfg
│ │ ├── yolov4-relu.cfg
│ │ ├── yolov4-visdrone.cfg
│ │ └── yolov4.cfg
│ └── yolov4tiny/
│ └── yolov4-tiny.cfg
├── convert.py
├── convert_FPGA.py
├── convert_FPGA_2.py
├── data/
│ ├── UAV_Samples_label.data
│ ├── UAV_Samples_label.names
│ ├── bdd100k.data
│ ├── bdd100k.names
│ ├── coco.names
│ ├── coco2014.data
│ ├── coco2017.data
│ ├── dior.data
│ ├── dior.names
│ ├── get_coco2014.sh
│ ├── get_coco2017.sh
│ ├── oxfordhand.data
│ ├── oxfordhand.names
│ ├── screw.data
│ ├── screw.names
│ ├── trainset.data
│ ├── trainset.names
│ ├── visdrone.data
│ └── visdrone.names
├── detect.py
├── info.py
├── layer_channel_prune.py
├── layer_channel_regular_prune.py
├── layer_prune.py
├── models.py
├── normal_prune.py
├── regular_prune.py
├── requirements.txt
├── shortcut_prune.py
├── slim_prune.py
├── test.py
├── train.py
├── utils/
│ ├── __init__.py
│ ├── adabound.py
│ ├── datasets.py
│ ├── gcp.sh
│ ├── google_utils.py
│ ├── layers.py
│ ├── output_upsample.py
│ ├── parse_config.py
│ ├── prune_utils.py
│ ├── quantized/
│ │ ├── __init__.py
│ │ ├── quantized_TPSQ.py
│ │ ├── quantized_dorefa.py
│ │ ├── quantized_google.py
│ │ ├── quantized_lowbit.py
│ │ ├── quantized_ptq.py
│ │ └── quantized_ptq_cos.py
│ ├── torch_utils.py
│ └── utils.py
└── weights/
└── pretrain_weights/
└── download_yolov3_weights.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: EagleEye_normal_prune.py
================================================
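# EagleEye-style channel pruning (normal variant). Sketch of the procedure
# implemented below:
#   1. For every prunable conv layer, sample a random prune rate and build a
#      channel mask that keeps the filters with the largest L1 norms
#      (obtain_l1_mask / rand_prune_and_eval).
#   2. Reject candidates whose MACs fall outside remain_ratio +/- delta.
#   3. Recalibrate BN running statistics on ~10% of the training batches
#      (EagleEye's adaptive-BN step), then score the candidate by mAP.
#   4. After --number candidates, the best one is written out as cfg + weights.
# Example invocation (flags as defined in the argparse section below):
#   python EagleEye_normal_prune.py --cfg cfg/yolov3/yolov3.cfg \
#       --data data/coco2017.data --weights weights/pretrain_weights/yolov3.weights \
#       --percent 0.5 --delta 0.05 --number 200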
from models import *
from utils.utils import *
from utils.prune_utils import *
from utils.datasets import *
import os
import test
import argparse
from thop import profile
# from distiller.model_summaries import model_performance_summary
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
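# Note: no torch.cuda.synchronize() is issued around the loop, so on GPU the
# measured time reflects asynchronous kernel launches and should be read as a
# rough relative estimate rather than an exact latency.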
def obtain_filters_mask(model, CBL_idx, prune_idx, idx_mask):
pruned = 0
total = 0
num_filters = []
filters_mask = []
# CBL_idx holds all conv layers that have a BN layer (the conv layer right before a YOLO layer has no BN)
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
mask = idx_mask[idx]
# mask = obtain_bn_mask(bn_module, thre).cpu().numpy()
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
if remain == 0:
print("Channels would be all pruned!")
raise Exception
# print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
# f'remaining channel: {remain:>4d}')
else:
mask = torch.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.clone())
# The prune_ratio here would be: pruned alpha (BN scale) parameters / all alpha parameters in CBL_idx
# prune_ratio = pruned / total
# print(f'Prune channels: {pruned}\tPrune ratio: {prune_ratio:.3f}')
return num_filters, filters_mask
def obtain_l1_mask(conv_module, random_rate):
w_copy = conv_module.weight.data.abs().clone()
w_copy = torch.sum(w_copy, dim=(1, 2, 3))
length = w_copy.cpu().numpy().shape[0]
num_retain = int(length * (1 - random_rate))
if num_retain == 0:
num_retain = 1
_, y = torch.topk(w_copy, num_retain)
mask = torch.zeros(length, dtype=torch.float32).to(w_copy.device)
mask[y] = 1
return mask
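# Worked example: for a conv layer with 4 filters whose per-filter L1 norms are
# [0.9, 0.1, 0.5, 0.3] and random_rate=0.5, num_retain = int(4 * 0.5) = 2, so
# the mask is [1, 0, 1, 0]: the two largest-norm filters are kept.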
#macs = flops / 2
def performance_summary(model, opt=None, prefix=""):
macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640).to(device),), verbose=False)
return macs
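# Note: MACs are profiled with a fixed 1x3x480x640 input rather than the
# 416x416 size used for training/eval; since origin_macs and current_macs use
# the same input, their ratio (checked against remain_ratio) is still valid.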
def rand_prune_and_eval(model, min_rate, max_rate):
while True:
model_copy = deepcopy(model)
remain_num = 0
idx_new = dict()
for idx in prune_idx:
# bn_module = model_copy.module_list[idx][1]
conv_module = model_copy.module_list[idx][0]
random_rate = (max_rate - min_rate) * (np.random.rand(1)) + min_rate
mask = obtain_l1_mask(conv_module, random_rate)
idx_new[idx] = mask
remain_num += int(mask.sum())
conv_module.weight.data = conv_module.weight.data.permute(1, 2, 3, 0).mul(mask).float().permute(3, 0, 1, 2)
# bn_module.weight.data.mul_(mask)
# ---------------
num_filters, filters_mask = obtain_filters_mask(model_copy, CBL_idx, prune_idx, idx_new)
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
compact_module_defs = deepcopy(model.module_defs)
for idx, num in zip(CBL_idx, num_filters):
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(num)
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)
current_parameters = obtain_num_parameters(compact_model)
# print(current_parameters/origin_nparameters, end=';')
current_macs = performance_summary(compact_model)
# if current_parameters / origin_nparameters > remain_ratio + delta or current_parameters / origin_nparameters < remain_ratio - delta:
# macs = flops/2
if current_macs / origin_macs > remain_ratio + delta or current_macs / origin_macs < remain_ratio - delta:
# print('missing')
model_copy.cpu()
compact_model.cpu()
torch.cuda.empty_cache()
continue
print("yes---")
for i in CBLidx2mask:
CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()
pruned_model = prune_model_keep_size_forEagleEye(model, prune_idx, CBLidx2mask)
init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)
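# Adaptive-BN step from EagleEye: run the candidate in train mode under
# no_grad so only the BN running_mean/running_var are updated (weights are
# untouched), using roughly 10% of the training batches (`steps` below), which
# recalibrates BN statistics to the pruned channel layout before scoring mAP.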
compact_model.train()
with torch.no_grad():
for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader)):
imgs = imgs.cuda().float() / 255.0
compact_model(imgs)
if batch_i > steps:
break
del model_copy
torch.cuda.empty_cache()
break
return compact_module_defs, current_parameters, compact_model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/pretrain_weights/yolov3.weights',
help='sparse model weights')
parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')
parser.add_argument('--delta', type=float, default=0.05, help='delta')
parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
parser.add_argument('--number', type=int, default=200, help='number of candidate subnetworks')
opt = parser.parse_args()
print(opt)
t0 = time.time()
remain_ratio = 1 - opt.percent
number = opt.number
img_size = opt.img_size
batch_size = opt.batch_size
delta = opt.delta
hyp = {'giou': 3.54, # giou loss gain
'cls': 37.4, # cls loss gain
'cls_pw': 1.0, # cls BCELoss positive_weight
'obj': 64.3, # obj loss gain (*=img_size/320 if img_size != 320)
'obj_pw': 1.0, # obj BCELoss positive_weight
'iou_t': 0.20, # iou training threshold
'lr0': 0.01, # initial learning rate (SGD=5E-3, Adam=5E-4)
'lrf': 0.0005, # final learning rate (with cos scheduler)
'momentum': 0.937, # SGD momentum
'weight_decay': 0.0005, # optimizer weight decay
'fl_gamma': 0.0, # focal loss gamma (efficientDet default is gamma=1.5)
'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction)
'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction)
'hsv_v': 0.36, # image HSV-Value augmentation (fraction)
'degrees': 1.98 * 0, # image rotation (+/- deg)
'translate': 0.05 * 0, # image translation (+/- fraction)
'scale': 0.05 * 0, # image scale (+/- gain)
'shear': 0.641 * 0} # image shear (+/- deg)
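# The geometric augmentations above (degrees, translate, scale, shear) are
# multiplied by 0, i.e. disabled; only HSV jitter remains active for the
# calibration dataloader.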
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg).to(device)
if opt.weights:
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
data_config = parse_data_cfg(opt.data)
valid_path = data_config["valid"]
train_path = data_config["train"]
class_names = load_classes(data_config["names"])
steps = math.ceil((len(open(train_path).readlines()) / batch_size) * 0.1)
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
dataset = LoadImagesAndLabels(train_path,
img_size,
batch_size,
augment=True,
hyp=hyp, # augmentation hyperparameters
rect=False, # rectangular training
cache_images=False)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size, 16]),
shuffle=True, # Shuffle=True unless rectangular training is used
pin_memory=True,
collate_fn=dataset.collate_fn)
test_dataset = LoadImagesAndLabels(valid_path, img_size, batch_size,
hyp=hyp,
rect=True,
cache_images=False)
testloader = torch.utils.data.DataLoader(test_dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size, 8]),
shuffle=False,
pin_memory=True,
collate_fn=test_dataset.collate_fn)
with torch.no_grad():
origin_model_metric = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
model=model,
dataloader=testloader,
rank=-1,
plot=False)
origin_nparameters = obtain_num_parameters(model)
origin_macs = performance_summary(model)
CBL_idx, Conv_idx, prune_idx = parse_module_defs(model.module_defs)
print("-------------------------------------------------------")
max_mAP = 0
for i in range(number):
compact_module_defs, current_parameters, compact_model = rand_prune_and_eval(model, 0, 1)
with torch.no_grad():
# Raise the NMS confidence threshold so that a poor randomly generated model cannot exhaust GPU memory
mAP = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
conf_thres=0.1,
model=compact_model,
dataloader=testloader,
rank=-1,
plot=False)[0][2]
print('candidate: ' + str(i), end=" ")
print('remain_ratio: ' + str(current_parameters / origin_nparameters))
print(f'mAP of the pruned model is {mAP:.4f}')
if mAP > max_mAP:
max_mAP = mAP
compact_model_winnner = deepcopy(compact_model)
cfg_name = 'cfg_backup/' + str(i) + '.cfg'
if not os.path.isdir('cfg_backup/'):
os.makedirs('cfg_backup/')
pruned_cfg_file = write_cfg(cfg_name, [model.hyperparams.copy()] + compact_module_defs)
del compact_model
torch.cuda.empty_cache()
# Take the module_defs of the winning compact model (filter counts already reflect the pruning)
compact_module_defs = deepcopy(compact_model_winnner.module_defs)
compact_nparameters = obtain_num_parameters(compact_model_winnner)
compact_macs = performance_summary(compact_model_winnner)
compact_flops = compact_macs*2 / 1024**3
origin_flops = origin_macs*2 / 1024**3
random_input = torch.rand((16, 3, 416, 416)).to(device)
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model_winnner)
# Evaluate the pruned model on the test set and count its parameters
with torch.no_grad():
compact_model_metric = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
model=compact_model_winnner,
dataloader=testloader,
rank=-1,
plot=False)
# Compare the change in parameter count and in metric performance before and after pruning
metric_table = [
["Metric", "Before", "After"],
["mAP", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
["GFLOPs",f"{origin_flops}",f"{compact_flops}"],
["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
# Generate the pruned cfg file and save the model
pruned_cfg_name = 'cfg/rand-normal_' + str(remain_ratio) + '_' + str(number) + '/' + 'rand-normal_' + str(
remain_ratio) + '_' + str(number) + '.cfg'
# Create the output directory
dir_name = 'cfg/rand-normal_' + str(remain_ratio) + '_' + str(number) + '/'
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
# The parsed compact_module_defs turned the anchors from a string into an array, so convert the anchors back to a string here
file = open(opt.cfg, 'r')
lines = file.read().split('\n')
for line in lines:
if line.split(' = ')[0] == 'anchors':
anchor = line.split(' = ')[1]
break
if line.split('=')[0] == 'anchors':
anchor = line.split('=')[1]
break
file.close()
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + 'rand-normal_' + str(remain_ratio) + '_' + str(number) + '.weights'
save_weights(compact_model_winnner, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
print('%g sub networks completed in %.3f hours.\n' % (number, (time.time() - t0) / 3600))
================================================
FILE: EagleEye_regular_prune.py
================================================
from models import *
from utils.utils import *
from utils.prune_utils import *
from utils.datasets import *
import os
import test
import argparse
from thop import profile
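# Regular variant of the EagleEye pruner: same random search as
# EagleEye_normal_prune.py, except retained channel counts are rounded to a
# multiple of --channel-base (see obtain_l1_mask and the merge_mask call
# below) so pruned layers keep hardware-friendly widths.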
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
def obtain_filters_mask(model, CBL_idx, prune_idx, idx_mask):
pruned = 0
total = 0
num_filters = []
filters_mask = []
# CBL_idx holds all conv layers that have a BN layer (the conv layer right before a YOLO layer has no BN)
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
mask = idx_mask[idx]
# mask = obtain_bn_mask(bn_module, thre).cpu().numpy()
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
if remain == 0:
print("Channels would be all pruned!")
raise Exception
else:
mask = torch.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.clone())
return num_filters, filters_mask
def obtain_l1_mask(conv_module, random_rate):
w_copy = conv_module.weight.data.abs().clone()
w_copy = torch.sum(w_copy, dim=(1, 2, 3))
length = w_copy.cpu().numpy().shape[0]
num_retain = int(length * (1 - random_rate))
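# Round the retained count to a multiple of channel_base; the clamps below
# keep it within [channel_base, length]. (This assumes get_nearest_multiple
# rounds to the nearest multiple, as its name suggests.)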
num_retain = get_nearest_multiple(num_retain, channel_base)
if num_retain > length:
num_retain = length
if num_retain == 0:
num_retain = channel_base
_, y = torch.topk(w_copy, num_retain)
mask = torch.zeros(length, dtype=torch.float32).to(w_copy.device)
mask[y] = 1
return mask
#macs = flops / 2
def performance_summary(model, opt=None, prefix=""):
macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640).to(device),), verbose=False)
return macs
def rand_prune_and_eval(model, min_rate, max_rate):
while True:
model_copy = deepcopy(model)
remain_num = 0
idx_new = dict()
for idx in prune_idx:
# bn_module = model_copy.module_list[idx][1]
conv_module = model_copy.module_list[idx][0]
random_rate = (max_rate - min_rate) * (np.random.rand(1)) + min_rate
mask = obtain_l1_mask(conv_module, random_rate)
idx_new[idx] = mask
remain_num += int(mask.sum())
conv_module.weight.data = conv_module.weight.data.permute(1, 2, 3, 0).mul(mask).float().permute(3, 0, 1, 2)
# bn_module.weight.data.mul_(mask)
# ---------------
num_filters, filters_mask = obtain_filters_mask(model_copy, CBL_idx, prune_idx, idx_new)
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
CBLidx2filters = {idx: filters for idx, filters in zip(CBL_idx, num_filters)}
compact_module_defs = deepcopy(model.module_defs)
for i in model_copy.module_defs:
if i['type'] == 'shortcut':
i['is_access'] = False
merge_mask(model_copy, CBLidx2mask, CBLidx2filters, base=channel_base)
for idx, num in CBLidx2filters.items():
#num = get_nearest_multiple(num,channel_base)
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(num)
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)
current_parameters = obtain_num_parameters(compact_model)
# print(current_parameters/origin_nparameters, end=';')
current_macs = performance_summary(compact_model)
# macs = flops/2
if current_macs / origin_macs > remain_ratio + delta or current_macs / origin_macs < remain_ratio - delta:
# print('missing')
del model_copy
del compact_model
torch.cuda.empty_cache()
continue
print("yes---")
for i in CBLidx2mask:
CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()
pruned_model = prune_model_keep_size_forEagleEye(model, prune_idx, CBLidx2mask)
init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)
compact_model.train()
with torch.no_grad():
for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader)):
imgs = imgs.cuda().float() / 255.0
compact_model(imgs)
if batch_i > steps:
break
del model_copy
torch.cuda.empty_cache()
break
return compact_module_defs, current_parameters, compact_model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/pretrain_weights/yolov3.weights',
help='sparse model weights')
parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')
parser.add_argument('--delta', type=float, default=0.05, help='delta')
parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
parser.add_argument('--number', type=int, default=200, help='number of candidate subnetworks')
parser.add_argument('--channel-base', type=int, default=8, help='retained channel counts are rounded to a multiple of channel-base')
opt = parser.parse_args()
print(opt)
t0 = time.time()
remain_ratio = 1 - opt.percent
number = opt.number
img_size = opt.img_size
batch_size = opt.batch_size
delta = opt.delta
channel_base = opt.channel_base
hyp = {'giou': 3.54, # giou loss gain
'cls': 37.4, # cls loss gain
'cls_pw': 1.0, # cls BCELoss positive_weight
'obj': 64.3, # obj loss gain (*=img_size/320 if img_size != 320)
'obj_pw': 1.0, # obj BCELoss positive_weight
'iou_t': 0.20, # iou training threshold
'lr0': 0.01, # initial learning rate (SGD=5E-3, Adam=5E-4)
'lrf': 0.0005, # final learning rate (with cos scheduler)
'momentum': 0.937, # SGD momentum
'weight_decay': 0.0005, # optimizer weight decay
'fl_gamma': 0.0, # focal loss gamma (efficientDet default is gamma=1.5)
'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction)
'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction)
'hsv_v': 0.36, # image HSV-Value augmentation (fraction)
'degrees': 1.98 * 0, # image rotation (+/- deg)
'translate': 0.05 * 0, # image translation (+/- fraction)
'scale': 0.05 * 0, # image scale (+/- gain)
'shear': 0.641 * 0} # image shear (+/- deg)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg).to(device)
if opt.weights:
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
data_config = parse_data_cfg(opt.data)
valid_path = data_config["valid"]
train_path = data_config["train"]
class_names = load_classes(data_config["names"])
steps = math.ceil((len(open(train_path).readlines()) / batch_size) * 0.1)
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
dataset = LoadImagesAndLabels(train_path,
img_size,
batch_size,
augment=True,
hyp=hyp, # augmentation hyperparameters
rect=False, # rectangular training
cache_images=False)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size, 16]),
shuffle=True, # Shuffle=True unless rectangular training is used
pin_memory=True,
collate_fn=dataset.collate_fn)
test_dataset = LoadImagesAndLabels(valid_path, img_size, batch_size,
hyp=hyp,
rect=True,
cache_images=False)
testloader = torch.utils.data.DataLoader(test_dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size, 8]),
shuffle=False,
pin_memory=True,
collate_fn=test_dataset.collate_fn)
with torch.no_grad():
origin_model_metric = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
model=model,
dataloader=testloader,
rank=-1,
plot=False)
origin_nparameters = obtain_num_parameters(model)
origin_macs = performance_summary(model)
CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2(model.module_defs)
print("-------------------------------------------------------")
max_mAP = 0
for i in range(number):
compact_module_defs, current_parameters, compact_model = rand_prune_and_eval(model, 0, 1)
with torch.no_grad():
# Raise the NMS confidence threshold so that a poor randomly generated model cannot exhaust GPU memory
mAP = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
conf_thres=0.1,
model=compact_model,
dataloader=testloader,
rank=-1,
plot=False)[0][2]
print('candidate: ' + str(i), end=" ")
print('remain_ratio: ' + str(current_parameters / origin_nparameters))
print(f'mAP of the pruned model is {mAP:.4f}')
if mAP > max_mAP:
max_mAP = mAP
compact_model_winnner = deepcopy(compact_model)
cfg_name = 'cfg_backup/' + str(i) + '.cfg'
if not os.path.isdir('cfg_backup/'):
os.makedirs('cfg_backup/')
pruned_cfg_file = write_cfg(cfg_name, [model.hyperparams.copy()] + compact_module_defs)
del compact_model
torch.cuda.empty_cache()
# Take the module_defs of the winning compact model (filter counts already reflect the pruning)
compact_module_defs = deepcopy(compact_model_winnner.module_defs)
compact_nparameters = obtain_num_parameters(compact_model_winnner)
compact_macs = performance_summary(compact_model_winnner)
compact_flops = compact_macs*2 / 1024**3
origin_flops = origin_macs*2 / 1024**3
random_input = torch.rand((16, 3, 416, 416)).to(device)
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model_winnner)
# Evaluate the pruned model on the test set and count its parameters
with torch.no_grad():
compact_model_metric = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
model=compact_model_winnner,
dataloader=testloader,
rank=-1,
plot=False)
# Compare the change in parameter count and in metric performance before and after pruning
metric_table = [
["Metric", "Before", "After"],
["mAP", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
["GFLOPs",f"{origin_flops}",f"{compact_flops}"],
["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
# Generate the pruned cfg file and save the model (named 'rand-regular' to
# match this script; the original 'rand-slim' name collided with EagleEye_slim_prune.py)
pruned_cfg_name = 'cfg/rand-regular_' + str(remain_ratio) + '_' + str(number) + '/' + 'rand-regular_' + str(
remain_ratio) + '_' + str(number) + '.cfg'
# Create the output directory
dir_name = 'cfg/rand-regular_' + str(remain_ratio) + '_' + str(number) + '/'
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
# The parsed compact_module_defs turned the anchors from a string into an array, so convert the anchors back to a string here
file = open(opt.cfg, 'r')
lines = file.read().split('\n')
for line in lines:
if line.split(' = ')[0] == 'anchors':
anchor = line.split(' = ')[1]
break
if line.split('=')[0] == 'anchors':
anchor = line.split('=')[1]
break
file.close()
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + 'rand-regular_' + str(remain_ratio) + '_' + str(number) + '.weights'
save_weights(compact_model_winnner, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
print('%g sub networks completed in %.3f hours.\n' % (number, (time.time() - t0) / 3600))
================================================
FILE: EagleEye_slim_prune.py
================================================
from models import *
from utils.utils import *
from utils.prune_utils import *
from utils.datasets import *
import os
import test
import argparse
from thop import profile
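# Slim variant of the EagleEye pruner: same random search as
# EagleEye_normal_prune.py, but masks of layers joined by shortcut connections
# are merged (merge_mask) so both sides of each residual add keep matching
# channels.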
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
def obtain_filters_mask(model, CBL_idx, prune_idx, idx_mask):
pruned = 0
total = 0
num_filters = []
filters_mask = []
# CBL_idx holds all conv layers that have a BN layer (the conv layer right before a YOLO layer has no BN)
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
mask = idx_mask[idx]
# mask = obtain_bn_mask(bn_module, thre).cpu().numpy()
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
if remain == 0:
print("Channels would be all pruned!")
raise Exception
else:
mask = torch.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.clone())
return num_filters, filters_mask
def obtain_l1_mask(conv_module, random_rate):
w_copy = conv_module.weight.data.abs().clone()
w_copy = torch.sum(w_copy, dim=(1, 2, 3))
length = w_copy.cpu().numpy().shape[0]
num_retain = int(length * (1 - random_rate))
if num_retain == 0:
num_retain = 1
_, y = torch.topk(w_copy, num_retain)
mask = torch.zeros(length, dtype=torch.float32).to(w_copy.device)
mask[y] = 1
return mask
#macs = flops / 2
def performance_summary(model, opt=None, prefix=""):
macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640).to(device),), verbose=False)
return macs
def rand_prune_and_eval(model, min_rate, max_rate):
while True:
model_copy = deepcopy(model)
remain_num = 0
idx_new = dict()
for idx in prune_idx:
# bn_module = model_copy.module_list[idx][1]
conv_module = model_copy.module_list[idx][0]
random_rate = (max_rate - min_rate) * (np.random.rand(1)) + min_rate
mask = obtain_l1_mask(conv_module, random_rate)
idx_new[idx] = mask
remain_num += int(mask.sum())
conv_module.weight.data = conv_module.weight.data.permute(1, 2, 3, 0).mul(mask).float().permute(3, 0, 1, 2)
# bn_module.weight.data.mul_(mask)
# ---------------
num_filters, filters_mask = obtain_filters_mask(model_copy, CBL_idx, prune_idx, idx_new)
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
CBLidx2filters = {idx: filters for idx, filters in zip(CBL_idx, num_filters)}
compact_module_defs = deepcopy(model.module_defs)
for i in model_copy.module_defs:
if i['type'] == 'shortcut':
i['is_access'] = False
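# Merge the masks of shortcut-connected layers so residual adds stay aligned;
# the 'is_access' flags set above mark every shortcut as not yet visited by
# the merge.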
merge_mask(model_copy, CBLidx2mask, CBLidx2filters)
for idx, num in CBLidx2filters.items():
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(num)
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)
current_parameters = obtain_num_parameters(compact_model)
# print(current_parameters/origin_nparameters, end=';')
current_macs = performance_summary(compact_model)
# macs = flops/2
if current_macs / origin_macs > remain_ratio + delta or current_macs / origin_macs < remain_ratio - delta:
# print('missing')
del model_copy
del compact_model
torch.cuda.empty_cache()
continue
print("yes---")
for i in CBLidx2mask:
CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()
pruned_model = prune_model_keep_size_forEagleEye(model, prune_idx, CBLidx2mask)
init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)
compact_model.train()
with torch.no_grad():
for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader)):
imgs = imgs.cuda().float() / 255.0
compact_model(imgs)
if batch_i > steps:
break
del model_copy
torch.cuda.empty_cache()
break
return compact_module_defs, current_parameters, compact_model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/pretrain_weights/yolov3.weights',
help='sparse model weights')
parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')
parser.add_argument('--delta', type=float, default=0.05, help='delta')
parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
parser.add_argument('--number', type=int, default=200, help='number of candidate subnetworks')
opt = parser.parse_args()
print(opt)
t0 = time.time()
remain_ratio = 1 - opt.percent
number = opt.number
img_size = opt.img_size
batch_size = opt.batch_size
delta = opt.delta
hyp = {'giou': 3.54, # giou loss gain
'cls': 37.4, # cls loss gain
'cls_pw': 1.0, # cls BCELoss positive_weight
'obj': 64.3, # obj loss gain (*=img_size/320 if img_size != 320)
'obj_pw': 1.0, # obj BCELoss positive_weight
'iou_t': 0.20, # iou training threshold
'lr0': 0.01, # initial learning rate (SGD=5E-3, Adam=5E-4)
'lrf': 0.0005, # final learning rate (with cos scheduler)
'momentum': 0.937, # SGD momentum
'weight_decay': 0.0005, # optimizer weight decay
'fl_gamma': 0.0, # focal loss gamma (efficientDet default is gamma=1.5)
'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction)
'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction)
'hsv_v': 0.36, # image HSV-Value augmentation (fraction)
'degrees': 1.98 * 0, # image rotation (+/- deg)
'translate': 0.05 * 0, # image translation (+/- fraction)
'scale': 0.05 * 0, # image scale (+/- gain)
'shear': 0.641 * 0} # image shear (+/- deg)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg).to(device)
if opt.weights:
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
data_config = parse_data_cfg(opt.data)
valid_path = data_config["valid"]
train_path = data_config["train"]
class_names = load_classes(data_config["names"])
steps = math.ceil((len(open(train_path).readlines()) / batch_size) * 0.1)
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
dataset = LoadImagesAndLabels(train_path,
img_size,
batch_size,
augment=True,
hyp=hyp, # augmentation hyperparameters
rect=False, # rectangular training
cache_images=False)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size, 16]),
shuffle=True, # Shuffle=True unless rectangular training is used
pin_memory=True,
collate_fn=dataset.collate_fn)
test_dataset = LoadImagesAndLabels(valid_path, img_size, batch_size,
hyp=hyp,
rect=True,
cache_images=False)
testloader = torch.utils.data.DataLoader(test_dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size, 8]),
shuffle=False,
pin_memory=True,
collate_fn=test_dataset.collate_fn)
with torch.no_grad():
origin_model_metric = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
model=model,
dataloader=testloader,
rank=-1,
plot=False)
origin_nparameters = obtain_num_parameters(model)
origin_macs = performance_summary(model)
CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2(model.module_defs)
print("-------------------------------------------------------")
max_mAP = 0
for i in range(number):
compact_module_defs, current_parameters, compact_model = rand_prune_and_eval(model, 0, 1)
with torch.no_grad():
# Raise the NMS confidence threshold so that a poor randomly generated model cannot exhaust GPU memory
mAP = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
conf_thres=0.1,
model=compact_model,
dataloader=testloader,
rank=-1,
plot=False)[0][2]
print('candidate: ' + str(i), end=" ")
print('remain_ratio: ' + str(current_parameters / origin_nparameters))
print(f'mAP of the pruned model is {mAP:.4f}')
if mAP > max_mAP:
max_mAP = mAP
compact_model_winnner = deepcopy(compact_model)
cfg_name = 'cfg_backup/' + str(i) + '.cfg'
if not os.path.isdir('cfg_backup/'):
os.makedirs('cfg_backup/')
pruned_cfg_file = write_cfg(cfg_name, [model.hyperparams.copy()] + compact_module_defs)
del compact_model
torch.cuda.empty_cache()
# Take the module_defs of the winning compact model (filter counts already reflect the pruning)
compact_module_defs = deepcopy(compact_model_winnner.module_defs)
compact_nparameters = obtain_num_parameters(compact_model_winnner)
compact_macs = performance_summary(compact_model_winnner)
compact_flops = compact_macs*2 / 1024**3
origin_flops = origin_macs*2 / 1024**3
random_input = torch.rand((16, 3, 416, 416)).to(device)
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model_winnner)
# Evaluate the pruned model on the test set and count its parameters
with torch.no_grad():
compact_model_metric = test.test(opt.cfg,
opt.data,
batch_size=batch_size,
imgsz=img_size,
model=compact_model_winnner,
dataloader=testloader,
rank=-1,
plot=False)
# Compare the change in parameter count and in metric performance before and after pruning
metric_table = [
["Metric", "Before", "After"],
["mAP", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
["GFLOPs",f"{origin_flops}",f"{compact_flops}"],
["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
# Generate the pruned cfg file and save the model
pruned_cfg_name = 'cfg/rand-slim_' + str(remain_ratio) + '_' + str(number) + '/' + 'rand-slim_' + str(
remain_ratio) + '_' + str(number) + '.cfg'
# Create the output directory
dir_name = 'cfg/rand-slim_' + str(remain_ratio) + '_' + str(number) + '/'
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
# The parsed compact_module_defs turned the anchors from a string into an array, so convert the anchors back to a string here
file = open(opt.cfg, 'r')
lines = file.read().split('\n')
for line in lines:
if line.split(' = ')[0] == 'anchors':
anchor = line.split(' = ')[1]
break
if line.split('=')[0] == 'anchors':
anchor = line.split('=')[1]
break
file.close()
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + 'rand-slim_' + str(remain_ratio) + '_' + str(number) + '.weights'
save_weights(compact_model_winnner, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
print('%g sub networks completed in %.3f hours.\n' % (number, (time.time() - t0) / 3600))
================================================
FILE: LICENSE
================================================
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
================================================
FILE: PTQ.py
================================================
import argparse
import test
from torch.utils.data import DataLoader
from models import *
from utils.datasets import *
from utils.utils import *
wdir = 'weights' + os.sep # weights dir
PTQ_weights = wdir + 'PTQ.pt'
def PTQ(cfg,
data,
weights=None,
batch_size=64,
imgsz=416,
augment=False,
a_bit=8,
w_bit=8, ):
# Initialize/load model and set device
device = torch_utils.select_device(opt.device, batch_size=batch_size)
    print('PTQ only supports a single GPU!')
print('') # skip a line
# Initialize model
model = Darknet(cfg, is_gray_scale=opt.gray_scale, maxabsscaler=opt.maxabsscaler)
q_model = Darknet(cfg, quantized=3, a_bit=a_bit, w_bit=w_bit, is_gray_scale=opt.gray_scale,
maxabsscaler=opt.maxabsscaler,
shortcut_way=opt.shortcut_way)
# Load weights
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location=device)['model'])
q_model.load_state_dict(torch.load(weights, map_location=device)['model'])
else: # darknet format
load_darknet_weights(model, weights)
load_darknet_weights(q_model, weights, quant=True)
model.to(device)
q_model.to(device)
# Configure run
data_dict = parse_data_cfg(data)
cali_path = data_dict['train']
test_path = data_dict['valid']
# Dataloader
cali_dataset = LoadImagesAndLabels(cali_path, imgsz, batch_size, rect=True,
is_gray_scale=True if opt.gray_scale else False, subset_len=opt.subset_len)
cali_batch_size = min(batch_size, len(cali_dataset))
cali_dataloader = DataLoader(cali_dataset,
batch_size=cali_batch_size,
num_workers=min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]),
pin_memory=True,
collate_fn=cali_dataset.collate_fn)
test_dataset = LoadImagesAndLabels(test_path, imgsz, batch_size, rect=True,
is_gray_scale=True if opt.gray_scale else False)
test_batch_size = min(batch_size, len(test_dataset))
test_dataloader = DataLoader(test_dataset,
batch_size=test_batch_size,
num_workers=min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]),
pin_memory=True,
collate_fn=test_dataset.collate_fn)
print('') # skip a line
print('<.....................test original model.......................>')
test.test(cfg,
data=opt.data,
batch_size=batch_size,
imgsz=imgsz,
model=model,
dataloader=test_dataloader,
rank=-1,
maxabsscaler=opt.maxabsscaler)
q_model.train()
print('') # skip a line
print('<.....................Quantize.......................>')
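    # Calibration pass: run the calibration images through the quantized model once
    # (no gradients needed) so the quantization observers can record activation ranges.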
for batch_i, (imgs, _, _, _) in enumerate(tqdm(cali_dataloader)):
if opt.maxabsscaler:
            imgs = imgs.to(device).float() / 256.0  # uint8 to float32, scales 0-255 into [0, 1)
imgs = imgs * 2 - 1
else:
            imgs = imgs.to(device).float() / 256.0  # uint8 to float32, scales 0-255 into [0, 1)
# Disable gradients
with torch.no_grad():
_, _ = q_model(imgs, augment=augment) # inference and training outputs
print('') # skip a line
print('<.....................test quantized model.......................>')
print('') # skip a line
test.test(cfg,
data=opt.data,
batch_size=batch_size,
imgsz=imgsz,
model=q_model,
dataloader=test_dataloader,
quantized=3,
a_bit=opt.a_bit,
w_bit=opt.w_bit,
rank=-1,
maxabsscaler=opt.maxabsscaler)
# Save model
if hasattr(q_model, 'module'):
model_temp = q_model.module.state_dict()
else:
model_temp = q_model.state_dict()
chkpt = {'epoch': None,
'best_fitness': None,
'training_results': None,
'model': model_temp,
'optimizer': None}
# Save last, best and delete
torch.save(chkpt, PTQ_weights)
del chkpt
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='PTQ.py')
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--a-bit', type=int, default=8,
help='a-bit')
parser.add_argument('--w-bit', type=int, default=8,
help='w-bit')
parser.add_argument('--subset_len', type=int, default=-1, help='calibration set len')
    parser.add_argument('--gray_scale', action='store_true', help='gray-scale training')
    parser.add_argument('--maxabsscaler', '-mas', action='store_true', help='Standardize input to (-1,1)')
    parser.add_argument('--shortcut_way', type=int, default=1, help='shortcut quantization way')
opt = parser.parse_args()
opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0] # find file
opt.data = list(glob.iglob('./**/' + opt.data, recursive=True))[0] # find file
print(opt)
PTQ(opt.cfg,
opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.augment,
a_bit=opt.a_bit,
w_bit=opt.w_bit)
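# Example usage (a minimal sketch: the cfg/data paths below ship with this repo,
# while 'weights/best.pt' is an assumed checkpoint name from a prior training run):
#   python3 PTQ.py --cfg cfg/yolov3/yolov3-hand.cfg --data data/oxfordhand.data \
#       --weights weights/best.pt --a-bit 8 --w-bit 8 --subset_len 64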
================================================
FILE: README.md
================================================
# YOLOv3-ModelCompression-MultidatasetTraining
This project mainly includes three parts:
1. Training methods for several mainstream object detection datasets (coco2017, coco2014, BDD100k, Visdrone,
Hand).
2. Mainstream model compression algorithms, including pruning, quantization, and knowledge distillation.
3. Multiple backbones for YOLOv3, including Darknet-YOLOv3, Tiny-YOLOv3, and Mobilenetv3-YOLOv3.
The YOLOv3 source code is based on the PyTorch implementation [ultralytics/yolov3](https://github.com/ultralytics/yolov3). The BN-layer-based pruning method
comes from [coldlarry/YOLOv3-complete-pruning](https://github.com/coldlarry/YOLOv3-complete-pruning); thanks to both.
**If you can't download the weights files and datasets from Baidu, please send an e-mail (spurslipu@pku.edu.cn) and I
will reply as soon as I can.**
# Update
January 4, 2020. Provided download links and training methods for the Visdrone dataset.
January 19, 2020. Dior, Bdd100k and Visdrone training provided, as well as the converted weights files.
March 1, 2020. Provided the Mobilenetv3 backbone.
April 7, 2020. Implemented two models based on Mobilenetv3 (Yolov3-Mobilenet and Yolov3tiny-Mobilenet-small),
provided pre-training weights, and extended the normal pruning method to the two Mobilenet-based models.
April 27, 2020. Updated the Mobilenetv3 pre-training weights and added a layer pruning method from
[tanluren/yolov3-channel-and-layer-pruning](https://github.com/tanluren/yolov3-channel-and-layer-pruning);
thanks for sharing.
May 22, 2020. Merged some new optimizations from [ultralytics/yolov3](https://github.com/ultralytics/yolov3) and
updated the cfg file and weights of YOLOv4.
May 22, 2020. Updated the 8-bit quantization method and fixed some bugs.
July 12, 2020. Fixed mAP dropping to 0 after pruning yolov3-mobilenet. See issue #41 for more
details.
September 30, 2020. Added the BN_Fold training method to reduce the precision loss caused by BN fusion, and the
Pow(2) quantization method targeted at FPGA. See the quantization section for details.
# Requirements
Our project is based on [ultralytics/yolov3](https://github.com/ultralytics/yolov3);
see [ultralytics/yolov3](https://github.com/ultralytics/yolov3) for details. In brief:
- `numpy`
- `torch >= 1.1.0`
- `opencv-python`
- `tqdm`
# Current support
|Function|Supported|
| --- |--- |
|Multi-Backbone training|√ |
|Multi-Datasets|√ |
|Pruning|√ |
|Quantization|√ |
|Knowledge Distillation|√ |
# Training
`python3 train.py --data ... --cfg ...` trains a model; the `-pt` flag is required when starting from COCO
pre-trained weights.
`python3 test.py --data ... --cfg ...` evaluates a trained model.
`python3 detect.py --data ... --cfg ... --source ...` runs detection; the default source is
data/samples, results are saved to /output, and the source can be images or videos.
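As a concrete illustration, here is a train/test/detect round-trip on the oxfordhand dataset (the cfg and data files ship with this repo; `weights/best.pt` is assumed to be the checkpoint produced by training):
```bash
# train with COCO pre-trained weights (-pt)
python3 train.py --data data/oxfordhand.data --cfg cfg/yolov3/yolov3-hand.cfg --weights weights/yolov3-608.weights -pt --batch-size 16 --epochs 100
# evaluate the trained checkpoint
python3 test.py --data data/oxfordhand.data --cfg cfg/yolov3/yolov3-hand.cfg --weights weights/best.pt
# detect on images/videos under data/samples; results are written to /output
python3 detect.py --data data/oxfordhand.data --cfg cfg/yolov3/yolov3-hand.cfg --weights weights/best.pt --source data/samples
```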
# Multi-Datasets
This project provides preprocessed datasets for YOLOv3: configuration files (.cfg), dataset index files (.data),
dataset category files (.names), and anchor box sizes (9 boxes for YOLOv3 and 6 boxes for tiny-YOLOv3)
re-clustered with the K-means algorithm.
mAP results:
|Dataset|YOLOv3-640|YOLOv4-640|YOLOv3-mobilenet-640|
| --- |--- |--- |--- |
|Dior|0.749| | |
|bdd100k|0.543| | |
|visdrone|0.311|0.383|0.348|
Download the datasets below and unzip them to /data.
- [COCO2017](https://pan.baidu.com/s/1KysFL6AmdbCBq4tHDebqlw)
Extract code:hjln
- [COCO2014](https://pan.baidu.com/s/1EoXOR77yEVokqPCaxg8QGg)
Extract code:rhqx
- [COCO weights](https://pan.baidu.com/s/1JZylwRQIgAd389oWUu0djg)
Extract code:k8ms
Training command
```bash
python3 train.py --data data/coco2017.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3.cfg --img-size ... --epochs ...
```
- [Dior](https://pan.baidu.com/s/1z0IQPBN16I-EctjwN9Idyg)
Extract code:vnuq
- [Dior weights](https://pan.baidu.com/s/12lYOgBAo1R5VkOZqDqCFJQ)
Extract code:l8wz
Training command
```bash
python3 train.py --data data/dior.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3-onDIOR.cfg --img-size ... --epochs ...
```
- [bdd100k](https://pan.baidu.com/s/157Md2qeFgmcOv5UmnIGI_g)
Extract code:8duw
- [bdd100k weights](https://pan.baidu.com/s/1wWiHlLxIaK_WHy_mG2wmAA)
Extract code:xeqo
Training command
```bash
python3 train.py --data data/bdd100k.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3-bdd100k.cfg --img-size ... --epochs ...
```
- [visdrone](https://pan.baidu.com/s/1CPGmS3tLI7my4_m7qDhB4Q)
Extract code:dy4c
- [YOLOv3-visdrone weights](https://pan.baidu.com/s/1N4qDP3b0tt8TIWuTFefDEw)
Extract code:87lf
- [YOLOv4-visdrone weights](https://pan.baidu.com/s/1zOFyt_AFiNk0fAFa8yE9RQ)
Extract code:xblu
- [YOLOv3-mobilenet-visdrone weights](https://pan.baidu.com/s/1BHC8b6xHmTuN8h74QJFt1g)
Extract code:fb6y
Training command
```bash
python train.py --data data/visdrone.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3-visdrone.cfg --img-size ... --epochs ...
```
- [oxfordhand](https://pan.baidu.com/s/1JL4gFGh-W_gYEEsiIQssZw)
Extract code:3du4
Training command
```bash
python train.py --data data/oxfordhand.data --batch-size ... --weights weights/yolov3-608.weights -pt --cfg cfg/yolov3/yolov3-hand.cfg --img-size ... --epochs ...
```
## 1.Dior
The DIOR dataset is one of the largest, most diverse, publicly available object detection datasets in the Earth
observation community. The numbers of ship and vehicle instances are high, giving a good balance
between small instances and large ones. The images were collected from Google Earth.
[Introduction](https://cloud.tencent.com/developer/article/1509762)
### Test results


## 2.bdd100k
BDD100k is a large, diverse dataset of 100,000 driving videos. Each video is about 40 seconds long,
and the researchers annotated bounding boxes for objects that commonly appear on the road in 100,000 key frames. The
dataset covers different weather conditions, including sunny, cloudy and rainy days, as well as different times of day
and night.
[Website](http://bair.berkeley.edu/blog/2018/05/30/bdd/)
[Download](http://bdd-data.berkeley.edu)
[Paper](https://arxiv.org/abs/1805.04687)
### Test results

## 3.Visdrone
The VisDrone2019 dataset was collected by the AISKYEYE team at the Machine Learning and Data Mining Laboratory of
Tianjin University, China. The benchmark contains 288 video clips consisting of 261,908 frames, plus 10,209 static
images, captured by various drone-mounted cameras. It covers a wide range of aspects,
including location (14 different cities across China, thousands of kilometers apart), environment (urban and rural),
objects (pedestrians, vehicles, bicycles, etc.) and density (sparse to crowded scenes). The data was collected
with various drone platforms (i.e., drones of different models), in various scenarios and under various weather
and lighting conditions. The frames are manually annotated with more than 2.6 million bounding boxes of
frequently-seen targets such as pedestrians, cars, bicycles and tricycles. Some important attributes, including
scene visibility, object category, and occlusion, are also provided to improve data utilization.
[Website](http://www.aiskyeye.com/)
### Test results of YOLOv3

### Test results of YOLOv4


# Multi-Backbone
Based on mobilenetv3, two network structures are designed.
|Structure |Backbone params |Post-processing params |Total params |GFLOPS |mAP0.5 |mAP0.5:0.95 |Speed (inference/NMS/total) |FPS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|YOLOv3 |38.74M |20.39M |59.13M |117.3 |0.580 |0.340 |12.3/1.7/14.0 ms|71.4fps |
|YOLOv3tiny |6.00M |2.45M |8.45M |9.9 |0.347 |0.168 |3.5/1.8/5.3 ms |188.7fps |
|YOLOv3-mobilenetv3 |2.84M |20.25M |23.09M |32.2 |0.547 |0.346 |7.9/1.8/9.7 ms |103.1fps |
|YOLOv3tiny-mobilenetv3-small|0.92M |2.00M |2.92M |2.9 |0.379 |0.214 |5.2/1.9/7.1 ms |140.8fps |
|YOLOv4 |- |- |61.35M |107.1 |0.650 |0.438 |13.5/1.8/15.3 ms|65.4fps |
|YOLOv4-tiny |- |- |5.78M |12.3 |0.435 |0.225 |4.1/1.7/5.8 ms |172.4fps |
1. YOLOv3, YOLOv3tiny and YOLOv4 were trained and tested on coco2014; YOLOv3-Mobilenetv3 and
YOLOv3tiny-Mobilenetv3-small were trained and tested on coco2017.
2. Inference speed was tested on GTX2080Ti*4 with image size 608.
3. The training set should match the testing set; a mismatch will produce misleading mAP.
Read [issue](https://github.com/ultralytics/yolov3/issues/970) for details.
## Train command
1.YOLOv3
```bash
python3 train.py --data data/... --batch-size ... -pt --weights weights/yolov3-608.weights --cfg cfg/yolov3/yolov3.cfg --img-size ...
```
Weights Download
- [COCO pretraining weights](https://pan.baidu.com/s/1JZylwRQIgAd389oWUu0djg)
Extract code:k8ms
2.YOLOv3tiny
```bash
python3 train.py --data data/... --batch-size ... -pt --weights weights/yolov3tiny.weights --cfg cfg/yolov3tiny/yolov3-tiny.cfg --img-size ...
```
- [COCO pretraining weights](https://pan.baidu.com/s/1iWGxdjR3TWxEe37__msyRA)
Extract code:udfe
3.YOLOv3tiny-mobilenet-small
```bash
python3 train.py --data data/... --batch-size ... -pt --weights weights/yolov3tiny-mobilenet-small.weights --cfg cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-coco.cfg --img-size ...
```
- [COCO pretraining weights](https://pan.baidu.com/s/1mSFjWLU91H2OhNemsAeiiQ)
Extract code:pxz4
4.YOLOv3-mobilenet
```bash
python3 train.py --data data/... --batch-size ... -pt --weights weights/yolov3-mobilenet.weights --cfg cfg/yolov3-mobilenet/yolov3-mobilenet-coco.cfg --img-size ...
```
- [COCO pretraining weights](https://pan.baidu.com/s/1EI2Xh1i18CRLoZo_P3NVHw)
Extract code:3vm8
5.YOLOv4
```bash
python3 train.py --data data/... --batch-size ... -pt --weights weights/yolov4.weights --cfg cfg/yolov4/yolov4.cfg --img-size ...
```
- [COCO pretraining weights](https://pan.baidu.com/s/1jAGNNC19oQhAIgBfUrkzmQ)
Extract code:njdg
# Model Compression
## 1. Pruning
### Features
|Method |Advantage|Disadvantage |
| --- | --- | --- |
|Normal pruning |Shortcut layers are not pruned; the compression rate is considerable and stable, and no fine-tuning is required.|The compression rate is limited. |
|Shortcut pruning |Very high compression rate. |Fine-tuning is necessary. |
|Slimming |Shortcut fusion is used to improve pruning accuracy; the best method for shortcut pruning.| |
|Regular pruning |Designed for hardware deployment: the number of filters after pruning is a multiple of 2; no fine-tuning; supports tiny-yolov3 and Mobilenet.|Part of the compression ratio is sacrificed for regularity. |
|Layer pruning |ResBlocks are used as the basic pruning units, which is convenient for hardware deployment. |Only the backbone can be pruned. |
|Layer-channel pruning |Channel pruning is applied first, then layer pruning; the overall pruning rate is very high. |Accuracy may be affected. |
### Step
1.Training
```bash
python3 train.py --data ... -pt --batch-size ... --weights ... --cfg ...
```
2.Sparse training
`--s` specifies the sparsity factor and `--prune` specifies the sparsity type.
`--prune 0`: sparse training for normal and regular pruning.
`--prune 1`: sparse training for shortcut pruning.
`--prune 2`: sparse training for layer pruning.
command:
```bash
python3 train.py --data ... -pt --batch-size 32 --weights ... --cfg ... --s 0.001 --prune 0
```
3.Pruning
- normal pruning
```bash
python3 normal_prune.py --cfg ... --data ... --weights ... --percent ...
```
- regular pruning
```bash
python3 regular_prune.py --cfg ... --data ... --weights ... --percent ...
```
- shortcut pruning
```bash
python3 shortcut_prune.py --cfg ... --data ... --weights ... --percent ...
```
- slimming
```bash
python3 slim_prune.py --cfg ... --data ... --weights ... --percent ...
```
- layer pruning
```bash
python3 layer_prune.py --cfg ... --data ... --weights ... --shortcut ...
```
- layer-channel pruning
```bash
python3 layer_channel_prune.py --cfg ... --data ... --weights ... --shortcut ... --percent ...
```
Note that the `--cfg` and `--weights` arguments must point to the cfg and weights files
generated by step 2.
You can obtain more compression by increasing the percent value.
(If the sparsity is insufficient and the percent value is too high, the program will report an error.)
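Putting the steps together, a minimal normal-pruning pipeline looks like this (a sketch: the dataset paths ship with this repo, `weights/best.pt` is assumed to be the checkpoint written by training, and the placeholders in step 4 stand for the pruned cfg/weights emitted by step 3):
```bash
# 1. baseline training
python3 train.py --data data/oxfordhand.data -pt --batch-size 32 --weights weights/yolov3-608.weights --cfg cfg/yolov3/yolov3-hand.cfg
# 2. sparse training for normal/regular pruning
python3 train.py --data data/oxfordhand.data -pt --batch-size 32 --weights weights/best.pt --cfg cfg/yolov3/yolov3-hand.cfg --s 0.001 --prune 0
# 3. prune 80% of the channels, ranked by BN scaling factors
python3 normal_prune.py --cfg cfg/yolov3/yolov3-hand.cfg --data data/oxfordhand.data --weights weights/best.pt --percent 0.8
# 4. (optional) fine-tune the pruned model with the cfg/weights emitted by step 3
python3 train.py --data data/oxfordhand.data --batch-size 32 --weights <pruned weights> --cfg <pruned cfg>
```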
### Pruning experiment
1.normal pruning, oxfordhand, img_size = 608, tested on GTX2080Ti*4
|model |parameters before pruning |mAP before pruning|inference time before pruning|percent |parameters after pruning |mAP after pruning |inference time after pruning|
| --- | --- | --- | --- | --- | --- | --- | --- |
|yolov3(without fine tuning) |58.67M |0.806 |0.1139s |0.8 |10.32M |0.802 |0.0844s |
|yolov3-mobilenet(fine tuning) |22.75M |0.812 |0.0345s |0.97 |2.72M |0.795 |0.0211s |
|yolov3tiny(fine tuning) |8.27M |0.708 |0.0144s |0.5 |1.13M |0.641 |0.0116s |
2.regular pruning, oxfordhand, img_size = 608, tested on GTX2080Ti*4
|model |parameters before pruning |mAP before pruning|inference time before pruning|percent |parameters after pruning |mAP after pruning |inference time after pruning|
| --- | --- | --- | --- | --- | --- | --- | --- |
|yolov3(without fine tuning) |58.67M |0.806 |0.1139s |0.8 |12.15M |0.805 |0.0874s |
|yolov3-mobilenet(fine tuning) |22.75M |0.812 |0.0345s |0.97 |2.75M |0.803 |0.0208s |
|yolov3tiny(fine tuning) |8.27M |0.708 |0.0144s |0.5 |1.82M |0.703 |0.0122s |
3.shortcut pruning, oxfordhand, img_size = 608, tested on GTX2080Ti*4
|model |parameters before pruning |mAP before pruning|inference time before pruning|percent |parameters after pruning |mAP after pruning |inference time after pruning|
| --- | --- | --- | --- | --- | --- | --- | --- |
|yolov3 |58.67M |0.806 | |0.8 |6.35M |0.816 | |
|yolov4 |60.94M |0.896 | |0.6 |13.97M |0.855 | |
## 2. Quantization
`--quantized 2` Dorefa quantization method
```bash
python train.py --data ... --batch-size ... --weights ... --cfg ... --img-size ... --epochs ... --quantized 2
```
`--quantized 1` Google quantization method
```bash
python train.py --data ... --batch-size ... --weights ... --cfg ... --img-size ... --epochs ... --quantized 1
```
`--FPGA` enables Pow(2) quantization for FPGA deployment.
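For example, quantization-aware training and post-training quantization can be run as below (a sketch: the paths are illustrative, and `PTQ.py` in this repo performs calibration-based post-training quantization with the same `--a-bit`/`--w-bit` settings):
```bash
# quantization-aware training with the Google 8-bit scheme, Pow(2) scales for FPGA
python train.py --data data/oxfordhand.data --cfg cfg/yolov3/yolov3-hand.cfg --weights weights/best.pt --img-size 640 --epochs 50 --quantized 1 --FPGA
# post-training quantization of an already-trained float checkpoint
python PTQ.py --data data/oxfordhand.data --cfg cfg/yolov3/yolov3-hand.cfg --weights weights/best.pt --a-bit 8 --w-bit 8
```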
### experiment
oxfordhand, yolov3, 640 image size
|method |mAP |
| --- | --- |
|Baseline |0.847 |
|Google8bit |0.851 |
|Google8bit + BN Fold |0.851 |
|Google8bit + BN Fold + FPGA |0.852 |
|Google4bit + BN Fold + FPGA |0.842 |
## 3. Knowledge Distillation
The distillation method is based on the basic distillation method proposed by Hinton in 2015, partially adapted
for the detection network.
Distilling the Knowledge in a Neural Network
[paper](https://arxiv.org/abs/1503.02531)
Command: `--t_cfg --t_weights --KDstr`
`--t_cfg` cfg file of the teacher model
`--t_weights` weights file of the teacher model
`--KDstr` KD strategy
`--KDstr 1` The KL loss is computed directly between the teacher's output and the student's output and added to the overall loss.
`--KDstr 2` Box loss and class loss are distinguished, and the student does not learn directly from the teacher. L2 distances are computed between student and GT and between teacher and GT; when the student's distance exceeds the teacher's, an additional student-to-GT loss is added.
`--KDstr 3` Box loss and class loss are distinguished, and the student learns directly from the teacher.
`--KDstr 4` The KD loss is divided into three parts: box loss, class loss and feature loss.
`--KDstr 5` On the basis of KDstr 4, a fine-grained mask is added to the feature loss.
example:
```bash
python train.py --data ... --batch-size ... --weights ... --cfg ... --img-size ... --epochs ... --t_cfg ... --t_weights ...
```
Usually the pre-compression model is used as the teacher and the post-compression model as the student; distillation
training then improves the mAP of the student network.
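For example, distilling a pruned yolov3tiny student from its unpruned teacher might look like this (a sketch: the teacher cfg ships with this repo, while the student cfg/weights and the teacher checkpoint are placeholders from earlier pruning/training steps):
```bash
python train.py --data data/oxfordhand.data --batch-size 32 --img-size 608 --epochs 100 --cfg <pruned student cfg> --weights <pruned student weights> --t_cfg cfg/yolov3tiny/yolov3-tiny-hand.cfg --t_weights <teacher weights> --KDstr 1
```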
### experiment
oxfordhand, yolov3tiny as teacher model, normal-pruned yolov3tiny as student model
|teacher model |mAP of teacher model |student model|directly fine tuning|KDstr 1 |KDstr 2 |KDstr 3 |KDstr 4(L1) |KDstr 5(L1) |
| --- | --- | --- | --- | --- | --- | --- |--- |--- |
|yolov3tiny608 |0.708 |normal pruning yolov3tiny608 |0.658 |0.666 |0.661 |0.672 |0.673 |0.674 |
================================================
FILE: cfg/yolov2/yolov2-hand.cfg
================================================
[net]
# Testing
batch = 1
subdivisions = 1
# Training
# batch=64
# subdivisions=8
width = 608
height = 608
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
burn_in = 1000
max_batches = 500200
policy = steps
steps = 400000,450000
scales = .1,.1
[convolutional]
batch_normalize = 1
filters = 32
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 64
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 64
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
#######
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 30
activation = linear
[yolo]
mask = 0,1,2,3,4
anchors = 9,13, 15,21, 24,29, 38,43, 70,74
classes = 1
num = 5
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov2/yolov2-tiny-hand.cfg
================================================
[net]
batch = 64
subdivisions = 8
width = 416
height = 416
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
max_batches = 40200
policy = steps
steps = -1,100,20000,30000
scales = .1,10,.1,.1
[convolutional]
batch_normalize = 1
filters = 16
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 32
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 64
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 1
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
###########
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 30
activation = linear
[yolo]
mask = 0,1,2,3,4
anchors = 9,13, 15,21, 24,29, 38,43, 70,74
classes = 1
num = 5
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov2/yolov2-tiny.cfg
================================================
[net]
batch = 64
subdivisions = 8
width = 416
height = 416
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
max_batches = 40200
policy = steps
steps = -1,100,20000,30000
scales = .1,10,.1,.1
[convolutional]
batch_normalize = 1
filters = 16
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 32
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 64
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 1
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
###########
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 425
activation = linear
[yolo]
mask = 0,1,2,3,4
anchors = 7,12, 19,30, 45,61, 90,141, 240,279
classes = 80
num = 5
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov2/yolov2.cfg
================================================
[net]
# Testing
batch = 1
subdivisions = 1
# Training
# batch=64
# subdivisions=8
width = 608
height = 608
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
burn_in = 1000
max_batches = 500200
policy = steps
steps = 400000,450000
scales = .1,.1
[convolutional]
batch_normalize = 1
filters = 32
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 64
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 64
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[maxpool]
size = 2
stride = 2
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
#######
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 425
activation = linear
[yolo]
mask = 0,1,2,3,4
anchors = 7,12, 19,30, 45,61, 90,141, 240,279
classes = 80
num = 5
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov3/yolov3-UAV.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 6,7,8
anchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 3,4,5
anchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 0,1,2
anchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3/yolov3-asff.cfg
================================================
# Generated by Glenn Jocher (glenn.jocher@ultralytics.com) for https://github.com/ultralytics/yolov3
# def kmean_anchors(path='../coco/train2017.txt', n=12, img_size=(320, 640)): # from utils.utils import *; kmean_anchors()
# Evolving anchors: 100%|██████████| 1000/1000 [41:15<00:00, 2.48s/it]
# 0.20 iou_thr: 0.992 best possible recall, 4.25 anchors > thr
# kmeans anchors (n=12, img_size=(320, 640), IoU=0.005/0.184/0.634-min/mean/best): 6,9, 15,16, 17,35, 37,26, 36,67, 63,42, 57,100, 121,81, 112,169, 241,158, 195,310, 426,359
[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=16
width=608
height=608
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
# SPP --------------------------------------------------------------------------
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
# SPP --------------------------------------------------------------------------
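# Note: the SPP block above pools the same 512-channel feature map with
# three stride-1 max-pool kernels (5x5, 9x9, 13x13) and then routes the
# pooled outputs together with the unpooled input (layers=-1,-3,-5,-6),
# concatenating multi-scale context at a constant spatial resolution.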
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=258
activation=linear
# YOLO -------------------------------------------------------------------------
[route]
layers = -3
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=258
activation=linear
# YOLO -------------------------------------------------------------------------
[route]
layers = -3
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=258
activation=linear
[yolo]
from=88,99,110
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
[yolo]
from=88,99,110
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
[yolo]
from=88,99,110
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
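# Note: unlike the standard yolov3 cfg, the three [yolo] heads here are
# collected at the end of the file; from=88,99,110 appears to reference the
# three prediction convolutions, and each head takes a disjoint mask
# (6,7,8 / 3,4,5 / 0,1,2) over the same nine COCO anchors.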
================================================
FILE: cfg/yolov3/yolov3-bdd100k.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 6,7,8
anchors = 3,7, 5,18, 6,9, 10,32, 11,14, 17,21, 24,36, 45,59, 93,132
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 3,4,5
anchors = 3,7, 5,18, 6,9, 10,32, 11,14, 17,21, 24,36, 45,59, 93,132
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 0,1,2
anchors = 3,7, 5,18, 6,9, 10,32, 11,14, 17,21, 24,36, 45,59, 93,132
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
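# Note: each prediction convolution before a [yolo] layer follows the usual
# darknet sizing rule, filters = masks_per_head * (classes + 5);
# here 3 * (10 + 5) = 45 for the ten BDD100K classes.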
================================================
FILE: cfg/yolov3/yolov3-hand.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 6,7,8
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 3,4,5
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 0,1,2
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
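# Note: each detection branch above follows the same pattern: [route]
# layers=-4 steps back to the feature map before the head, a 1x1
# convolution halves the channels, [upsample] stride=2 doubles the
# resolution, and the next [route] concatenates with a backbone layer
# (61, then 36) to form the finer-scale head.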
================================================
FILE: cfg/yolov3/yolov3-onDIOR.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=75
activation=linear
[yolo]
mask = 6,7,8
anchors = 5,5, 6,13, 10,26, 13,6, 15,15, 27,10, 28,33, 61,74, 167,169
classes=20
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=75
activation=linear
[yolo]
mask = 3,4,5
anchors = 5,5, 6,13, 10,26, 13,6, 15,15, 27,10, 28,33, 61,74, 167,169
classes=20
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=75
activation=linear
[yolo]
mask = 0,1,2
anchors = 5,5, 6,13, 10,26, 13,6, 15,15, 27,10, 28,33, 61,74, 167,169
classes=20
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
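# Note: mask selects which of the nine anchors each head predicts:
# 6,7,8 assigns the largest anchors to the coarsest grid, 3,4,5 covers the
# middle scale, and 0,1,2 assigns the smallest anchors to the finest grid.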
================================================
FILE: cfg/yolov3/yolov3-screw.cfg
================================================
[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=16
width=256
height=256
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
# learning_rate=0.0001
# burn_in=1000
# max_batches = 50200
# policy=steps
# steps=40000,45000
# scales=.1,.1
learning_rate=0.0001
burn_in=1000
max_batches = 40000
policy=steps
steps=20000,30000
scales=.1,.1
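# Note: with policy=steps the learning rate ramps up over the first
# burn_in=1000 iterations, holds at 0.0001, then is multiplied by the
# matching scale at each step: 1e-5 after iteration 20000 and 1e-6 after
# 30000, until training stops at max_batches=40000.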
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=21
activation=linear
[yolo]
mask = 6,7,8
anchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87
classes=2
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=21
activation=linear
[yolo]
mask = 3,4,5
anchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87
classes=2
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=21
activation=linear
[yolo]
mask = 0,1,2
anchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87
classes=2
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3/yolov3-ship.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch = 16
subdivisions = 1
width = 416
height = 416
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
burn_in = 1000
max_batches = 500200
policy = steps
steps = 400000,450000
scales = .1,.1
[convolutional]
batch_normalize = 1
filters = 32
size = 3
stride = 1
pad = 1
activation = leaky
# Downsample
[convolutional]
batch_normalize = 1
filters = 64
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 32
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 64
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
# Downsample
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 64
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 64
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
# Downsample
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
# Downsample
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
# Downsample
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
######################
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 30
activation = linear
[yolo]
mask = 6,7,8
anchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352
classes = 5
num = 9
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
[route]
layers = -4
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[upsample]
stride = 2
[route]
layers = -1, 61
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 512
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 512
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 512
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 30
activation = linear
[yolo]
mask = 3,4,5
anchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352
classes = 5
num = 9
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
[route]
layers = -4
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[upsample]
stride = 2
[route]
layers = -1, 36
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 256
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 256
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 256
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 30
activation = linear
[yolo]
mask = 0,1,2
anchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352
classes = 5
num = 9
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov3/yolov3-spp-matrix.cfg
================================================
[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=16
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
# 89
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
# 101
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
# 113
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
##################
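# Note: the extra heads below re-route earlier features (layers 110, 98,
# 86) through convolution pairs with asymmetric strides (stride_x/stride_y)
# to produce aspect-ratio-specific grids; share_index appears to reuse the
# weights of the layer with that index (115/116 and 121/122, marked in the
# comments below), and each head masks its own slice of the 27 anchors.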
[route]
layers = 110
# 115
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
# 116
[convolutional]
batch_normalize=1
filters=128
size=1
stride_x=1
stride_y=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 9,10,11
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = 110
# 121
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
# 122
[convolutional]
batch_normalize=1
filters=128
size=1
stride_x=2
stride_y=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 12,13,14
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
##################
[route]
layers = 98
[convolutional]
share_index=115
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
share_index=116
batch_normalize=1
filters=128
size=1
stride_x=1
stride_y=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 15,16,17
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = 98
[convolutional]
share_index=121
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
share_index=122
batch_normalize=1
filters=128
size=1
stride_x=2
stride_y=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 18,19,20
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
##################
[route]
layers = 86
[convolutional]
share_index=115
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
share_index=116
batch_normalize=1
filters=128
size=1
stride_x=1
stride_y=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 21,22,23
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = 86
[convolutional]
share_index=121
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
share_index=122
batch_normalize=1
filters=128
size=1
stride_x=2
stride_y=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 24,25,26
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326, 10,7, 16,15, 33,12, 5,13, 8,30, 17,23, 30,31, 62,23, 59,60, 15,61, 31,45, 30,119, 116,45, 156,99, 373,163, 58,90, 78,198, 187,326
classes=80
num=27
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
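
This matrix-style cfg drives nine [yolo] heads from one shared list of 27 anchors, each head selecting three of them via mask, so every detection conv keeps filters=255 = 3 x (80 + 5). The repo-specific keys share_index (tying a conv's weights to an earlier layer, per the # 115 / # 116 style comments), stride_x and stride_y (anisotropic strides) are ordinary key=value entries, so a generic cfg reader picks them up unchanged. Below is a minimal reader sketch in Python; it is an illustration under that assumption, not the repo's utils/parse_config.py.

def parse_cfg(path):
    # Minimal Darknet-style cfg reader: each [section] becomes a dict of
    # key=value strings, so repo-specific keys such as share_index,
    # stride_x and stride_y survive as ordinary entries.
    blocks, block = [], None
    with open(path) as f:
        for raw in f:
            line = raw.split('#')[0].strip()   # drop comments and whitespace
            if not line:
                continue
            if line.startswith('[') and line.endswith(']'):
                block = {'type': line[1:-1].strip()}
                blocks.append(block)
            else:
                key, value = line.split('=', 1)
                block[key.strip()] = value.strip()
    return blocks
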
================================================
FILE: cfg/yolov3/yolov3-spp-pan-scale.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=32
width=544
height=544
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 10000
policy=steps
steps=8000,9000
scales=.1,.1
#policy=sgdr
#sgdr_cycle=1000
#sgdr_mult=2
#steps=4000,6000,8000,9000
#scales=1, 1, 0.1, 0.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
########### to [yolo-3]
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
########### to [yolo-2]
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
########### to [yolo-1]
########### features of different layers
[route]
layers=1
[reorg3d]
stride=2
[route]
layers=5,-1
[reorg3d]
stride=2
[route]
layers=12,-1
[reorg3d]
stride=2
[route]
layers=37,-1
[reorg3d]
stride=2
[route]
layers=62,-1
########### [yolo-1]
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=4
[route]
layers = -1,-12
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=340
activation=linear
[yolo]
mask = 0,1,2,3
anchors = 8,8, 10,13, 16,30, 33,23, 32,32, 30,61, 62,45, 64,64, 59,119, 116,90, 156,198, 373,326
classes=80
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.05
random=0
########### [yolo-2]
[route]
layers = -7
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=340
activation=linear
[yolo]
mask = 4,5,6,7
anchors = 8,8, 10,13, 16,30, 33,23, 32,32, 30,61, 62,45, 64,64, 59,119, 116,90, 156,198, 373,326
classes=80
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.1
random=0
########### [yolo-3]
[route]
layers = -14
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-43
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=340
activation=linear
[yolo]
mask = 8,9,10,11
anchors = 8,8, 10,13, 16,30, 33,23, 32,32, 30,61, 62,45, 59,119, 80,80, 116,90, 156,198, 373,326
classes=80
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.2
random=0
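
The [reorg3d] stride=2 layers in this file fold a higher-resolution feature map into more channels so it can be routed together with a map of half the resolution. A minimal space-to-depth sketch of that idea follows; the exact channel ordering used by this repo's reorg3d implementation may differ, so treat it as illustrative only.

import torch

def space_to_depth(x: torch.Tensor, stride: int = 2) -> torch.Tensor:
    # Fold each stride x stride spatial block into the channel dimension:
    # (B, C, H, W) -> (B, C*stride*stride, H/stride, W/stride).
    b, c, h, w = x.shape
    x = x.view(b, c, h // stride, stride, w // stride, stride)
    x = x.permute(0, 1, 3, 5, 2, 4).contiguous()
    return x.view(b, c * stride * stride, h // stride, w // stride)
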
================================================
FILE: cfg/yolov3/yolov3-spp.cfg
================================================
[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=16
width=608
height=608
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
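
The ### SPP ### block in this and the other -spp cfgs pools the same 512-channel map with stride-1 maxpools of size 5, 9 and 13 (Darknet pads stride-1 pools so spatial size is preserved), and the final [route] layers=-1,-3,-5,-6 concatenates the three pooled maps with the original input, giving 4 x 512 = 2048 channels before the next 1x1 conv squeezes them back to 512. A minimal PyTorch sketch of that block, as an illustration rather than the repo's models.py code:

import torch
import torch.nn as nn

class SPP(nn.Module):
    # Three stride-1 maxpools over the same input, concatenated with the
    # input itself, mirroring the [maxpool]/[route] layers in the cfg.
    def __init__(self, kernel_sizes=(5, 9, 13)):
        super().__init__()
        self.pools = nn.ModuleList(
            nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
            for k in kernel_sizes
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # route layers=-1,-3,-5,-6: pool13, pool9, pool5, then the raw input
        return torch.cat([p(x) for p in reversed(self.pools)] + [x], dim=1)
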
================================================
FILE: cfg/yolov3/yolov3-spp3.cfg
================================================
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=16
width=608
height=608
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 120200
policy=steps
steps=70000,100000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3/yolov3-visdrone.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
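
The detection convs in this VisDrone cfg use filters=45 rather than the 255 seen in the COCO cfgs because each [yolo] head covers three anchors, and each anchor predicts 4 box offsets, 1 objectness score and one score per class: 3 x (10 + 5) = 45. A small helper to sanity-check the value when adapting a cfg to a new dataset (illustrative, not part of the repo):

def yolo_head_filters(num_classes: int, anchors_per_head: int = 3) -> int:
    # Filters for the conv feeding a [yolo] layer: anchors * (classes
    # + 4 box coordinates + 1 objectness score).
    return anchors_per_head * (num_classes + 5)

assert yolo_head_filters(10) == 45    # the VisDrone heads in this file
assert yolo_head_filters(80) == 255   # the COCO heads elsewhere in cfg/
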
================================================
FILE: cfg/yolov3/yolov3.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
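
Two layer types do all the wiring in these cfgs: [route] concatenates the outputs of the listed layers along the channel axis (negative indices are relative to the current layer), while [shortcut] from=-3 adds the output from three layers back to the previous output element-wise, i.e. a residual connection; activation=linear means no nonlinearity is applied to the sum. A minimal sketch of both, assuming outputs is the list of per-layer tensors built up during the forward pass:

import torch

def route(outputs, layer_indices):
    # [route] layers=a,b,...: channel-wise concat of the named layer outputs;
    # Python's negative indexing matches the cfg's relative references.
    return torch.cat([outputs[i] for i in layer_indices], dim=1)

def shortcut(outputs, from_index):
    # [shortcut] from=-3: element-wise sum of the previous layer's output and
    # the output `from_index` layers back (shapes must match).
    return outputs[-1] + outputs[from_index]
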
================================================
FILE: cfg/yolov3-ghostnet/yolov3-ghost-coco.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
# 0
[convolutional]
batch_normalize=1
filters=16
size=3
stride=2
pad=1
groups=1
activation=relu
# ghost bottleneck starts
#GB1-PConv #1
[convolutional]
batch_normalize=1
filters=8
size=1
stride=1
pad=0
groups=1
activation=relu
#GB1-Cheap #2
[convolutional]
batch_normalize=1
filters=8
size=3
stride=1
pad=1
groups=8
activation=relu
# 3
[route]
layers=-1, 1
# 4
[convolutional]
batch_normalize=1
filters=8
size=1
stride=1
pad=0
groups=1
activation=none
# 5
[convolutional]
batch_normalize=1
filters=8
size=3
stride=1
pad=1
groups=8
activation=none
# 6
[route]
layers=-1,4
# 7
[shortcut]
from=-7
activation=none
# GB2-PConv # 8
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=0
groups=1
activation=relu
# GB2-Cheap # 9
[convolutional]
batch_normalize=1
filters=24
size=3
stride=1
pad=1
groups=24
activation=relu
#10
[route]
layers=-1,8
#11
[convolutional]
batch_normalize=1
filters=48
size=3
stride=2
groups=48
pad=1
activation=none
#12
[convolutional]
batch_normalize=1
filters=12
size=1
stride=1
groups=1
pad=0
activation=none
#13
[convolutional]
batch_normalize=1
filters=12
size=3
stride=1
groups=12
pad=1
activation=none
#14
[route]
layers=-1,12
#15
[route]
layers=7
#16
[convolutional]
batch_normalize=1
filters=16
size=3
stride=2
groups=16
pad=1
activation=none
#17
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
groups=1
pad=0
activation=none
#18
[shortcut]
from=-4
activation=none
# GB3-PConv #19
[convolutional]
batch_normalize=1
filters=36
size=1
stride=1
groups=1
pad=0
activation=relu
# GB3-Cheap #20
[convolutional]
batch_normalize=1
filters=36
size=3
stride=1
groups=36
pad=1
activation=relu
#21
[route]
layers=-1,19
#22
[convolutional]
batch_normalize=1
filters=12
size=1
stride=1
groups=1
pad=0
activation=none
#23
[convolutional]
batch_normalize=1
filters=12
size=3
stride=1
groups=12
pad=1
activation=none
#24
[route]
layers=-1,22
#25
[shortcut]
from=-7
activation=none
#GB4-PConv #26
[convolutional]
batch_normalize=1
filters=36
size=1
stride=1
groups=1
pad=0
activation=relu
#GB4-Cheap #27
[convolutional]
batch_normalize=1
filters=36
size=3
stride=1
groups=36
pad=1
activation=relu
#28
[route]
layers=-1,26
#29
[convolutional]
batch_normalize=1
filters=72
size=5
stride=2
groups=72
pad=2
activation=none
#30
[se]
reduction=4
#31
[convolutional]
batch_normalize=1
filters=20
size=1
stride=1
groups=1
pad=0
activation=none
#32
[convolutional]
batch_normalize=1
filters=20
size=3
stride=1
groups=20
pad=1
activation=none
#33
[route]
layers=-1,31
#34
[route]
layers=25
#35
[convolutional]
batch_normalize=1
filters=24
size=5
stride=2
groups=24
pad=2
activation=none
#36
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
groups=1
pad=0
activation=none
#37
[shortcut]
from=-4
activation=none
#GB5-PConv #38
[convolutional]
batch_normalize=1
filters=60
size=1
stride=1
groups=1
pad=0
activation=relu
#GB5-Cheap #39
[convolutional]
batch_normalize=1
filters=60
size=3
stride=1
groups=60
pad=1
activation=relu
#40
[route]
layers=-1,38
#41
[se]
reduction=4
#42
[convolutional]
batch_normalize=1
filters=20
size=1
stride=1
groups=1
pad=0
activation=none
#43
[convolutional]
batch_normalize=1
filters=20
size=3
stride=1
groups=20
pad=1
activation=none
#44
[route]
layers=-1,42
#45
[shortcut]
from=-8
#GB6-PConv #46
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
groups=1
pad=0
activation=relu
#GB6-Cheap #47
[convolutional]
batch_normalize=1
filters=120
size=3
stride=1
groups=120
pad=1
activation=relu
#48
[route]
layers=-1,46
#49
[convolutional]
batch_normalize=1
filters=240
size=3
stride=2
groups=240
pad=1
activation=none
#50
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
groups=1
pad=0
activation=none
#51
[convolutional]
batch_normalize=1
filters=40
size=3
stride=1
groups=40
pad=1
activation=none
#52
[route]
layers=-1,50
#53
[route]
layers=45
#54
[convolutional]
batch_normalize=1
filters=40
size=3
stride=2
groups=40
pad=1
activation=none
#55
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
groups=1
pad=0
activation=none
#56
[shortcut]
from=-4
activation=none
#GB7-PConv #57
[convolutional]
batch_normalize=1
filters=100
size=1
stride=1
groups=1
pad=0
activation=relu
#GB7-Cheap #58
[convolutional]
batch_normalize=1
filters=100
size=3
stride=1
groups=100
pad=1
activation=relu
#59
[route]
layers=-1,57
#60
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
groups=1
pad=0
activation=none
#61
[convolutional]
batch_normalize=1
filters=40
size=3
stride=1
groups=40
pad=1
activation=none
#62
[route]
layers=-1,60
#63
[shortcut]
from=-7
activation=none
#GB8-PConv #64
[convolutional]
batch_normalize=1
filters=92
size=1
stride=1
groups=1
pad=0
activation=relu
#GB8-Cheap #65
[convolutional]
batch_normalize=1
filters=92
size=3
stride=1
groups=92
pad=1
activation=relu
#66
[route]
layers=-1,64
#67
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
groups=1
pad=0
activation=none
#68
[convolutional]
batch_normalize=1
filters=40
size=3
stride=1
groups=40
pad=1
activation=none
#69
[route]
layers=-1,67
#70
[shortcut]
from=-7
activation=none
#GB9-PConv #71
[convolutional]
batch_normalize=1
filters=92
size=1
stride=1
groups=1
pad=0
activation=relu
#GB9-Cheap #72
[convolutional]
batch_normalize=1
filters=92
size=3
stride=1
groups=92
pad=1
activation=relu
#73
[route]
layers=-1,71
#74
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
groups=1
pad=0
activation=none
#75
[convolutional]
batch_normalize=1
filters=40
size=3
stride=1
groups=40
pad=1
activation=none
#76
[route]
layers=-1,74
#77
[shortcut]
from=-7
activation=none
#GB10-PConv #78
[convolutional]
batch_normalize=1
filters=240
size=1
stride=1
groups=1
pad=0
activation=relu
#GB10-Cheap #79
[convolutional]
batch_normalize=1
filters=240
size=3
stride=1
groups=240
pad=1
activation=relu
#80
[route]
layers=-1,78
#81
[se]
reduction=4
#82
[convolutional]
batch_normalize=1
filters=56
size=1
stride=1
groups=1
pad=0
activation=none
#83
[convolutional]
batch_normalize=1
filters=56
size=3
stride=1
groups=56
pad=1
activation=none
#84
[route]
layers=-1,82
#85
[route]
layers=77
#86
[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
groups=80
pad=1
activation=none
#87
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
groups=1
pad=0
activation=none
#88
[shortcut]
from=-4
activation=none
#GB11-PConv #89
[convolutional]
batch_normalize=1
filters=336
size=1
stride=1
groups=1
pad=0
activation=relu
#GB11-Cheap #90
[convolutional]
batch_normalize=1
filters=336
size=3
stride=1
groups=336
pad=1
activation=relu
#91
[route]
layers=-1,89
#92
[se]
reduction=4
#93
[convolutional]
batch_normalize=1
filters=56
size=1
stride=1
groups=1
pad=0
activation=none
#94
[convolutional]
batch_normalize=1
filters=56
size=3
stride=1
groups=56
pad=1
activation=none
#95
[route]
layers=-1,93
#96
[shortcut]
from=-8
activation=none
#GB12-PConv #97
[convolutional]
batch_normalize=1
filters=336
size=1
stride=1
groups=1
pad=0
activation=relu
#GB12-Cheap #98
[convolutional]
batch_normalize=1
filters=336
size=3
stride=1
groups=336
pad=1
activation=relu
#99
[route]
layers=-1,97
#100
[convolutional]
batch_normalize=1
filters=672
size=5
stride=2
groups=672
pad=2
activation=none
#101
[se]
reduction=4
#102
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
groups=1
pad=0
activation=none
#103
[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
groups=80
pad=1
activation=none
#104
[route]
layers=-1,102
#105
[route]
layers=96
#106
[convolutional]
batch_normalize=1
filters=112
size=5
stride=2
groups=112
pad=2
activation=none
#107
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
groups=1
pad=0
activation=none
#108
[shortcut]
from=-4
activation=none
#GB13-PConv #109
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
groups=1
pad=0
activation=relu
#GB13-Cheap #110
[convolutional]
batch_normalize=1
filters=480
size=3
stride=1
groups=480
pad=1
activation=relu
#111
[route]
layers=-1,109
#112
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
groups=1
pad=0
activation=none
#113
[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
groups=80
pad=1
activation=none
#114
[route]
layers=-1,112
#115
[shortcut]
from=-7
activation=none
#GB14-PConv #116
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
groups=1
pad=0
activation=relu
#GB14-Cheap #117
[convolutional]
batch_normalize=1
filters=480
size=3
stride=1
groups=480
pad=1
activation=relu
#118
[route]
layers=-1,116
#119
[se]
reduction=4
#120
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
groups=1
pad=0
activation=none
#121
[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
groups=80
pad=1
activation=none
#122
[route]
layers=-1,120
#123
[shortcut]
from=-8
activation=none
#GB15-PConv #124
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
groups=1
pad=0
activation=relu
#GB15-Cheap #125
[convolutional]
batch_normalize=1
filters=480
size=3
stride=1
groups=480
pad=1
activation=relu
#126
[route]
layers=-1,124
#127
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
groups=1
pad=0
activation=none
#128
[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
groups=80
pad=1
activation=none
#129
[route]
layers=-1,127
#130
[shortcut]
from=-7
activation=none
#GB16-PConv #131
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
groups=1
pad=0
activation=relu
#GB16-Cheap #132
[convolutional]
batch_normalize=1
filters=480
size=3
stride=1
groups=480
pad=1
activation=relu
#133
[route]
layers=-1,131
#134
[se]
reduction=4
#135
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
groups=1
pad=0
activation=none
#136
[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
groups=80
pad=1
activation=none
#137
[route]
layers=-1,135
#138
[shortcut]
from=-8
activation=none
#139
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
groups=1
pad=0
activation=relu
####### End of backbone
#140
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=0
groups=1
activation=leaky
#141
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
groups=1
filters=1024
activation=leaky
#142
[convolutional]
batch_normalize=1
groups=1
filters=512
size=1
stride=1
pad=0
activation=leaky
#143
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
groups=1
filters=1024
activation=leaky
#144
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=0
groups=1
activation=leaky
#145
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
groups=1
activation=leaky
#146
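# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (80 + 5) = 255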
[convolutional]
size=1
stride=1
pad=0
filters=255
groups=1
activation=linear
#147
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
#148
[route]
layers = -4
#149
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
groups=1
pad=0
activation=leaky
#150
[upsample]
stride=2
#151
[route]
layers = -1, 96
#152
[convolutional]
batch_normalize=1
filters=256
size=1
groups=1
stride=1
pad=0
activation=leaky
#153
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
groups=1
filters=512
activation=leaky
#154
[convolutional]
batch_normalize=1
filters=256
size=1
groups=1
stride=1
pad=0
activation=leaky
#155
[convolutional]
batch_normalize=1
size=3
groups=1
stride=1
pad=1
filters=512
activation=leaky
#156
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=0
groups=1
activation=leaky
#157
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
groups=1
activation=leaky
#158
[convolutional]
size=1
stride=1
pad=0
groups=1
filters=255
activation=linear
#159
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
#160
[route]
layers = -4
#161
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=0
groups=1
activation=leaky
#162
[upsample]
stride=2
#163
[route]
layers = -1, 45
#164
[convolutional]
batch_normalize=1
filters=128
size=1
groups=1
stride=1
pad=0
activation=leaky
#165
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
groups=1
filters=256
activation=leaky
#166
[convolutional]
batch_normalize=1
filters=128
size=1
groups=1
stride=1
pad=0
activation=leaky
#167
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
groups=1
filters=256
activation=leaky
#168
[convolutional]
batch_normalize=1
filters=128
size=1
groups=1
stride=1
pad=0
activation=leaky
#169
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
groups=1
filters=256
activation=leaky
#170
[convolutional]
size=1
stride=1
pad=0
groups=1
filters=255
activation=linear
#171
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3-mobilenet/yolov3-mobilenet-UAV.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=2
pad=1
activation=h_swish
# bneck1
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
# bneck3
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck4
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=5
stride=2
pad=1
activation=relu6
[se]
filters=72
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
# bneck5
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck6
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck7
[convolutional]
batch_normalize=1
filters=240
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=240
size=3
stride=2
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
# bneck8
[convolutional]
batch_normalize=1
filters=200
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=200
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck9
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck10
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck11
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=480
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=480
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
# bneck12
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck13
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=5
stride=2
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
# bneck14
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck15
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
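# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (1 + 5) = 18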
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 6,7,8
anchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 49
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 3,4,5
anchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 25
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 0,1,2
anchors = 5,6, 8,12, 9,8, 10,10, 11,12, 13,16, 15,13, 18,18, 22,25
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3-mobilenet/yolov3-mobilenet-coco.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=2
pad=1
activation=h_swish
# bneck1
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
# bneck3
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck4
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=5
stride=2
pad=1
activation=relu6
[se]
filters=72
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
# bneck5
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck6
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck7
[convolutional]
batch_normalize=1
filters=240
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=240
size=3
stride=2
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
# bneck8
[convolutional]
batch_normalize=1
filters=200
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=200
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck9
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck10
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck11
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=480
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=480
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
# bneck12
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck13
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=5
stride=2
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
# bneck14
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck15
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
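# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (80 + 5) = 255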
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 49
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 25
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3-mobilenet/yolov3-mobilenet-hand.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=2
pad=1
activation=h_swish
# bneck1
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
# bneck3
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck4
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=5
stride=2
pad=1
activation=relu6
[se]
filters=72
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
# bneck5
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck6
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck7
[convolutional]
batch_normalize=1
filters=240
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=240
size=3
stride=2
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
# bneck8
[convolutional]
batch_normalize=1
filters=200
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=200
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck9
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck10
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck11
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=480
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=480
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
# bneck12
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck13
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=5
stride=2
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
# bneck14
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck15
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
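# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (1 + 5) = 18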
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 6,7,8
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 49
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 3,4,5
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 25
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 0,1,2
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3-mobilenet/yolov3-mobilenet-screw.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=2
pad=1
activation=h_swish
# bneck1
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
# bneck3
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck4
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=5
stride=2
pad=1
activation=relu6
[se]
filters=72
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
# bneck5
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck6
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck7
[convolutional]
batch_normalize=1
filters=240
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=240
size=3
stride=2
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
# bneck8
[convolutional]
batch_normalize=1
filters=200
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=200
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck9
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck10
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck11
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=480
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=480
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
# bneck12
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck13
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=5
stride=2
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
# bneck14
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck15
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
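# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (2 + 5) = 21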
[convolutional]
size=1
stride=1
pad=1
filters=21
activation=linear
[yolo]
mask = 6,7,8
anchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87
classes=2
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 49
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=21
activation=linear
[yolo]
mask = 3,4,5
anchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87
classes=2
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 25
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=21
activation=linear
[yolo]
mask = 0,1,2
anchors = 20,29, 26,34, 29,40, 33,44, 35,47, 37,50, 39,52, 43,57, 74,87
classes=2
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3-mobilenet/yolov3-mobilenet-visdrone.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=2
pad=1
activation=h_swish
# bneck1
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=16
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
# bneck3
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=3
stride=1
pad=1
activation=relu6
[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck4
[convolutional]
batch_normalize=1
filters=72
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=72
size=5
stride=2
pad=1
activation=relu6
[se]
filters=72
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
# bneck5
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck6
[convolutional]
batch_normalize=1
filters=120
size=1
stride=1
pad=1
activation=relu6
[depthwise]
batch_normalize=1
filters=120
size=5
stride=1
pad=1
activation=relu6
[se]
filters=120
[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck7
[convolutional]
batch_normalize=1
filters=240
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=240
size=3
stride=2
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
# bneck8
[convolutional]
batch_normalize=1
filters=200
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=200
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck9
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck10
[convolutional]
batch_normalize=1
filters=184
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=184
size=3
stride=1
pad=1
activation=h_swish
[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=linear
# bneck11
[convolutional]
batch_normalize=1
filters=480
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=480
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=480
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
# bneck12
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=3
stride=1
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=112
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck13
[convolutional]
batch_normalize=1
filters=672
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=672
size=5
stride=2
pad=1
activation=h_swish
[se]
filters=672
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
# bneck14
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
# bneck15
[convolutional]
batch_normalize=1
filters=960
size=1
stride=1
pad=1
activation=h_swish
[depthwise]
batch_normalize=1
filters=960
size=5
stride=1
pad=1
activation=h_swish
[se]
filters=960
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-5
activation=linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
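# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (10 + 5) = 45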
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 49
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 25
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3-singlechannel/yolov3-singlechannel.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch = 16
subdivisions = 1
width = 416
height = 416
channels = 1
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
burn_in = 1000
max_batches = 500200
policy = steps
steps = 400000,450000
scales = .1,.1
[convolutional]
batch_normalize = 1
filters = 32
size = 3
stride = 1
pad = 1
activation = leaky
# Downsample
[convolutional]
batch_normalize = 1
filters = 64
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 32
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 64
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
# Downsample
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 64
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 64
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
# Downsample
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
# Downsample
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
# Downsample
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 2
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 1024
size = 3
stride = 1
pad = 1
activation = leaky
[shortcut]
from = -3
activation = linear
######################
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 1024
activation = leaky
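# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (5 + 5) = 30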
[convolutional]
size = 1
stride = 1
pad = 1
filters = 30
activation = linear
[yolo]
mask = 6,7,8
anchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352
classes = 5
num = 9
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
[route]
layers = -4
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[upsample]
stride = 2
[route]
layers = -1, 61
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 512
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 512
activation = leaky
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 512
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 30
activation = linear
[yolo]
mask = 3,4,5
anchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352
classes = 5
num = 9
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
[route]
layers = -4
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[upsample]
stride = 2
[route]
layers = -1, 36
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 256
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 256
activation = leaky
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
size = 3
stride = 1
pad = 1
filters = 256
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 30
activation = linear
[yolo]
mask = 0,1,2
anchors = 112,107, 148,331, 184,196, 234,284, 297,135, 297,350, 352,261, 358,193, 377,352
classes = 5
num = 9
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny-UAV.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=15,25,60,99,150,160,180
scales=0.5,0.5,0.1,0.5,0.5,0.1,0.1
# 0
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
# 1
[maxpool]
size=2
stride=2
# 2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# 3
[maxpool]
size=2
stride=2
# 4
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
# 5
[maxpool]
size=2
stride=2
# 6
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
# 7
[maxpool]
size=2
stride=2
# 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 9
[maxpool]
size=2
stride=2
# 10
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 11
[maxpool]
size=2
stride=1
# 12
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
# 13
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
# 14
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 15
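# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (1 + 5) = 18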
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
# 16
[yolo]
mask = 3,4,5
anchors = 8,9, 10,12, 13,12, 14,15, 17,20, 23,26
classes=1
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
# 17
[route]
layers = -4
# 18
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
# 19
[upsample]
stride=2
# 20
[route]
layers = -1, 8
# 21
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 22
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
# 23
[yolo]
mask = 0,1,2
anchors = 8,9, 10,12, 13,12, 14,15, 17,20, 23,26
classes=1
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny-hand.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=15,25,60,99,150,160,180
scales=0.5,0.5,0.1,0.5,0.5,0.1,0.1
# 0
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
# 1
[maxpool]
size=2
stride=2
# 2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# 3
[maxpool]
size=2
stride=2
# 4
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
# 5
[maxpool]
size=2
stride=2
# 6
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
# 7
[maxpool]
size=2
stride=2
# 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 9
[maxpool]
size=2
stride=2
# 10
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 11
[maxpool]
size=2
stride=1
# 12
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
# 13
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
# 14
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 15
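# Detection conv: filters = anchors_per_scale * (classes + 5) = 3 * (1 + 5) = 18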
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
# 16
[yolo]
mask = 3,4,5
anchors = 9,13, 16,22, 27,38, 28,27, 44,49, 79,83
classes=1
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
# 17
[route]
layers = -4
# 18
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
# 19
[upsample]
stride=2
# 20
[route]
layers = -1, 8
# 21
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 22
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
# 23
[yolo]
mask = 0,1,2
anchors = 9,13, 16,22, 27,38, 28,27, 44,49, 79,83
classes=1
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny-ship-one.cfg
================================================
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=2
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=1
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=30
activation=linear
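# head conv: filters = (classes + 5) * len(mask) = (5 + 5) * 3 = 30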
[yolo]
mask = 0,1,2
anchors = 209,277, 315,160, 358,321
classes=5
num=3
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny-ship.cfg
================================================
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=2
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=1
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=30
activation=linear
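# head conv: filters = (classes + 5) * len(mask) = (5 + 5) * 3 = 30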
[yolo]
mask = 3,4,5
anchors = 140,147, 209,309, 293,136, 328,260, 358,194, 365,351
classes=5
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=30
activation=linear
[yolo]
mask = 0,1,2
anchors = 140,147, 209,309, 293,136, 328,260, 358,194, 365,351
classes=5
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny.cfg
================================================
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=2
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=1
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
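# head conv: filters = (classes + 5) * len(mask) = (80 + 5) * 3 = 255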
[yolo]
mask = 3,4,5
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny3.cfg
================================================
[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=16
width=608
height=608
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 200000
policy=steps
steps=180000,190000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=1
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
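# each head conv: filters = (classes + 5) * len(mask) = (80 + 5) * 3 = 255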
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -3
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 6
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny_bdd100k.cfg
================================================
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=2
width=416
height=416
channels=3
momentum=0.9
decay=0.001
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.0001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
# 0
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
# 1
[maxpool]
size=2
stride=2
# 2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# 3
[maxpool]
size=2
stride=2
# 4
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
# 5
[maxpool]
size=2
stride=2
# 6
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
# 7
[maxpool]
size=2
stride=2
# 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 9
[maxpool]
size=2
stride=2
# 10
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 11
[maxpool]
size=2
stride=1
# 12
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
# 13
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
# 14
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 15
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
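# head conv: filters = (classes + 5) * len(mask) = (10 + 5) * 3 = 45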
# 16
[yolo]
mask = 3,4,5
anchors = 3,7, 5,18, 7,10, 12,20, 26,38, 70,96
classes=10
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
# 17
[route]
layers = -4
# 18
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
# 19
[upsample]
stride=2
# 20
[route]
layers = -1, 8
# 21
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 22
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
# 23
[yolo]
mask = 0,1,2
anchors = 3,7, 5,18, 7,10, 12,20, 26,38, 70,96
classes=10
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny_onDIOR.cfg
================================================
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=2
width=416
height=416
channels=3
momentum=0.9
decay=0.001
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.0001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
# 0
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
# 1
[maxpool]
size=2
stride=2
# 2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# 3
[maxpool]
size=2
stride=2
# 4
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
# 5
[maxpool]
size=2
stride=2
# 6
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
# 7
[maxpool]
size=2
stride=2
# 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 9
[maxpool]
size=2
stride=2
# 10
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 11
[maxpool]
size=2
stride=1
# 12
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
# 13
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
# 14
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 15
[convolutional]
size=1
stride=1
pad=1
filters=75
activation=linear
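# head conv: filters = (classes + 5) * len(mask) = (20 + 5) * 3 = 75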
# 16
[yolo]
mask = 3,4,5
anchors = 5,5, 8,15, 17,9, 22,27, 54,65, 152,155
classes=20
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
# 17
[route]
layers = -4
# 18
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
# 19
[upsample]
stride=2
# 20
[route]
layers = -1, 8
# 21
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 22
[convolutional]
size=1
stride=1
pad=1
filters=75
activation=linear
# 23
[yolo]
mask = 0,1,2
anchors = 5,5, 8,15, 17,9, 22,27, 54,65, 152,155
classes=20
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny/yolov3-tiny_visdrone.cfg
================================================
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=2
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=1
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
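# head conv: filters = (classes + 5) * len(mask) = (10 + 5) * 3 = 45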
[yolo]
mask = 3,4,5
anchors = 0,3, 2,5, 3,11, 5,6, 9,14, 22,30
classes=10
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 0,1,2
anchors = 0,3, 2,5, 3,11, 5,6, 9,14, 22,30
classes=10
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
================================================
FILE: cfg/yolov3tiny-efficientnetB0/yolov3tiny-efficientnetB0.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
### CONV1 - 1 (1)
# conv1
[convolutional]
filters=32
size=3
pad=1
stride=2
batch_normalize=1
activation=swish
### CONV2 - MBConv1 - 1 (1)
# conv2_1_expand
[convolutional]
filters=32
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv2_1_dwise
[convolutional]
groups=32
filters=32
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=4 (recommended r=16)
[convolutional]
filters=8
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=32
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv2_1_linear
[convolutional]
filters=16
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV3 - MBConv6 - 1 (2)
# conv2_2_expand
[convolutional]
filters=96
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv2_2_dwise
[convolutional]
groups=96
filters=96
size=3
pad=1
stride=2
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=6 (recommended r=16)
[convolutional]
filters=16
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=96
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv2_2_linear
[convolutional]
filters=24
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV3 - MBConv6 - 2 (2)
# conv3_1_expand
[convolutional]
filters=144
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv3_1_dwise
[convolutional]
groups=144
filters=144
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=8
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=144
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv3_1_linear
[convolutional]
filters=24
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV4 - MBConv6 - 1 (2)
# dropout only before residual connection
[dropout]
probability=.0
# block_3_1
[shortcut]
from=-9
activation=linear
# conv_3_2_expand
[convolutional]
filters=144
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_3_2_dwise
[convolutional]
groups=144
filters=144
size=5
pad=1
stride=2
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=8
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=144
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_3_2_linear
[convolutional]
filters=40
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV4 - MBConv6 - 2 (2)
# conv_4_1_expand
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_1_dwise
[convolutional]
groups=192
filters=192
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=16
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=192
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_1_linear
[convolutional]
filters=40
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV5 - MBConv6 - 1 (3)
# dropout only before residual connection
[dropout]
probability=.0
# block_4_2
[shortcut]
from=-9
activation=linear
# conv_4_3_expand
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_3_dwise
[convolutional]
groups=192
filters=192
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=16
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=192
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_3_linear
[convolutional]
filters=80
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV5 - MBConv6 - 2 (3)
# conv_4_4_expand
[convolutional]
filters=384
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_4_dwise
[convolutional]
groups=384
filters=384
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=24
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=384
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_4_linear
[convolutional]
filters=80
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV5 - MBConv6 - 3 (3)
# dropout only before residual connection
[dropout]
probability=.0
# block_4_4
[shortcut]
from=-9
activation=linear
# conv_4_5_expand
[convolutional]
filters=384
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_5_dwise
[convolutional]
groups=384
filters=384
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=24
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=384
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_5_linear
[convolutional]
filters=80
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV6 - MBConv6 - 1 (3)
# dropout only before residual connection
[dropout]
probability=.0
# block_4_6
[shortcut]
from=-9
activation=linear
# conv_4_7_expand
[convolutional]
filters=384
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_7_dwise
[convolutional]
groups=384
filters=384
size=5
pad=1
stride=2
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=24
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=384
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_7_linear
[convolutional]
filters=112
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV6 - MBConv6 - 2 (3)
# conv_5_1_expand
[convolutional]
filters=576
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_5_1_dwise
[convolutional]
groups=576
filters=576
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=32
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=576
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_5_1_linear
[convolutional]
filters=112
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV6 - MBConv6 - 3 (3)
# dropout only before residual connection
[dropout]
probability=.0
# block_5_1
[shortcut]
from=-9
activation=linear
# conv_5_2_expand
[convolutional]
filters=576
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_5_2_dwise
[convolutional]
groups=576
filters=576
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=32
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=576
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_5_2_linear
[convolutional]
filters=112
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV7 - MBConv6 - 1 (4)
# dropout only before residual connection
[dropout]
probability=.0
# block_5_2
[shortcut]
from=-9
activation=linear
# conv_5_3_expand
[convolutional]
filters=576
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_5_3_dwise
[convolutional]
groups=576
filters=576
size=5
pad=1
stride=2
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=32
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=576
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_5_3_linear
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV7 - MBConv6 - 2 (4)
# conv_6_1_expand
[convolutional]
filters=960
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_6_1_dwise
[convolutional]
groups=960
filters=960
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=64
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=960
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_6_1_linear
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV7 - MBConv6 - 3 (4)
# dropout only before residual connection
[dropout]
probability=.0
# block_6_1
[shortcut]
from=-9
activation=linear
# conv_6_2_expand
[convolutional]
filters=960
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_6_2_dwise
[convolutional]
groups=960
filters=960
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=64
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=960
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_6_2_linear
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV7 - MBConv6 - 4 (4)
# dropout only before residual connection
[dropout]
probability=.0
# block_6_1
[shortcut]
from=-9
activation=linear
# conv_6_2_expand
[convolutional]
filters=960
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_6_2_dwise
[convolutional]
groups=960
filters=960
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=64
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=960
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_6_2_linear
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV8 - MBConv6 - 1 (1)
# dropout only before residual connection
[dropout]
probability=.0
# block_6_2
[shortcut]
from=-9
activation=linear
# conv_6_3_expand
[convolutional]
filters=960
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_6_3_dwise
[convolutional]
groups=960
filters=960
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=64
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=960
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_6_3_linear
[convolutional]
filters=320
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV9 - Conv2d 1x1
# conv_6_4
[convolutional]
filters=1280
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
##########################
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
activation=leaky
from=-2
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
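# head conv: filters = (classes + 5) * len(mask) = (80 + 5) * 3 = 255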
[yolo]
mask = 3,4,5
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=0
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[shortcut]
activation=leaky
from=90
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
activation=leaky
from=-3
[shortcut]
activation=leaky
from=90
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=0
================================================
FILE: cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-UAV.cfg
================================================
[net]
# Testing
batch = 1
subdivisions = 1
# Training
# batch=64
# subdivisions=2
width = 416
height = 416
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
burn_in = 1000
max_batches = 500200
policy = steps
steps = 400000,450000
scales = .1,.1
[convolutional]
batch_normalize = 1
filters = 16
size = 3
stride = 2
pad = 1
activation = h_swish
# bneck1
[convolutional]
batch_normalize = 1
filters = 16
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 16
size = 3
stride = 2
pad = 1
activation = relu6
[se]
filters = 16
[convolutional]
batch_normalize = 1
filters = 16
size = 1
stride = 1
pad = 1
activation = linear
# bneck2
[convolutional]
batch_normalize = 1
filters = 72
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 72
size = 3
stride = 2
pad = 1
activation = relu6
[convolutional]
batch_normalize = 1
filters = 24
size = 1
stride = 1
pad = 1
activation = linear
# bneck3
[convolutional]
batch_normalize = 1
filters = 88
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 88
size = 3
stride = 1
pad = 1
activation = relu6
[convolutional]
batch_normalize = 1
filters = 24
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -4
activation = linear
# bneck4
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 96
size = 5
stride = 2
pad = 1
activation = h_swish
[se]
filters = 96
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
# bneck5
[convolutional]
batch_normalize = 1
filters = 240
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 240
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 240
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck6
[convolutional]
batch_normalize = 1
filters = 240
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 240
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 240
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck7
[convolutional]
batch_normalize = 1
filters = 120
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 120
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 120
[convolutional]
batch_normalize = 1
filters = 48
size = 1
stride = 1
pad = 1
activation = linear
# bneck8
[convolutional]
batch_normalize = 1
filters = 144
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 144
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 144
[convolutional]
batch_normalize = 1
filters = 48
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck9
[convolutional]
batch_normalize = 1
filters = 288
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 288
size = 5
stride = 2
pad = 1
activation = h_swish
[se]
filters = 288
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
# bneck10
[convolutional]
batch_normalize = 1
filters = 576
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 576
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 576
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck11
[convolutional]
batch_normalize = 1
filters = 576
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 576
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 576
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
###########
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 18
activation = linear
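# head conv: filters = (classes + 5) * len(mask) = (1 + 5) * 3 = 18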
[yolo]
mask = 3,4,5
anchors = 8,9, 10,12, 13,12, 14,15, 17,20, 23,26
classes = 1
num = 6
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
[route]
layers = -4
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[upsample]
stride = 2
[route]
layers = -1, 34
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 18
activation = linear
[yolo]
mask = 0,1,2
anchors = 8,9, 10,12, 13,12, 14,15, 17,20, 23,26
classes = 1
num = 6
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-coco.cfg
================================================
[net]
# Testing
batch = 1
subdivisions = 1
# Training
# batch=64
# subdivisions=2
width = 416
height = 416
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
burn_in = 1000
max_batches = 500200
policy = steps
steps = 400000,450000
scales = .1,.1
[convolutional]
batch_normalize = 1
filters = 16
size = 3
stride = 2
pad = 1
activation = h_swish
# bneck1
[convolutional]
batch_normalize = 1
filters = 16
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 16
size = 3
stride = 2
pad = 1
activation = relu6
[se]
filters = 16
[convolutional]
batch_normalize = 1
filters = 16
size = 1
stride = 1
pad = 1
activation = linear
# bneck2
[convolutional]
batch_normalize = 1
filters = 72
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 72
size = 3
stride = 2
pad = 1
activation = relu6
[convolutional]
batch_normalize = 1
filters = 24
size = 1
stride = 1
pad = 1
activation = linear
# bneck3
[convolutional]
batch_normalize = 1
filters = 88
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 88
size = 3
stride = 1
pad = 1
activation = relu6
[convolutional]
batch_normalize = 1
filters = 24
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -4
activation = linear
# bneck4
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 96
size = 5
stride = 2
pad = 1
activation = h_swish
[se]
filters = 96
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
# bneck5
[convolutional]
batch_normalize = 1
filters = 240
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 240
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 240
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck6
[convolutional]
batch_normalize = 1
filters = 240
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 240
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 240
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck7
[convolutional]
batch_normalize = 1
filters = 120
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 120
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 120
[convolutional]
batch_normalize = 1
filters = 48
size = 1
stride = 1
pad = 1
activation = linear
# bneck8
[convolutional]
batch_normalize = 1
filters = 144
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 144
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 144
[convolutional]
batch_normalize = 1
filters = 48
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck9
[convolutional]
batch_normalize = 1
filters = 288
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 288
size = 5
stride = 2
pad = 1
activation = h_swish
[se]
filters = 288
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
# bneck10
[convolutional]
batch_normalize = 1
filters = 576
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 576
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 576
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck11
[convolutional]
batch_normalize = 1
filters = 576
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 576
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 576
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
###########
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 255
activation = linear
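# head conv: filters = (classes + 5) * len(mask) = (80 + 5) * 3 = 255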
[yolo]
mask = 3,4,5
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes = 80
num = 6
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
[route]
layers = -4
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[upsample]
stride = 2
[route]
layers = -1, 34
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 255
activation = linear
[yolo]
mask = 0,1,2
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes = 80
num = 6
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-screw.cfg
================================================
[net]
# Testing
batch = 1
subdivisions = 1
# Training
# batch=64
# subdivisions=2
width = 416
height = 416
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
burn_in = 1000
max_batches = 500200
policy = steps
steps = 400000,450000
scales = .1,.1
[convolutional]
batch_normalize = 1
filters = 16
size = 3
stride = 2
pad = 1
activation = h_swish
# bneck1
[convolutional]
batch_normalize = 1
filters = 16
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 16
size = 3
stride = 2
pad = 1
activation = relu6
[se]
filters = 16
[convolutional]
batch_normalize = 1
filters = 16
size = 1
stride = 1
pad = 1
activation = linear
# bneck2
[convolutional]
batch_normalize = 1
filters = 72
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 72
size = 3
stride = 2
pad = 1
activation = relu6
[convolutional]
batch_normalize = 1
filters = 24
size = 1
stride = 1
pad = 1
activation = linear
# bneck3
[convolutional]
batch_normalize = 1
filters = 88
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 88
size = 3
stride = 1
pad = 1
activation = relu6
[convolutional]
batch_normalize = 1
filters = 24
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -4
activation = linear
# bneck4
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 96
size = 5
stride = 2
pad = 1
activation = h_swish
[se]
filters = 96
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
# bneck5
[convolutional]
batch_normalize = 1
filters = 240
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 240
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 240
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck6
[convolutional]
batch_normalize = 1
filters = 240
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 240
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 240
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck7
[convolutional]
batch_normalize = 1
filters = 120
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 120
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 120
[convolutional]
batch_normalize = 1
filters = 48
size = 1
stride = 1
pad = 1
activation = linear
# bneck8
[convolutional]
batch_normalize = 1
filters = 144
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 144
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 144
[convolutional]
batch_normalize = 1
filters = 48
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck9
[convolutional]
batch_normalize = 1
filters = 288
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 288
size = 5
stride = 2
pad = 1
activation = h_swish
[se]
filters = 288
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
# bneck10
[convolutional]
batch_normalize = 1
filters = 576
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 576
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 576
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck11
[convolutional]
batch_normalize = 1
filters = 576
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 576
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 576
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
###########
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 21
activation = linear
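# head conv: filters = (classes + 5) * len(mask) = (2 + 5) * 3 = 21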
[yolo]
mask = 3,4,5
anchors = 25,34, 33,45, 37,50, 43,57, 70,85, 76,88
classes = 2
num = 6
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
[route]
layers = -4
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[upsample]
stride = 2
[route]
layers = -1, 34
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 21
activation = linear
[yolo]
mask = 0,1,2
anchors = 25,34, 33,45, 37,50, 43,57, 70,85, 76,88
classes = 2
num = 6
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov3tiny-mobilenet-small/yolov3tiny-mobilenet-small-visdrone.cfg
================================================
[net]
# Testing
batch = 1
subdivisions = 1
# Training
# batch=64
# subdivisions=2
width = 416
height = 416
channels = 3
momentum = 0.9
decay = 0.0005
angle = 0
saturation = 1.5
exposure = 1.5
hue = .1
learning_rate = 0.001
burn_in = 1000
max_batches = 500200
policy = steps
steps = 400000,450000
scales = .1,.1
[convolutional]
batch_normalize = 1
filters = 16
size = 3
stride = 2
pad = 1
activation = h_swish
# bneck1
[convolutional]
batch_normalize = 1
filters = 16
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 16
size = 3
stride = 2
pad = 1
activation = relu6
[se]
filters = 16
[convolutional]
batch_normalize = 1
filters = 16
size = 1
stride = 1
pad = 1
activation = linear
# bneck2
[convolutional]
batch_normalize = 1
filters = 72
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 72
size = 3
stride = 2
pad = 1
activation = relu6
[convolutional]
batch_normalize = 1
filters = 24
size = 1
stride = 1
pad = 1
activation = linear
# bneck3
[convolutional]
batch_normalize = 1
filters = 88
size = 1
stride = 1
pad = 1
activation = relu6
[depthwise]
batch_normalize = 1
filters = 88
size = 3
stride = 1
pad = 1
activation = relu6
[convolutional]
batch_normalize = 1
filters = 24
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -4
activation = linear
# bneck4
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 96
size = 5
stride = 2
pad = 1
activation = h_swish
[se]
filters = 96
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
# bneck5
[convolutional]
batch_normalize = 1
filters = 240
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 240
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 240
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck6
[convolutional]
batch_normalize = 1
filters = 240
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 240
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 240
[convolutional]
batch_normalize = 1
filters = 40
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck7
[convolutional]
batch_normalize = 1
filters = 120
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 120
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 120
[convolutional]
batch_normalize = 1
filters = 48
size = 1
stride = 1
pad = 1
activation = linear
# bneck8
[convolutional]
batch_normalize = 1
filters = 144
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 144
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 144
[convolutional]
batch_normalize = 1
filters = 48
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck9
[convolutional]
batch_normalize = 1
filters = 288
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 288
size = 5
stride = 2
pad = 1
activation = h_swish
[se]
filters = 288
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
# bneck10
[convolutional]
batch_normalize = 1
filters = 576
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 576
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 576
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
# bneck11
[convolutional]
batch_normalize = 1
filters = 576
size = 1
stride = 1
pad = 1
activation = h_swish
[depthwise]
batch_normalize = 1
filters = 576
size = 5
stride = 1
pad = 1
activation = h_swish
[se]
filters = 576
[convolutional]
batch_normalize = 1
filters = 96
size = 1
stride = 1
pad = 1
activation = linear
[shortcut]
from = -5
activation = linear
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=h_swish
###########
[convolutional]
batch_normalize = 1
filters = 256
size = 1
stride = 1
pad = 1
activation = leaky
[convolutional]
batch_normalize = 1
filters = 512
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 45
activation = linear
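# head conv: filters = (classes + 5) * len(mask) = (10 + 5) * 3 = 45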
[yolo]
mask = 3,4,5
anchors = 0,3, 2,5, 3,11, 5,6, 9,14, 22,30
classes = 10
num = 6
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
[route]
layers = -4
[convolutional]
batch_normalize = 1
filters = 128
size = 1
stride = 1
pad = 1
activation = leaky
[upsample]
stride = 2
[route]
layers = -1, 34
[convolutional]
batch_normalize = 1
filters = 256
size = 3
stride = 1
pad = 1
activation = leaky
[convolutional]
size = 1
stride = 1
pad = 1
filters = 45
activation = linear
[yolo]
mask = 0,1,2
anchors = 0,3, 2,5, 3,11, 5,6, 9,14, 22,30
classes = 10
num = 6
jitter = .3
ignore_thresh = .7
truth_thresh = 1
random = 1
================================================
FILE: cfg/yolov4/yolov4-hand.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
width=608
height=608
channels=3
momentum=0.949
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.00261
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1
#cutmix=1
mosaic=1
#:104x104 54:52x52 85:26x26 104:13x13 for 416
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-7
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-10
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-16
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=mish
##########################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
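# the route concatenates the 5x5, 9x9 and 13x13 pooled maps with the pre-SPP conv: 512 * 4 = 2048 channels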
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = 85
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1, -3
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = 54
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1, -3
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
##########################
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
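# head conv: filters = (classes + 5) * len(mask) = (1 + 5) * 3 = 18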
[yolo]
mask = 0,1,2
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.2
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=256
activation=leaky
[route]
layers = -1, -16
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 3,4,5
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.1
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=512
activation=leaky
[route]
layers = -1, -37
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
[yolo]
mask = 6,7,8
anchors = 8,13, 14,20, 22,25, 26,36, 35,49, 40,31, 51,69, 63,47, 94,103
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
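The detection heads above illustrate the Darknet sizing rule: the 1x1 conv feeding each [yolo] layer must have filters = (classes + 5) * len(mask). With classes=1 and three anchors per head, that gives the filters=18 seen in this file. A quick sanity-check sketch (the helper name is ours, not the repo's):

```python
# Sketch: verify the conv-before-[yolo] channel count for a Darknet cfg head.
def expected_head_filters(num_classes, anchors_per_head=3):
    # per anchor: 4 box coordinates + 1 objectness score + one score per class
    return (num_classes + 5) * anchors_per_head

assert expected_head_filters(1) == 18    # this file (classes=1)
assert expected_head_filters(80) == 255  # COCO cfgs below (classes=80)
assert expected_head_filters(10) == 45   # VisDrone cfgs below (classes=10)
```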
================================================
FILE: cfg/yolov4/yolov4-relu.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
width=608
height=608
channels=3
momentum=0.949
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.00261
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1
#cutmix=1
mosaic=1
#:104x104 54:52x52 85:26x26 104:13x13 for 416
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-7
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-10
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-16
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=leaky
##########################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = 85
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1, -3
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = 54
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1, -3
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
##########################
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.2
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=256
activation=leaky
[route]
layers = -1, -16
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.1
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=512
activation=leaky
[route]
layers = -1, -37
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 6,7,8
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
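The ### SPP ### section in this file (and its siblings) encodes spatial pyramid pooling purely with cfg primitives: three stride-1 max-pools of size 5, 9, and 13 over the same 512-channel map, concatenated with that map by [route] layers=-1,-3,-5,-6. A minimal PyTorch sketch of the equivalent module (illustrative only; the repo builds this from the cfg in models.py rather than as a dedicated class):

```python
import torch
import torch.nn as nn

class SPP(nn.Module):
    """Stride-1 max-pools at several kernel sizes, concatenated with the input."""
    def __init__(self, kernel_sizes=(5, 9, 13)):
        super().__init__()
        # padding = k // 2 keeps the spatial size unchanged at stride 1
        self.pools = nn.ModuleList(
            nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) for k in kernel_sizes
        )

    def forward(self, x):
        feats = [pool(x) for pool in self.pools]    # 5x5, 9x9, 13x13 pools
        return torch.cat(feats[::-1] + [x], dim=1)  # cfg order: -1 (13), -3 (9), -5 (5), -6 (input)
```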
================================================
FILE: cfg/yolov4/yolov4-visdrone.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
width=608
height=608
channels=3
momentum=0.949
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.00261
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1
#cutmix=1
mosaic=1
#:104x104 54:52x52 85:26x26 104:13x13 for 416
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-7
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-10
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-16
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=mish
##########################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = 85
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1, -3
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = 54
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1, -3
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
##########################
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 0,1,2
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.2
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=256
activation=leaky
[route]
layers = -1, -16
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 3,4,5
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.1
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=512
activation=leaky
[route]
layers = -1, -37
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 6,7,8
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=10
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
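These cfg files are flat INI-like text: a [section] header starts each block, key=value lines fill it, and # begins a comment. The repo parses them in utils/parse_config.py; a simplified sketch of the format (ours, not the repo's code) looks like:

```python
def parse_model_cfg(path):
    """Parse a Darknet .cfg into a list of {'type': section, key: value} dicts."""
    modules = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue                                   # skip blanks and comments
            if line.startswith('['):                       # e.g. [convolutional]
                modules.append({'type': line[1:-1].strip()})
            else:
                key, value = line.split('=', 1)
                modules[-1][key.strip()] = value.strip()
    return modules
```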
================================================
FILE: cfg/yolov4/yolov4.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
width=608
height=608
channels=3
momentum=0.949
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.00261
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1
#cutmix=1
mosaic=1
#:104x104 54:52x52 85:26x26 104:13x13 for 416
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-7
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-10
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-28
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish
[route]
layers = -1,-16
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=mish
##########################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = 85
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1, -3
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = 54
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1, -3
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
##########################
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.2
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=256
activation=leaky
[route]
layers = -1, -16
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.1
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=512
activation=leaky
[route]
layers = -1, -37
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 6,7,8
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
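[route] and [shortcut] blocks reference other layers in two ways: negative values are offsets from the current layer, while non-negative values (such as layers = 85 and layers = 54 above) are absolute indices, as the '#:104x104 54:52x52 85:26x26 104:13x13' comment near the top of these files hints. A small sketch of the resolution rule (the helper name is ours):

```python
def resolve_layer_refs(current_idx, layers):
    """Map a [route]/[shortcut] 'layers' list to absolute layer indices."""
    # negative -> relative to the current layer; non-negative -> already absolute
    return [current_idx + l if l < 0 else l for l in layers]

assert resolve_layer_refs(10, [-1, -3]) == [9, 7]
assert resolve_layer_refs(10, [85]) == [85]
```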
================================================
FILE: cfg/yolov4tiny/yolov4-tiny.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.00261
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[route]
layers=-1
groups=2
group_id=1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[route]
layers = -1,-2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -6,-1
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[route]
layers=-1
groups=2
group_id=1
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[route]
layers = -1,-2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -6,-1
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[route]
layers=-1
groups=2
group_id=1
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[route]
layers = -1,-2
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -6,-1
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
##################################
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=0
resize=1.5
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 23
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=0
resize=1.5
nms_kind=greedynms
beta_nms=0.6
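Unlike the full YOLOv4 cfgs above, yolov4-tiny uses [route] with groups=2 and group_id=1: instead of concatenating layers, it keeps only the second half of the previous layer's channels (the CSP-style partial split). A PyTorch sketch of that semantics (illustrative, not the repo's implementation):

```python
import torch

def grouped_route(x, groups=2, group_id=1):
    """Darknet [route] with groups: split channels into `groups` equal parts
    and keep only part `group_id` (here the second half)."""
    return x.chunk(groups, dim=1)[group_id]

x = torch.randn(1, 64, 52, 52)
assert grouped_route(x).shape == (1, 32, 52, 52)  # half the channels survive
```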
================================================
FILE: convert.py
================================================
# Author:LiPu
import argparse
from sys import platform
from models import *
from utils.datasets import *
from utils.utils import *
def convert():
img_size = opt.img_size # (320, 192) or (416, 256) or (608, 352) for (height, width)
weights = opt.weights
# Initialize
device = torch_utils.select_device(opt.device)
# Initialize model
model = Darknet(opt.cfg, img_size, is_gray_scale=opt.gray_scale)
# Load weights
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location=device)['model'])
else: # darknet format
_ = load_darknet_weights(model, weights)
# Eval mode
model.to(device).eval()
save_weights(model, path='weights/best.weights')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--weights', type=str, default='weights/yolov3.weights', help='path to weights file')
parser.add_argument('--output', type=str, default='output', help='output folder') # output folder
parser.add_argument('--img_size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--gray-scale', action='store_true', help='gray-scale training')
opt = parser.parse_args()
print(opt)
with torch.no_grad():
convert()
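convert.py only converts toward the Darknet format: it loads either a .pt checkpoint or a .weights file and saves weights/best.weights. The reverse direction can be sketched with the same helpers (a hedged example; the {'model': state_dict} layout matches what convert() expects when loading a .pt):

```python
# Sketch: Darknet .weights -> PyTorch .pt, reusing this repo's helpers.
import torch
from models import *  # Darknet, load_darknet_weights

model = Darknet('cfg/yolov3.cfg', 416)
load_darknet_weights(model, 'weights/yolov3.weights')
torch.save({'model': model.state_dict()}, 'weights/converted.pt')
```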
================================================
FILE: convert_FPGA.py
================================================
import argparse
import struct
from models import * # set ONNX_EXPORT in models.py
from utils.utils import *
def convert():
img_size = (320, 192) if ONNX_EXPORT else opt.img_size # (320, 192) or (416, 256) or (608, 352) for (height, width)
weights = opt.weights
# Initialize
device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
# Initialize model
model = Darknet(opt.cfg, img_size, quantized=opt.quantized, a_bit=opt.a_bit, w_bit=opt.w_bit,
FPGA=opt.FPGA, is_gray_scale=opt.gray_scale)
# Load weights
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location=device)['model'])
else: # darknet format
_ = load_darknet_weights(model, weights, FPGA=opt.FPGA)
if opt.quantized == 0:
save_weights(model, path='weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '-best.weights')
else:
w_file = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_weights.bin', 'wb')
b_file = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_bias.bin', 'wb')
if opt.quantized == 1:
w_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_w_scale.bin', 'wb')
a_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_a_scale.bin', 'wb')
b_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_b_scale.bin', 'wb')
s_scale = open('weights/' + opt.cfg.split('/')[-1].replace('.cfg', '') + '_s_scale.bin', 'wb')
if opt.w_bit == 16:
a = struct.pack('<  # [extraction gap: the rest of convert_FPGA.py, all of convert_FPGA_2.py, and the data/*.data and *.names files preceding get_coco2014.sh were swallowed starting at this '<']
================================================
FILE: data/get_coco2014.sh
================================================
#!/bin/bash
# (the filename= and fileid= definitions were lost in the extraction gap above;
#  the script otherwise mirrors get_coco2017.sh below)
curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=${fileid}" > /dev/null
curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=${fileid}" -o ${filename}
rm ./cookie
# Unzip labels
unzip -q ${filename} # for coco.zip
# tar -xzf ${filename} # for coco.tar.gz
rm ${filename}
# Download and unzip images
cd coco/images
f="train2014.zip" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f
f="val2014.zip" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f
# cd out
cd ../..
================================================
FILE: data/get_coco2017.sh
================================================
#!/bin/bash
# Zip coco folder
# zip -r coco.zip coco
# tar -czvf coco.tar.gz coco
# Download labels from Google Drive, accepting presented query
filename="coco2017labels.zip"
fileid="1cXZR_ckHki6nddOmcysCuuJFM--T-Q6L"
curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=${fileid}" > /dev/null
curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=${fileid}" -o ${filename}
rm ./cookie
# Unzip labels
unzip -q ${filename} # for coco.zip
# tar -xzf ${filename} # for coco.tar.gz
rm ${filename}
# Download and unzip images
cd coco/images
f="train2017.zip" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f
f="val2017.zip" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f
# cd out
cd ../..
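Both get_coco*.sh scripts use the same two-step Google Drive dance: the first curl stores the large-file confirmation cookie, and the second replays the parsed confirm token to start the real download. The same flow in Python, as a sketch using requests (Drive's endpoint behavior may have changed since these scripts were written):

```python
import requests

def drive_download(file_id, dest):
    """Download a Google Drive file, handling the large-file 'confirm' token."""
    url = 'https://drive.google.com/uc?export=download'
    session = requests.Session()
    resp = session.get(url, params={'id': file_id}, stream=True)
    # Large files return a virus-scan warning page; the token arrives as a cookie
    token = next((v for k, v in resp.cookies.items() if k.startswith('download_warning')), None)
    if token:
        resp = session.get(url, params={'id': file_id, 'confirm': token}, stream=True)
    with open(dest, 'wb') as f:
        for chunk in resp.iter_content(chunk_size=32768):
            f.write(chunk)
```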
================================================
FILE: data/oxfordhand.data
================================================
classes= 1
train=data/hand/train.txt
valid=data/hand/valid.txt
names=data/oxfordhand.names
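The train= and valid= entries in these .data files point at plain text files listing one image path per line; in this codebase family the matching label file is conventionally found by swapping images for labels in the path and the image suffix for .txt. A sketch of generating such a list (the paths are hypothetical):

```python
import glob

# Sketch: build data/hand/train.txt from an image folder (hypothetical layout).
images = sorted(glob.glob('data/hand/images/train/*.jpg'))
with open('data/hand/train.txt', 'w') as f:
    f.write('\n'.join(images))
# label files are then expected at data/hand/labels/train/<name>.txt
```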
================================================
FILE: data/oxfordhand.names
================================================
hand
================================================
FILE: data/screw.data
================================================
classes= 2
train = data/screw/train.txt
valid = data/screw/valid.txt
names = data/screw.names
backup = backup
================================================
FILE: data/screw.names
================================================
noscrew
screw
================================================
FILE: data/trainset.data
================================================
classes=5
train=data/trainset/train.txt
valid=data/trainset/test.txt
names=data/trainset.names
================================================
FILE: data/trainset.names
================================================
Freedom
Burke
Nimitz
Wasp
Ticonderoga
================================================
FILE: data/visdrone.data
================================================
classes= 10
train=data/visdrone/train.txt
valid=data/visdrone/test.txt
names=data/visdrone.names
================================================
FILE: data/visdrone.names
================================================
pedestrian
people
bicycle
car
van
truck
tricycle
awning-tricycle
bus
motor
================================================
FILE: detect.py
================================================
import argparse
from sys import platform
from utils import output_upsample
from models import *
from utils.datasets import *
from utils.utils import *
def detect(save_img=False):
if opt.quantizer_output == True:
tmp_dir = 'quantizer_output'
subprocess.Popen("rm -rf %s" % tmp_dir, shell=True)
imgsz = opt.img_size # (320, 192) or (416, 256) or (608, 352) for (height, width)
out, source, weights, view_img, save_txt = opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt
webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
# Initialize
device = torch_utils.select_device(opt.device)
if os.path.exists(out):
shutil.rmtree(out) # delete output folder
os.makedirs(out) # make new output folder
# Initialize model
model = Darknet(opt.cfg, imgsz, quantized=opt.quantized, quantizer_output=opt.quantizer_output,
layer_idx=opt.layer_idx,
reorder=opt.reorder, TN=opt.TN, TM=opt.TM, a_bit=opt.a_bit, w_bit=opt.w_bit, FPGA=opt.FPGA,
is_gray_scale=opt.gray_scale, maxabsscaler=opt.maxabsscaler, shortcut_way=opt.shortcut_way)
# Load weights
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location=device)['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
################# print model_list (debug)
'''AWEIGHT = torch.load(weights, map_location=device)['model']
for k,v in AWEIGHT.items():
print(k)'''
# Eval mode
model.to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
torch.backends.cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz, is_gray_scale=opt.gray_scale, rect=opt.rect)
# Get names and colors
names = load_classes(opt.names)
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
# Run inference
t0 = time.time()
# img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
# _ = model(img.float()) if device.type != 'cpu' else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.float() # uint8 to fp16/32
if opt.maxabsscaler:
# dump the raw input image
if opt.quantizer_output == True:
if not os.path.isdir('./quantizer_output/'):
os.makedirs('./quantizer_output/')
ori_img = copy.deepcopy(img)
ori_img_input = np.array(ori_img.cpu()).reshape(1, -1)
np.savetxt('./quantizer_output/img_input.txt', ori_img_input, delimiter='\n')
ori_img_input = ori_img_input.astype(np.int8)
writer = open('./quantizer_output/img_bin', "wb")
writer.write(ori_img_input)
writer.close()
val_img = copy.deepcopy(img)
val_img = val_img - 128
img /= 256
img = img * 2 - 1
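# Note: with --maxabsscaler, uint8 pixels in [0, 255] map to [-1, 1) via
# x / 256 * 2 - 1, so pixel 128 lands exactly on 0. val_img keeps the integer
# view (x - 128), which for a_bit == 8 equals img * 2**(a_bit - 1) computed
# below, hence the delt comparison against the quantizer output.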
# dump the quantized data fed to the first convolution layer
if opt.quantizer_output == True:
if not os.path.isdir('./quantizer_output/'):
os.makedirs('./quantizer_output/')
q_img_input = copy.deepcopy(img)
q_img_input = q_img_input * (2 ** (opt.a_bit - 1))
# compare software and hardware processing
delt = val_img - q_img_input
delt = np.array(delt.cpu()).reshape(1, -1)
delt_count = [np.sum(abs(delt) > 0)]
np.savetxt(('./quantizer_output/not0_count.txt'), delt_count)
q_img_input = np.array(q_img_input.cpu()).reshape(1, -1)
np.savetxt('./quantizer_output/q_img_input.txt', q_img_input, delimiter='\n')
q_img_input = q_img_input.astype(np.int8)
writer = open('./quantizer_output/q_img_bin', "wb")
writer.write(q_img_input)
writer.close()
else:
img /= 256.0 # 0 - 255 to [0, 1)
if opt.quantized != -1:
if opt.a_bit == 16:
img = img * (2 ** 14)
sign = torch.sign(img)
img = sign * torch.floor(torch.abs(img) + 0.5)
img = img / (2 ** 14)
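# Note: the three steps above snap the input to a 14-fractional-bit fixed-point
# grid: x_q = sign(x) * floor(|x| * 2**14 + 0.5) / 2**14, i.e. round-half-away-
# from-zero, presumably to match the 16-bit activation format on the FPGA side.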
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = torch_utils.time_synchronized()
pred = model(img, augment=opt.augment)[0]
t2 = torch_utils.time_synchronized()
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
multi_label=False, classes=opt.classes, agnostic=opt.agnostic_nms)
# Process detections
for i, det in enumerate(pred): # detections for image i
if webcam: # batch_size >= 1
p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
else:
p, s, im0 = path, '', im0s
save_path = str(Path(out) / Path(p).name)
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if det is not None and len(det):
# Rescale boxes from imgsz to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += '%g %ss, ' % (n, names[int(c)]) # add to string
# Write results
for *xyxy, conf, cls in det:
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
with open(save_path[:save_path.rfind('.')] + '.txt', 'a') as file:
file.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format
if save_img or view_img: # Add bbox to image
label = '%s %.2f' % (names[int(cls)], conf)
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])
# Print time (inference + NMS)
print('%sDone. (%.3fs)' % (s, t2 - t1))
# Stream results
if view_img:
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'): # q to quit
raise StopIteration
# Save results (image with detections)
if save_img:
if dataset.mode == 'images':
cv2.imwrite(save_path, im0)
else:
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
print('Results saved to %s' % os.getcwd() + os.sep + out)
if platform == 'darwin': # MacOS
os.system('open ' + save_path)
print('Done. (%.3fs)' % (time.time() - t0))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--source', type=str, default='data/samples', help='source') # input file/folder, 0 for webcam
parser.add_argument('--output', type=str, default='output', help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--rect', action='store_true', help='rectangular inference')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--quantized', type=int, default=-1, help='quantization way')
parser.add_argument('--shortcut_way', type=int, default=1, help='--shortcut quantization way')
parser.add_argument('--a_bit', type=int, default=8, help='a-bit')
parser.add_argument('--w_bit', type=int, default=8, help='w-bit')
parser.add_argument('--FPGA', action='store_true', help='FPGA')
parser.add_argument('--quantizer_output', action='store_true', help='quantizer output')
parser.add_argument('--layer_idx', type=int, default=-1, help='output')
parser.add_argument('--reorder', action='store_true', help='reorder')
parser.add_argument('--TN', type=int, default=32, help='TN')
parser.add_argument('--TM', type=int, default=32, help='TM')
parser.add_argument('--gray-scale', action='store_true', help='gray-scale training')
parser.add_argument('--maxabsscaler', '-mas', action='store_true', help='standardize input to (-1, 1)')
opt = parser.parse_args()
opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0] # find file
opt.names = list(glob.iglob('./**/' + opt.names, recursive=True))[0] # find file
print(opt)
with torch.no_grad():
detect()
if opt.quantizer_output == True and opt.layer_idx == -1:
output_upsample.Val_upsample(opt.cfg, opt.TN)
================================================
FILE: info.py
================================================
# Author:LiPu
import argparse
from models import *
from torchsummary import summary
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='*.cfg path')
parser.add_argument('--img_size', type=int, default=416, help='img_size')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
opt = parser.parse_args()
device = torch_utils.select_device(opt.device)
model = Darknet(opt.cfg)
# model.fuse()
model.to(device)
summary(model, input_size=(3, opt.img_size, opt.img_size))
================================================
FILE: layer_channel_prune.py
================================================
from models import *
from utils.utils import *
import numpy as np
from copy import deepcopy
from test import test
from terminaltables import AsciiTable
import time
from utils.prune_utils import *
import argparse
# %%
def obtain_filters_mask(model, thre, CBL_idx, prune_idx):
pruned = 0
total = 0
num_filters = []
filters_mask = []
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
weight_copy = bn_module.weight.data.abs().clone()
            channels = weight_copy.shape[0]
            min_channel_num = int(channels * opt.layer_keep) if int(channels * opt.layer_keep) > 0 else 1
            mask = weight_copy.gt(thre).float()
if int(torch.sum(mask)) < min_channel_num:
_, sorted_index_weights = torch.sort(weight_copy, descending=True)
mask[sorted_index_weights[:min_channel_num]] = 1.
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
f'remaining channel: {remain:>4d}')
else:
mask = torch.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.clone())
prune_ratio = pruned / total
print(f'Prune channels: {pruned}\tPrune ratio: {prune_ratio:.3f}')
return num_filters, filters_mask
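# Worked example of the per-layer floor above (illustrative numbers): with
# layer_keep=0.01 and a 1024-channel layer, min_channel_num = int(1024 * 0.01) = 10,
# so even if the global threshold would zero every gamma in the layer, the 10
# channels with the largest |gamma| are forced to survive.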
def prune_and_eval(model, CBL_idx, CBLidx2mask):
model_copy = deepcopy(model)
for idx in CBL_idx:
bn_module = model_copy.module_list[idx][1]
mask = CBLidx2mask[idx].cuda()
bn_module.weight.data.mul_(mask)
with torch.no_grad():
mAP = eval_model(model_copy)[0][2]
    print(f'after masking these gammas to zero, the mAP of the model is {mAP:.4f}')
def prune_and_eval2(model, prune_shortcuts=[]):
model_copy = deepcopy(model)
for idx in prune_shortcuts:
for i in [idx, idx - 1]:
bn_module = model_copy.module_list[i][1]
mask = torch.zeros(bn_module.weight.data.shape[0]).cuda()
bn_module.weight.data.mul_(mask)
with torch.no_grad():
mAP = eval_model(model_copy)[0][2]
    print(f'after simply masking the BN gamma of the to-be-pruned CBLs to zero, the mAP is {mAP:.4f}')
# %%
def obtain_filters_mask2(model, CBL_idx, prune_shortcuts):
filters_mask = []
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
mask = np.ones(bn_module.weight.data.shape[0], dtype='float32')
filters_mask.append(mask.copy())
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
for idx in prune_shortcuts:
for i in [idx, idx - 1]:
bn_module = model.module_list[i][1]
mask = np.zeros(bn_module.weight.data.shape[0], dtype='float32')
CBLidx2mask[i] = mask.copy()
return CBLidx2mask
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
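# Note: obtain_avg_forward_time above times asynchronous kernel launches on the
# GPU, so it can under-report true latency. A synchronized variant (illustrative
# sketch only; not called by this script) would drain the CUDA queue first:
def obtain_avg_forward_time_synced(input, model, repeat=200):
    model.eval()
    if input.is_cuda:
        torch.cuda.synchronize()  # finish pending kernels before starting the clock
    start = time.time()
    with torch.no_grad():
        for _ in range(repeat):
            output = model(input)
    if input.is_cuda:
        torch.cuda.synchronize()  # wait for the last forward pass to complete
    return (time.time() - start) / repeat, output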
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')
    parser.add_argument('--shortcuts', type=int, default=8, help='how many shortcut layers will be pruned; \
pruning one shortcut also prunes two CBLs (yolov3 has 23 shortcuts)')
parser.add_argument('--percent', type=float, default=0.6, help='global channel prune percent')
parser.add_argument('--layer_keep', type=float, default=0.01, help='channel keep percent per layer')
parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
    parser.add_argument('--gray-scale', action='store_true', help='gray-scale training')
opt = parser.parse_args()
print(opt)
assert opt.cfg.find("mobilenet") == -1, "Mobilenet doesn't support layer pruning!"
img_size = opt.img_size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg, (img_size, img_size), is_gray_scale=opt.gray_scale).to(device)
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
print('\nloaded weights from ', opt.weights)
eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size, imgsz=img_size,
rank=-1, is_gray_scale=True if opt.gray_scale else False)
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
print("\nlet's test the original model first:")
with torch.no_grad():
origin_model_metric = eval_model(model)
origin_nparameters = obtain_num_parameters(model)
##############################################################
    # Prune channels first
print("we will prune the channels first")
CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2(model.module_defs)
bn_weights = gather_bn_weights(model.module_list, prune_idx)
    sorted_bn = torch.sort(bn_weights)[0]
thresh_index = int(len(bn_weights) * opt.percent)
thresh = sorted_bn[thresh_index].cuda()
    print(f'Global channel-pruning threshold is {thresh:.4f}.')
num_filters, filters_mask = obtain_filters_mask(model, thresh, CBL_idx, prune_idx)
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
CBLidx2filters = {idx: filters for idx, filters in zip(CBL_idx, num_filters)}
for i in model.module_defs:
if i['type'] == 'shortcut':
i['is_access'] = False
    print('merge the masks of layers connected by shortcuts!')
merge_mask(model, CBLidx2mask, CBLidx2filters)
prune_and_eval(model, CBL_idx, CBLidx2mask)
for i in CBLidx2mask:
CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()
pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)
    print(
        "\nnow prune the model but keep its size (i.e. fold the pruned BN beta offsets into the following layers); let's see how the mAP goes")
with torch.no_grad():
eval_model(pruned_model)
for i in model.module_defs:
if i['type'] == 'shortcut':
i.pop('is_access')
compact_module_defs = deepcopy(model.module_defs)
for idx in CBL_idx:
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(CBLidx2filters[idx])
compact_model1 = Darknet([model.hyperparams.copy()] + compact_module_defs, (img_size, img_size),
is_gray_scale=opt.gray_scale).to(device)
compact_nparameters1 = obtain_num_parameters(compact_model1)
init_weights_from_loose_model(compact_model1, pruned_model, CBL_idx, Conv_idx, CBLidx2mask,
is_gray_scale=opt.gray_scale)
print('testing the channel pruned model...')
with torch.no_grad():
compact_model_metric1 = eval_model(compact_model1)
#########################################################
    # Then prune the layers
print('\nnow we prune shortcut layers and corresponding CBLs')
CBL_idx, Conv_idx, shortcut_idx = parse_module_defs4(compact_model1.module_defs)
print('all shortcut_idx:', [i + 1 for i in shortcut_idx])
# highest_thre = torch.zeros(len(shortcut_idx))
# for i, idx in enumerate(shortcut_idx):
# highest_thre[i] = compact_model1.module_list[idx][1].weight.data.abs().max().clone()
# _, sorted_index_thre = torch.sort(highest_thre)
    # The layer-selection strategy was changed here: layers are ranked by the mean |gamma| of their BN instead of the max, which usually performs slightly better (not always - feel free to switch back); the four commented lines above are the original strategy.
bn_mean = torch.zeros(len(shortcut_idx))
for i, idx in enumerate(shortcut_idx):
bn_mean[i] = compact_model1.module_list[idx][1].weight.data.abs().mean().clone()
_, sorted_index_thre = torch.sort(bn_mean)
    prune_shortcuts = torch.tensor(shortcut_idx)[sorted_index_thre[:opt.shortcuts]]
prune_shortcuts = [int(x) for x in prune_shortcuts]
index_all = list(range(len(compact_model1.module_defs)))
index_prune = []
for idx in prune_shortcuts:
index_prune.extend([idx - 1, idx, idx + 1])
index_remain = [idx for idx in index_all if idx not in index_prune]
    print('These shortcut layers and their corresponding CBLs will be pruned:', index_prune)
prune_and_eval2(compact_model1, prune_shortcuts)
CBLidx2mask = obtain_filters_mask2(compact_model1, CBL_idx, prune_shortcuts)
pruned_model = prune_model_keep_size(compact_model1, CBL_idx, CBL_idx, CBLidx2mask)
with torch.no_grad():
mAP = eval_model(pruned_model)[0][2]
    print("after transferring the activation offsets of the pruned CBLs, mAP is {}".format(mAP))
compact_module_defs = deepcopy(compact_model1.module_defs)
for module_def in compact_module_defs:
if module_def['type'] == 'route':
from_layers = [int(s) for s in module_def['layers']]
if len(from_layers) == 2:
count = 0
for i in index_prune:
if i <= from_layers[1]:
count += 1
from_layers[1] = from_layers[1] - count
# from_layers = ', '.join([str(s) for s in from_layers])
module_def['layers'] = from_layers
compact_module_defs = [compact_module_defs[i] for i in index_remain]
compact_model2 = Darknet([compact_model1.hyperparams.copy()] + compact_module_defs, (img_size, img_size),
is_gray_scale=opt.gray_scale).to(device)
compact_nparameters2 = obtain_num_parameters(compact_model2)
print('testing the final model')
with torch.no_grad():
compact_model_metric2 = eval_model(compact_model2)
################################################################
    # Pruning finished; benchmark inference speed
if opt.gray_scale:
random_input = torch.rand((1, 1, img_size, img_size)).to(device)
else:
random_input = torch.rand((1, 3, img_size, img_size)).to(device)
print('testing inference time...')
    origin_forward_time, output = obtain_avg_forward_time(random_input, model)
compact_forward_time1, compact_output1 = obtain_avg_forward_time(random_input, compact_model1)
compact_forward_time2, compact_output2 = obtain_avg_forward_time(random_input, compact_model2)
metric_table = [
["Metric", "Before", "After prune channels", "After prune layers(final)"],
["mAP", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric1[0][2]:.6f}',
f'{compact_model_metric2[0][2]:.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters1}", f"{compact_nparameters2}"],
        ["Inference (s)", f'{origin_forward_time:.4f}', f'{compact_forward_time1:.4f}', f'{compact_forward_time2:.4f}']
]
print(AsciiTable(metric_table).table)
pruned_cfg_name = opt.cfg.replace('/',
f'/layer_channel_prune_{opt.percent}_{opt.shortcuts}_shortcut_')
    # Create the output directory
dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
    # compact_module_defs parsed the anchors from a string into an array, so convert the anchors back into a string here
    with open(opt.cfg, 'r') as f:
        lines = f.read().split('\n')
    for line in lines:
        if line.split(' = ')[0] == 'anchors':
            anchor = line.split(' = ')[1]
            break
        if line.split('=')[0] == 'anchors':
            anchor = line.split('=')[1]
            break
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + f'/layer_channel_prune_{str(opt.shortcuts)}_shortcuts_{str(opt.percent)}_percent.weights'
save_weights(compact_model2, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
================================================
FILE: layer_channel_regular_prune.py
================================================
from models import *
from utils.utils import *
import numpy as np
from copy import deepcopy
from test import test
from terminaltables import AsciiTable
import time
from utils.prune_utils import *
import argparse
filter_switch = [each for each in range(2048) if (each % 32 == 0)]
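# filter_switch enumerates the channel counts allowed after "regular" pruning
# (multiples of 32 up to 2048); obtain_filters_mask below rounds each layer's
# surviving channel count up to the next entry, e.g. 37 remaining channels are
# rounded up to 64, which keeps the pruned tensors hardware-friendly.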
# %%
def obtain_filters_mask(model, thre, CBL_idx, shortcut_idx, prune_idx):
pruned = 0
total = 0
num_filters = []
filters_mask = []
idx_new = dict()
    # CBL_idx stores every convolutional layer that carries a BN (the conv right before a YOLO layer has no BN)
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
if idx not in shortcut_idx:
mask = obtain_bn_mask(bn_module, thre).cpu().numpy()
                # keep the remaining channel count a multiple of 32 (see filter_switch)
mask_cnt = int(mask.sum())
if mask_cnt == 0:
this_layer_sort_bn = bn_module.weight.data.abs().clone()
sort_bn_values = torch.sort(this_layer_sort_bn)[0]
bn_cnt = bn_module.weight.shape[0]
this_layer_thre = sort_bn_values[bn_cnt - 8]
mask = obtain_bn_mask(bn_module, this_layer_thre).cpu().numpy()
else:
for i in range(len(filter_switch)):
if mask_cnt <= filter_switch[i]:
mask_cnt = filter_switch[i]
break
this_layer_sort_bn = bn_module.weight.data.abs().clone()
sort_bn_values = torch.sort(this_layer_sort_bn)[0]
bn_cnt = bn_module.weight.shape[0]
this_layer_thre = sort_bn_values[bn_cnt - mask_cnt]
mask = obtain_bn_mask(bn_module, this_layer_thre).cpu().numpy()
idx_new[idx] = mask
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
# if remain == 0:
# print("Channels would be all pruned!")
# raise Exception
# print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
# f'remaining channel: {remain:>4d}')
else:
                # if idx is in shortcut_idx, make the masks of the two layers joined by the shortcut identical
mask = idx_new[shortcut_idx[idx]]
idx_new[idx] = mask
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
if remain == 0:
print("Channels would be all pruned!")
raise Exception
print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
f'remaining channel: {remain:>4d}')
else:
mask = np.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.copy())
    # hence prune_ratio = (gamma parameters to be pruned) / (all gamma parameters across CBL_idx)
prune_ratio = pruned / total
print(f'Prune channels: {pruned}\tPrune ratio: {prune_ratio:.3f}')
return num_filters, filters_mask
def prune_and_eval(model, sorted_bn, shortcut_idx, percent=.0):
model_copy = deepcopy(model)
thre_index = int(len(sorted_bn) * percent)
    # obtain the threshold for the gamma parameters; every channel whose gamma falls below it is pruned
thre1 = sorted_bn[thre_index]
print(f'Channels with Gamma value less than {thre1:.8f} are pruned!')
remain_num = 0
idx_new = dict()
for idx in prune_idx:
if idx not in shortcut_idx:
bn_module = model_copy.module_list[idx][1]
mask = obtain_bn_mask(bn_module, thre1)
            # record the post-pruning mask of each convolutional layer
            # idx_new[idx] = mask.cpu().numpy()
idx_new[idx] = mask
remain_num += int(mask.sum())
bn_module.weight.data.mul_(mask)
# bn_module.bias.data.mul_(mask*0.0001)
else:
bn_module = model_copy.module_list[idx][1]
mask = idx_new[shortcut_idx[idx]]
idx_new[idx] = mask
remain_num += int(mask.sum())
bn_module.weight.data.mul_(mask)
# print(int(mask.sum()))
# with torch.no_grad():
# mAP = eval_model(model_copy)[0][2]
print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')
print(f'Prune ratio: {1 - remain_num / len(sorted_bn):.3f}')
# print(f'mAP of the pruned model is {mAP:.4f}')
return thre1
def prune_and_eval2(model, prune_shortcuts=[]):
model_copy = deepcopy(model)
for idx in prune_shortcuts:
for i in [idx, idx - 1]:
bn_module = model_copy.module_list[i][1]
mask = torch.zeros(bn_module.weight.data.shape[0]).cuda()
bn_module.weight.data.mul_(mask)
with torch.no_grad():
mAP = eval_model(model_copy)[0][2]
    print(f'after simply masking the BN gamma of the to-be-pruned CBLs to zero, the mAP is {mAP:.4f}')
# %%
def obtain_filters_mask2(model, CBL_idx, prune_shortcuts):
filters_mask = []
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
mask = np.ones(bn_module.weight.data.shape[0], dtype='float32')
filters_mask.append(mask.copy())
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
for idx in prune_shortcuts:
for i in [idx, idx - 1]:
bn_module = model.module_list[i][1]
mask = np.zeros(bn_module.weight.data.shape[0], dtype='float32')
CBLidx2mask[i] = mask.copy()
return CBLidx2mask
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')
    parser.add_argument('--shortcuts', type=int, default=8, help='how many shortcut layers will be pruned; \
pruning one shortcut also prunes two CBLs (yolov3 has 23 shortcuts)')
parser.add_argument('--percent', type=float, default=0.6, help='global channel prune percent')
parser.add_argument('--layer_keep', type=float, default=0.01, help='channel keep percent per layer')
parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
    parser.add_argument('--gray-scale', action='store_true', help='gray-scale training')
opt = parser.parse_args()
print(opt)
assert opt.cfg.find("mobilenet") == -1, "Mobilenet doesn't support layer pruning!"
img_size = opt.img_size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg, (img_size, img_size), is_gray_scale=opt.gray_scale).to(device)
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
print('\nloaded weights from ', opt.weights)
eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size, imgsz=img_size,
rank=-1, is_gray_scale=True if opt.gray_scale else False)
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
print("\nlet's test the original model first:")
with torch.no_grad():
origin_model_metric = eval_model(model)
origin_nparameters = obtain_num_parameters(model)
##############################################################
    # Prune channels first
    # Unlike normal_prune, here we also need shortcut_idx and shortcut_all:
    # shortcut_idx stores the correspondence - shortcut_idx[x] is the index of the layer added to conv layer x-1
    # shortcut_all stores every layer that takes part in a shortcut addition
CBL_idx, Conv_idx, prune_idx, shortcut_idx, shortcut_all = parse_module_defs2(model.module_defs)
    # copy the gamma parameters of every BN layer to be pruned into the bn_weights list
bn_weights = gather_bn_weights(model.module_list, prune_idx)
    # sort the BN gamma parameters
    # torch.sort returns a pair: the sorted values and the indices they came from
sorted_bn = torch.sort(bn_weights)[0]
    # upper bound on the threshold so no layer loses all of its channels (the smallest per-layer max gamma across all BN layers)
highest_thre = []
for idx in prune_idx:
        # .item() extracts the scalar value from a tensor
highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())
highest_thre = min(highest_thre)
    # find the percentile that highest_thre corresponds to
percent_limit = (sorted_bn == highest_thre).nonzero().item() / len(bn_weights)
print(f'Threshold should be less than {highest_thre:.8f}.')
print(f'The corresponding prune ratio is {percent_limit:.3f}.')
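    # Worked example (illustrative numbers): if the per-layer max |gamma| values
    # are 0.12, 0.45 and 0.80, then highest_thre = 0.12; any global threshold at
    # or above 0.12 would zero out every channel of the first layer, so the
    # chosen percent must keep the threshold below that value.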
percent = opt.percent
threshold = prune_and_eval(model, sorted_bn, shortcut_idx, percent)
num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, shortcut_idx, prune_idx)
    # CBLidx2mask maps each index in CBL_idx to the mask of its BN layer
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)
with torch.no_grad():
mAP = eval_model(pruned_model)[0][2]
    print('after prune_model_keep_size, mAP is {}'.format(mAP))
    # take the original model's module_defs and update the conv filter counts in the copy
compact_module_defs = deepcopy(model.module_defs)
for idx, num in zip(CBL_idx, num_filters):
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(num)
# for item_def in compact_module_defs:
# print(item_def)
compact_model1 = Darknet([model.hyperparams.copy()] + compact_module_defs, is_gray_scale=opt.gray_scale).to(device)
compact_nparameters1 = obtain_num_parameters(compact_model1)
init_weights_from_loose_model(compact_model1, pruned_model, CBL_idx, Conv_idx, CBLidx2mask,
is_gray_scale=opt.gray_scale)
print('testing the channel pruned model...')
with torch.no_grad():
compact_model_metric1 = eval_model(compact_model1)
#########################################################
# 再剪层
print('\nnow we prune shortcut layers and corresponding CBLs')
CBL_idx, Conv_idx, shortcut_idx = parse_module_defs4(compact_model1.module_defs)
print('all shortcut_idx:', [i + 1 for i in shortcut_idx])
bn_weights = gather_bn_weights(compact_model1.module_list, shortcut_idx)
sorted_bn = torch.sort(bn_weights)[0]
# highest_thre = torch.zeros(len(shortcut_idx))
# for i, idx in enumerate(shortcut_idx):
# highest_thre[i] = compact_model1.module_list[idx][1].weight.data.abs().max().clone()
# _, sorted_index_thre = torch.sort(highest_thre)
    # The layer-selection strategy was changed here: layers are ranked by the mean |gamma| of their BN instead of the max, which usually performs slightly better (not always - feel free to switch back); the four commented lines above are the original strategy.
bn_mean = torch.zeros(len(shortcut_idx))
for i, idx in enumerate(shortcut_idx):
bn_mean[i] = compact_model1.module_list[idx][1].weight.data.abs().mean().clone()
_, sorted_index_thre = torch.sort(bn_mean)
    prune_shortcuts = torch.tensor(shortcut_idx)[sorted_index_thre[:opt.shortcuts]]
prune_shortcuts = [int(x) for x in prune_shortcuts]
index_all = list(range(len(compact_model1.module_defs)))
index_prune = []
for idx in prune_shortcuts:
index_prune.extend([idx - 1, idx, idx + 1])
index_remain = [idx for idx in index_all if idx not in index_prune]
    print('These shortcut layers and their corresponding CBLs will be pruned:', index_prune)
prune_and_eval2(compact_model1, prune_shortcuts)
CBLidx2mask = obtain_filters_mask2(compact_model1, CBL_idx, prune_shortcuts)
pruned_model = prune_model_keep_size(compact_model1, CBL_idx, CBL_idx, CBLidx2mask)
with torch.no_grad():
mAP = eval_model(pruned_model)[0][2]
    print("after transferring the activation offsets of the pruned CBLs, mAP is {}".format(mAP))
compact_module_defs = deepcopy(compact_model1.module_defs)
for module_def in compact_module_defs:
if module_def['type'] == 'route':
from_layers = [int(s) for s in module_def['layers']]
if len(from_layers) == 2:
count = 0
for i in index_prune:
if i <= from_layers[1]:
count += 1
from_layers[1] = from_layers[1] - count
# from_layers = ', '.join([str(s) for s in from_layers])
module_def['layers'] = from_layers
compact_module_defs = [compact_module_defs[i] for i in index_remain]
compact_model2 = Darknet([compact_model1.hyperparams.copy()] + compact_module_defs, (img_size, img_size),
is_gray_scale=opt.gray_scale).to(device)
compact_nparameters2 = obtain_num_parameters(compact_model2)
# init_weights_from_loose_model(compact_model2, compact_model1, CBL_idx, Conv_idx, CBLidx2mask,
# is_gray_scale=opt.gray_scale)
print('testing the final model')
torch.cuda.empty_cache()
with torch.no_grad():
compact_model_metric2 = eval_model(compact_model2)
################################################################
    # Pruning finished; benchmark inference speed
if opt.gray_scale:
random_input = torch.rand((1, 1, img_size, img_size)).to(device)
else:
random_input = torch.rand((1, 3, img_size, img_size)).to(device)
print('testing inference time...')
    origin_forward_time, output = obtain_avg_forward_time(random_input, model)
compact_forward_time1, compact_output1 = obtain_avg_forward_time(random_input, compact_model1)
compact_forward_time2, compact_output2 = obtain_avg_forward_time(random_input, compact_model2)
metric_table = [
["Metric", "Before", "After prune channels", "After prune layers(final)"],
["mAP", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric1[0][2]:.6f}',
f'{compact_model_metric2[0][2]:.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters1}", f"{compact_nparameters2}"],
        ["Inference (s)", f'{origin_forward_time:.4f}', f'{compact_forward_time1:.4f}', f'{compact_forward_time2:.4f}']
]
print(AsciiTable(metric_table).table)
pruned_cfg_name = opt.cfg.replace('/',
f'/layer_channel_regular_prune_{opt.percent}_{opt.shortcuts}_shortcut_')
    # Create the output directory
dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
    # compact_module_defs parsed the anchors from a string into an array, so convert the anchors back into a string here
    with open(opt.cfg, 'r') as f:
        lines = f.read().split('\n')
    for line in lines:
        if line.split(' = ')[0] == 'anchors':
            anchor = line.split(' = ')[1]
            break
        if line.split('=')[0] == 'anchors':
            anchor = line.split('=')[1]
            break
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + f'/layer_channel_regular_prune_{str(opt.shortcuts)}_shortcuts_{str(opt.percent)}_percent.weights'
save_weights(compact_model2, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
================================================
FILE: layer_prune.py
================================================
from models import *
from utils.utils import *
import torch
import numpy as np
from copy import deepcopy
from test import test
from terminaltables import AsciiTable
import time
from utils.utils import *
from utils.prune_utils import *
import argparse
def prune_and_eval(model, prune_shortcuts=[]):
model_copy = deepcopy(model)
for idx in prune_shortcuts:
for i in [idx, idx - 1]:
bn_module = model_copy.module_list[i][1]
mask = torch.zeros(bn_module.weight.data.shape[0]).cuda()
bn_module.weight.data.mul_(mask)
with torch.no_grad():
mAP = eval_model(model_copy)[0][2]
    print(f'after simply masking the BN gamma of the to-be-pruned CBLs to zero, the mAP is {mAP:.4f}')
# %%
def obtain_filters_mask(model, CBL_idx, prune_shortcuts):
filters_mask = []
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
mask = np.ones(bn_module.weight.data.shape[0], dtype='float32')
filters_mask.append(mask.copy())
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
for idx in prune_shortcuts:
for i in [idx, idx - 1]:
bn_module = model.module_list[i][1]
mask = np.zeros(bn_module.weight.data.shape[0], dtype='float32')
CBLidx2mask[i] = mask.copy()
return CBLidx2mask
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3-hand.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/oxfordhand.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')
    parser.add_argument('--shortcuts', type=int, default=8, help='how many shortcut layers will be pruned; \
pruning one shortcut also prunes two CBLs (yolov3 has 23 shortcuts)')
parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
opt = parser.parse_args()
print(opt)
assert opt.cfg.find("mobilenet") == -1, "Mobilenet doesn't support layer pruning!"
img_size = opt.img_size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg, (img_size, img_size)).to(device)
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
load_darknet_weights(model, opt.weights)
print('\nloaded weights from ', opt.weights)
eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size, imgsz=img_size,
rank=-1)
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
with torch.no_grad():
print("\nlet's test the original model first:")
origin_model_metric = eval_model(model)
origin_nparameters = obtain_num_parameters(model)
CBL_idx, Conv_idx, shortcut_idx = parse_module_defs4(model.module_defs)
print('all shortcut_idx:', [i + 1 for i in shortcut_idx])
# highest_thre = torch.zeros(len(shortcut_idx))
# for i, idx in enumerate(shortcut_idx):
# highest_thre[i] = model.module_list[idx][1].weight.data.abs().max().clone()
# _, sorted_index_thre = torch.sort(highest_thre)
    # The layer-selection strategy was changed here: layers are ranked by the mean |gamma| of their BN instead of the max, which usually performs slightly better (not always - feel free to switch back); the four commented lines above are the original strategy.
bn_mean = torch.zeros(len(shortcut_idx))
for i, idx in enumerate(shortcut_idx):
bn_mean[i] = model.module_list[idx][1].weight.data.abs().mean().clone()
_, sorted_index_thre = torch.sort(bn_mean)
    prune_shortcuts = torch.tensor(shortcut_idx)[sorted_index_thre[:opt.shortcuts]]
prune_shortcuts = [int(x) for x in prune_shortcuts]
index_all = list(range(len(model.module_defs)))
index_prune = []
for idx in prune_shortcuts:
index_prune.extend([idx - 1, idx, idx + 1])
index_remain = [idx for idx in index_all if idx not in index_prune]
    print('These shortcut layers and their corresponding CBLs will be pruned:', index_prune)
prune_and_eval(model, prune_shortcuts)
CBLidx2mask = obtain_filters_mask(model, CBL_idx, prune_shortcuts)
pruned_model = prune_model_keep_size(model, CBL_idx, CBL_idx, CBLidx2mask)
with torch.no_grad():
mAP = eval_model(pruned_model)[0][2]
    print("after transferring the activation offsets of the pruned CBLs, mAP is {}".format(mAP))
compact_module_defs = deepcopy(model.module_defs)
for j, module_def in enumerate(compact_module_defs):
if module_def['type'] == 'route':
from_layers = [int(s) for s in module_def['layers']]
if len(from_layers) == 1 and from_layers[0] > 0:
count = 0
for i in index_prune:
if i <= from_layers[0]:
count += 1
from_layers[0] = from_layers[0] - count
# from_layers = str(from_layers[0])
module_def['layers'] = from_layers
elif len(from_layers) == 2:
count = 0
if from_layers[1] > 0:
for i in index_prune:
if i <= from_layers[1]:
count += 1
from_layers[1] = from_layers[1] - count
else:
for i in index_prune:
if i > j + from_layers[1] and i < j:
count += 1
from_layers[1] = from_layers[1] + count
# from_layers = ', '.join([str(s) for s in from_layers])
module_def['layers'] = from_layers
compact_module_defs = [compact_module_defs[i] for i in index_remain]
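    # Index bookkeeping example (illustrative): if layers 61-63 were removed and a
    # route referenced absolute layer 79, three removed layers precede it, so the
    # rewritten index is 79 - 3 = 76; a negative (relative) offset instead shrinks
    # in magnitude by the number of removed layers inside the span it crosses.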
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs, (img_size, img_size)).to(device)
compact_nparameters = obtain_num_parameters(compact_model)
# init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)
random_input = torch.rand((1, 3, img_size, img_size)).to(device)
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)
    # evaluate the pruned model on the test set and count its parameters
with torch.no_grad():
compact_model_metric = eval_model(compact_model)
    # compare parameter counts and metrics before and after pruning
metric_table = [
["Metric", "Before", "After"],
["mAP", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric[0][2]:.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
        ["Inference (s)", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
    # generate the pruned cfg file and save the model
pruned_cfg_name = opt.cfg.replace('/', f'/layer_prune_{opt.shortcuts}_shortcut_')
    # Create the output directory
dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
    # compact_module_defs parsed the anchors from a string into an array, so convert the anchors back into a string here
    with open(opt.cfg, 'r') as f:
        lines = f.read().split('\n')
    for line in lines:
        if line.split(' = ')[0] == 'anchors':
            anchor = line.split(' = ')[1]
            break
        if line.split('=')[0] == 'anchors':
            anchor = line.split('=')[1]
            break
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + f'/layer_prune_{str(opt.shortcuts)}_shortcuts.weights'
save_weights(compact_model, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
================================================
FILE: models.py
================================================
from utils.google_utils import *
from utils.parse_config import *
from utils.quantized.quantized_google import *
from utils.quantized.quantized_ptq_cos import *
from utils.quantized.quantized_TPSQ import *
from utils.layers import *
import copy
# YOLO
def create_modules(module_defs, img_size, cfg, quantized, quantizer_output, layer_idx, reorder, TM, TN, a_bit=8,
w_bit=8, steps=0, is_gray_scale=False, maxabsscaler=False, shortcut_way=-1):
# Constructs module list of layer blocks from module configuration in module_defs
img_size = [img_size] * 2 if isinstance(img_size, int) else img_size # expand if necessary
_ = module_defs.pop(0) # cfg training hyperparams (unused)
if is_gray_scale:
output_filters = [1] # input channels
else:
output_filters = [3]
module_list = nn.ModuleList()
routs = [] # list of layers which rout to deeper layers
yolo_index = -1
for i, mdef in enumerate(module_defs):
modules = nn.Sequential()
if mdef['type'] == 'convolutional':
bn = int(mdef['batch_normalize'])
filters = int(mdef['filters'])
kernel_size = int(mdef['size'])
pad = (kernel_size - 1) // 2 if int(mdef['pad']) else 0
if quantized == 1:
modules.add_module('Conv2d', BNFold_QuantizedConv2d_For_FPGA(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(mdef['stride']),
padding=pad,
groups=mdef[
'groups'] if 'groups' in mdef else 1,
bias=not bn,
a_bits=a_bit,
w_bits=w_bit,
bn=bn,
activate=mdef['activation'],
steps=steps,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" + mdef[
'type'][
:4],
layer_idx=layer_idx,
maxabsscaler=maxabsscaler))
elif quantized == 2:
modules.add_module('Conv2d', TPSQ_BNFold_QuantizedConv2d_For_FPGA(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(mdef['stride']),
padding=pad,
groups=mdef[
'groups'] if 'groups' in mdef else 1,
bias=not bn,
a_bits=a_bit,
w_bits=w_bit,
bn=bn,
activate=mdef['activation'],
steps=steps,
quantizer_output=quantizer_output,
maxabsscaler=maxabsscaler))
elif quantized == 3:
modules.add_module('Conv2d', BNFold_COSPTQuantizedConv2d_For_FPGA(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(mdef['stride']),
padding=pad,
groups=mdef[
'groups'] if 'groups' in mdef else 1,
bias=not bn,
a_bits=a_bit,
w_bits=w_bit,
bn=bn,
activate=mdef['activation'],
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" + mdef[
'type'][
:4],
layer_idx=layer_idx,
maxabsscaler=maxabsscaler))
else:
modules.add_module('Conv2d', nn.Conv2d(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(mdef['stride']),
padding=pad,
groups=mdef['groups'] if 'groups' in mdef else 1,
bias=not bn))
if bn:
modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.1))
if mdef['activation'] == 'leaky':
modules.add_module('activation', nn.LeakyReLU(0.1 if not maxabsscaler else 0.25, inplace=True))
# modules.add_module('activation', nn.PReLU(num_parameters=1, init=0.10))
# modules.add_module('activation', Swish())
if mdef['activation'] == 'relu6':
modules.add_module('activation', ReLU6())
if mdef['activation'] == 'h_swish':
modules.add_module('activation', HardSwish())
if mdef['activation'] == 'relu':
modules.add_module('activation', nn.ReLU())
if mdef['activation'] == 'mish':
modules.add_module('activation', Mish())
elif mdef['type'] == 'depthwise':
bn = int(mdef['batch_normalize'])
filters = int(mdef['filters'])
kernel_size = int(mdef['size'])
pad = (kernel_size - 1) // 2 if int(mdef['pad']) else 0
if quantized == 1:
modules.add_module('DepthWise2d',
BNFold_QuantizedConv2d_For_FPGA(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(mdef['stride']),
padding=pad,
groups=output_filters[-1],
bias=not bn,
a_bits=a_bit,
w_bits=w_bit,
bn=bn,
activate=mdef['activation'],
steps=steps,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" + mdef['type'][:4],
layer_idx=layer_idx,
maxabsscaler=maxabsscaler))
if quantized == 2:
modules.add_module('DepthWise2d',
TPSQ_BNFold_QuantizedConv2d_For_FPGA(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(mdef['stride']),
padding=pad,
groups=output_filters[-1],
bias=not bn,
a_bits=a_bit,
w_bits=w_bit,
bn=bn,
activate=mdef['activation'],
steps=steps,
quantizer_output=quantizer_output,
maxabsscaler=maxabsscaler))
elif quantized == 3:
modules.add_module('DepthWise2d', BNFold_COSPTQuantizedConv2d_For_FPGA(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(mdef['stride']),
padding=pad,
groups=output_filters[-1],
bias=not bn,
a_bits=a_bit,
w_bits=w_bit,
bn=bn,
activate=mdef['activation'],
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx,
maxabsscaler=maxabsscaler))
else:
modules.add_module('DepthWise2d', nn.Conv2d(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(mdef['stride']),
padding=pad,
groups=output_filters[-1],
bias=not bn), )
if bn:
modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.1))
if mdef['activation'] == 'leaky':
modules.add_module('activation', nn.LeakyReLU(0.1 if not maxabsscaler else 0.25, inplace=True))
# modules.add_module('activation', nn.PReLU(num_parameters=1, init=0.10))
# modules.add_module('activation', Swish())
if mdef['activation'] == 'relu6':
modules.add_module('activation', ReLU6())
if mdef['activation'] == 'h_swish':
modules.add_module('activation', HardSwish())
if mdef['activation'] == 'relu':
modules.add_module('activation', nn.ReLU())
if mdef['activation'] == 'mish':
modules.add_module('activation', Mish())
elif mdef['type'] == 'BatchNorm2d':
filters = output_filters[-1]
modules = nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4)
if i == 0 and filters == 3: # normalize RGB image
# imagenet mean and var https://pytorch.org/docs/stable/torchvision/models.html#classification
modules.running_mean = torch.tensor([0.485, 0.456, 0.406])
modules.running_var = torch.tensor([0.0524, 0.0502, 0.0506])
elif mdef['type'] == 'maxpool':
k = mdef['size'] # kernel size
stride = mdef['stride']
maxpool = nn.MaxPool2d(kernel_size=k, stride=stride, padding=(k - 1) // 2)
if k == 2 and stride == 1: # yolov3-tiny
modules.add_module('ZeroPad2d', nn.ZeroPad2d((0, 1, 0, 1)))
modules.add_module('MaxPool2d', maxpool)
else:
modules = maxpool
elif mdef['type'] == 'se':
if 'filters' in mdef:
filters = int(mdef['filters'])
modules.add_module('se', SE(channel=filters))
if 'reduction' in mdef:
modules.add_module('se', SE(output_filters[-1], reduction=int(mdef['reduction'])))
elif mdef['type'] == 'upsample':
modules = nn.Upsample(scale_factor=mdef['stride'])
elif mdef['type'] == 'route': # nn.Sequential() placeholder for 'route' layer
layers = mdef['layers']
filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers])
if 'groups' in mdef:
filters = filters // 2
routs.extend([i + l if l < 0 else l for l in layers])
if quantized == -1:
if 'groups' in mdef:
modules = FeatureConcat(layers=layers, groups=True)
else:
modules = FeatureConcat(layers=layers, groups=False)
elif quantized == 3:
if 'groups' in mdef:
modules = COSPTQuantizedFeatureConcat(layers=layers, groups=True, bits=a_bit,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx, )
else:
modules = COSPTQuantizedFeatureConcat(layers=layers, groups=False, bits=a_bit,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx, )
else:
if 'groups' in mdef:
modules = QuantizedFeatureConcat(layers=layers, groups=True, bits=a_bit,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx, )
else:
modules = QuantizedFeatureConcat(layers=layers, groups=False, bits=a_bit,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx, )
elif mdef['type'] == 'shortcut': # nn.Sequential() placeholder for 'shortcut' layer
layers = mdef['from']
filters = output_filters[-1]
routs.extend([i + l if l < 0 else l for l in layers])
if quantized == -1 or quantized == 2:
modules = Shortcut(layers=layers, weight='weights_type' in mdef)
else:
if quantized == 3:
if shortcut_way == 1:
modules = COSPTQuantizedShortcut_min(layers=layers, weight='weights_type' in mdef, bits=a_bit,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx, )
elif shortcut_way == 2:
modules = COSPTQuantizedShortcut_max(layers=layers, weight='weights_type' in mdef, bits=a_bit,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx, )
else:
if shortcut_way == 1:
modules = QuantizedShortcut_min(layers=layers, weight='weights_type' in mdef, bits=a_bit,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx, )
elif shortcut_way == 2:
modules = QuantizedShortcut_max(layers=layers, weight='weights_type' in mdef, bits=a_bit,
quantizer_output=quantizer_output,
reorder=reorder, TM=TM, TN=TN,
name="{:04d}".format(i) + "_" +
mdef['type'][:4],
layer_idx=layer_idx, )
elif mdef['type'] == 'reorg3d': # yolov3-spp-pan-scale
pass
elif mdef['type'] == 'yolo':
yolo_index += 1
stride = [32, 16, 8] # P5, P4, P3 strides
if any(x in cfg for x in ['panet', 'yolov4', 'cd53']): # stride order reversed
                if 'yolov4-tiny' not in cfg:
stride = list(reversed(stride))
layers = mdef['from'] if 'from' in mdef else []
modules = YOLOLayer(anchors=mdef['anchors'][mdef['mask']], # anchor list
nc=mdef['classes'], # number of classes
img_size=img_size, # (416, 416)
yolo_index=yolo_index, # 0, 1, 2...
layers=layers, # output layers
stride=stride[yolo_index],
quantizer_output=quantizer_output)
# Initialize preceding Conv2d() bias (https://arxiv.org/pdf/1708.02002.pdf section 3.3)
try:
with torch.no_grad():
j = layers[yolo_index] if 'from' in mdef else -1
bias_ = module_list[j][0].bias # shape(255,)
bias = bias_[:modules.no * modules.na].view(modules.na, -1) # shape(3,85)
                    bias[:, 4] = bias[:, 4] - 4.5  # obj: ln((1-0.01)/0.01) ≈ 4.5
bias[:, 5:] = bias[:, 5:] + math.log(0.6 / (modules.nc - 0.99)) # cls (sigmoid(p) = 1/nc)
module_list[j][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad)
except:
print('WARNING: smart bias initialization failure.')
else:
print('Warning: Unrecognized Layer Type: ' + mdef['type'])
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
routs_binary = [False] * (i + 1)
for i in routs:
routs_binary[i] = True
return module_list, routs_binary
class YOLOLayer(nn.Module):
def __init__(self, anchors, nc, img_size, yolo_index, layers, stride, quantizer_output):
super(YOLOLayer, self).__init__()
self.anchors = torch.Tensor(anchors)
self.index = yolo_index # index of this layer in layers
self.layers = layers # model output layer indices
self.stride = stride # layer stride
self.nl = len(layers) # number of output layers (3)
self.na = len(anchors) # number of anchors (3)
self.nc = nc # number of classes (80)
self.no = nc + 5 # number of outputs (85)
self.nx, self.ny, self.ng = 0, 0, 0 # initialize number of x, y gridpoints
self.anchor_vec = self.anchors / self.stride
self.anchor_wh = self.anchor_vec.view(1, self.na, 1, 1, 2)
self.quantizer_output = quantizer_output
def create_grids(self, ng=(13, 13), device='cpu'):
self.nx, self.ny = ng # x and y grid size
self.ng = torch.tensor(ng, dtype=torch.float)
# build xy offsets
if not self.training:
yv, xv = torch.meshgrid([torch.arange(self.ny, device=device), torch.arange(self.nx, device=device)])
self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()
if self.anchor_vec.device != device:
self.anchor_vec = self.anchor_vec.to(device)
self.anchor_wh = self.anchor_wh.to(device)
def forward(self, p, out):
ASFF = False # https://arxiv.org/abs/1911.09516
if ASFF:
i, n = self.index, self.nl # index in layers, number of layers
p = out[self.layers[i]]
bs, _, ny, nx = p.shape # bs, 255, 13, 13
if (self.nx, self.ny) != (nx, ny):
self.create_grids((nx, ny), p.device)
# outputs and weights
# w = F.softmax(p[:, -n:], 1) # normalized weights
w = torch.sigmoid(p[:, -n:]) * (2 / n) # sigmoid weights (faster)
# w = w / w.sum(1).unsqueeze(1) # normalize across layer dimension
# weighted ASFF sum
p = out[self.layers[i]][:, :-n] * w[:, i:i + 1]
for j in range(n):
if j != i:
p += w[:, j:j + 1] * \
F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False)
else:
bs, _, ny, nx = p.shape # bs, 255, 13, 13
# if (self.nx, self.ny) != (nx, ny):
self.create_grids((nx, ny), p.device)
# p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85) # (bs, anchors, grid, grid, classes + xywh)
p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous() # prediction
if self.training:
return p
else: # inference
io = p.clone() # inference output
            if self.quantizer_output:
sigmoid_output = p.clone()
io[..., :2] = torch.sigmoid(io[..., :2]) + self.grid # xy
io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh # wh yolo method
io[..., :4] *= self.stride
torch.sigmoid_(io[..., 4:])
            ## dump sigmoid outputs for the quantizer
            if self.quantizer_output:
xy_sigmoid_output = torch.sigmoid(sigmoid_output[..., :2])
cls_sigmoid_output = sigmoid_output[..., 4:] * self.stride
cls_sigmoid_output = torch.sigmoid_(cls_sigmoid_output)
xy_sigmoid_output = np.array(xy_sigmoid_output.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/xy_sigmoid_output.txt'), xy_sigmoid_output,
delimiter='\n')
writer = open('./quantizer_output/xy_sigmoid_bin', "wb")
writer.write(xy_sigmoid_output)
writer.close()
cls_sigmoid_output = np.array(cls_sigmoid_output.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/cls_sigmoid_output.txt'), cls_sigmoid_output,
delimiter='\n')
writer = open('./quantizer_output/cls_sigmoid_bin', "wb")
writer.write(cls_sigmoid_output)
writer.close()
return io.view(bs, -1, self.no), p # view [1, 3, 13, 13, 85] as [1, 507, 85]
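# Decode recap for YOLOLayer.forward above (the standard YOLOv3 box transform):
#   bx = (sigmoid(tx) + cx) * stride        by = (sigmoid(ty) + cy) * stride
#   bw = pw * exp(tw) * stride              bh = ph * exp(th) * stride
# where (cx, cy) is the grid-cell offset from self.grid and (pw, ph) is
# anchor_wh, the anchor size in feature-map units; objectness and class scores
# go through a plain sigmoid.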
class Darknet(nn.Module):
# YOLOv3 object detection model
def __init__(self, cfg, img_size=(416, 416), verbose=False, quantized=-1, a_bit=8, w_bit=8,
quantizer_output=False, layer_idx=-1, reorder=False, TM=32, TN=32, steps=0, is_gray_scale=False,
maxabsscaler=False, shortcut_way=-1):
super(Darknet, self).__init__()
if isinstance(cfg, str):
self.module_defs = parse_model_cfg(cfg)
elif isinstance(cfg, list):
self.module_defs = cfg
self.quantized = quantized
self.a_bit = a_bit
self.w_bit = w_bit
        self.quantizer_output = quantizer_output  # flag controlling quantizer output dumps
self.layer_idx = layer_idx
self.reorder = reorder
self.TM = TM
self.TN = TN
self.hyperparams = copy.deepcopy(self.module_defs[0])
self.module_list, self.routs = create_modules(self.module_defs, img_size, cfg, quantized=self.quantized,
quantizer_output=self.quantizer_output, reorder=self.reorder,
TM=self.TM, TN=self.TN, layer_idx=self.layer_idx,
a_bit=self.a_bit, w_bit=self.w_bit, steps=steps,
is_gray_scale=is_gray_scale, maxabsscaler=maxabsscaler,
shortcut_way=shortcut_way)
self.yolo_layers = get_yolo_layers(self)
# torch_utils.initialize_weights(self)
# Darknet Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
self.version = np.array([0, 2, 5], dtype=np.int32) # (int32) version info: major, minor, revision
self.seen = np.array([0], dtype=np.int64) # (int64) number of images seen during training
        # print model summary
if self.quantized == -1:
self.info(verbose) # print model description
def forward(self, x, augment=False):
if not augment:
return self.forward_once(x)
else: # Augment images (inference and test only) https://github.com/ultralytics/yolov3/issues/931
img_size = x.shape[-2:] # height, width
s = [0.83, 0.67] # scales
y = []
for i, xi in enumerate((x,
torch_utils.scale_img(x.flip(3), s[0], same_shape=False), # flip-lr and scale
torch_utils.scale_img(x, s[1], same_shape=False), # scale
)):
# cv2.imwrite('img%g.jpg' % i, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])
y.append(self.forward_once(xi)[0])
y[1][..., :4] /= s[0] # scale
y[1][..., 0] = img_size[1] - y[1][..., 0] # flip lr
y[2][..., :4] /= s[1] # scale
# for i, yi in enumerate(y): # coco small, medium, large = < 32**2 < 96**2 <
# area = yi[..., 2:4].prod(2)[:, :, None]
# if i == 1:
# yi *= (area < 96. ** 2).float()
# elif i == 2:
# yi *= (area > 32. ** 2).float()
# y[i] = yi
y = torch.cat(y, 1)
return y, None
def forward_once(self, x, augment=False, verbose=False):
img_size = x.shape[-2:] # height, width
yolo_out, out, feature_out = [], [], []
if verbose:
print('0', x.shape)
str = ''
# Augment images (inference and test only)
if augment: # https://github.com/ultralytics/yolov3/issues/931
nb = x.shape[0] # batch size
s = [0.83, 0.67] # scales
x = torch.cat((x,
torch_utils.scale_img(x.flip(3), s[0]), # flip-lr and scale
torch_utils.scale_img(x, s[1]), # scale
), 0)
for i, module in enumerate(self.module_list):
name = module.__class__.__name__
if name in ['Shortcut', 'FeatureConcat', 'QuantizedShortcut_max', 'QuantizedShortcut_min',
'QuantizedFeatureConcat', 'COSPTQuantizedShortcut_min',
'COSPTQuantizedShortcut_max', 'COSPTQuantizedFeatureConcat']: # sum, concat
if verbose:
l = [i - 1] + module.layers # layers
sh = [list(x.shape)] + [list(out[i].shape) for i in module.layers] # shapes
str = ' >> ' + ' + '.join(['layer %g %s' % x for x in zip(l, sh)])
x = module(x, out) # Shortcut(), FeatureConcat()
elif name == 'YOLOLayer':
yolo_out.append(module(x, out))
else: # run module directly, i.e. mtype = 'convolutional', 'upsample', 'maxpool', 'batchnorm2d' etc.
if name == 'Upsample' and isinstance(x, list):
x[0] = module(x[0])
x[1] = module(x[1])
else:
x = module(x)
if name == "Sequential" and self.module_list[i + 1].__class__.__name__ != 'YOLOLayer':
feature_out.append(x)
out.append(x if self.routs[i] else [])
if verbose:
print('%g/%g %s -' % (i, len(self.module_list), name), list(x.shape), str)
str = ''
if self.training: # train
return yolo_out, feature_out
else: # inference or test
x, p = zip(*yolo_out) # inference output, training output
x = torch.cat(x, 1) # cat yolo outputs
if augment: # de-augment results
x = torch.split(x, nb, dim=0)
x[1][..., :4] /= s[0] # scale
x[1][..., 0] = img_size[1] - x[1][..., 0] # flip lr
x[2][..., :4] /= s[1] # scale
x = torch.cat(x, 1)
return x, p, feature_out
def fuse(self):
# Fuse Conv2d + BatchNorm2d layers throughout model
print('Fusing layers...')
fused_list = nn.ModuleList()
for a in list(self.children())[0]:
if isinstance(a, nn.Sequential):
for i, b in enumerate(a):
if isinstance(b, nn.modules.batchnorm.BatchNorm2d):
# fuse this bn layer with the previous conv2d layer
conv = a[i - 1]
fused = torch_utils.fuse_conv_and_bn(conv, b)
a = nn.Sequential(fused, *list(a.children())[i + 1:])
break
fused_list.append(a)
self.module_list = fused_list
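    # Fusion recap (a sketch of what torch_utils.fuse_conv_and_bn is expected to
    # compute; see that helper for the actual implementation):
    #   W' = W * gamma / sqrt(running_var + eps)          (per output channel)
    #   b' = beta + (b - running_mean) * gamma / sqrt(running_var + eps)
    # so the fused Conv2d reproduces Conv followed by BatchNorm in eval mode.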
def info(self, verbose=False):
torch_utils.model_info(self, verbose)
def get_yolo_layers(model):
return [i for i, m in enumerate(model.module_list) if m.__class__.__name__ == 'YOLOLayer'] # [89, 101, 113]
def load_darknet_weights(self, weights, cutoff=-1, pt=False, quant=False):
# Parses and loads the weights stored in 'weights'
# Establish cutoffs (load layers between 0 and cutoff. if cutoff = -1 all are loaded)
file = Path(weights).name
if file == 'darknet53.conv.74':
cutoff = 75
elif file == 'yolov3-tiny.conv.15':
cutoff = 15
# Read weights file
with open(weights, 'rb') as f:
# Read Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
self.version = np.fromfile(f, dtype=np.int32, count=3) # (int32) version info: major, minor, revision
self.seen = np.fromfile(f, dtype=np.int64, count=1) # (int64) number of images seen during training
weights = np.fromfile(f, dtype=np.float32) # The rest are weights
ptr = 0
for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if mdef['type'] == 'convolutional':
conv_layer = module[0]
if mdef['batch_normalize']:
if quant:
# Load BN bias, weights, running mean and running variance
num_b = conv_layer.beta.numel()
# Bias
bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.beta)
conv_layer.beta.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.gamma)
conv_layer.gamma.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.running_mean)
conv_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.running_var)
conv_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
else:
# if os.path.basename(file) == 'yolov3.weights' or os.path.basename(file) == 'yolov3-tiny.weights':
                # pt marks a COCO-pretrained model: when reading parameters, the conv right before each YOLO layer has 255 outputs
if pt and os.path.basename(file).split('.')[-1] == 'weights':
num_b = 255
ptr += num_b
num_w = int(self.module_defs[i - 1]["filters"]) * 255
ptr += num_w
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
elif mdef['type'] == 'depthwise':
depthwise_layer = module[0]
if mdef['batch_normalize']:
if quant:
# Load BN bias, weights, running mean and running variance
num_b = conv_layer.beta.numel()
# Bias
bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.beta)
conv_layer.beta.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.gamma)
conv_layer.gamma.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.running_mean)
conv_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.running_var)
conv_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
# Load conv. weights
num_w = depthwise_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(depthwise_layer.weight)
depthwise_layer.weight.data.copy_(conv_w)
ptr += num_w
elif mdef['type'] == 'se':
se_layer = module[0]
fc = se_layer.fc
fc1 = fc[0]
num_fc1 = fc1.weight.numel()
fc1_w = torch.from_numpy(weights[ptr:ptr + num_fc1]).view_as(fc1.weight)
fc1.weight.data.copy_(fc1_w)
ptr += num_fc1
fc2 = fc[2]
num_fc2 = fc2.weight.numel()
fc2_w = torch.from_numpy(weights[ptr:ptr + num_fc2]).view_as(fc2.weight)
fc2.weight.data.copy_(fc2_w)
ptr += num_fc2
# Make sure the pointer has reached the end of the weights buffer
assert ptr == len(weights)
def save_weights(self, path='model.weights', cutoff=-1):
# Converts a PyTorch model to Darknet format (*.pt to *.weights)
# Note: Does not work if model.fuse() is applied
with open(path, 'wb') as f:
# Write Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
self.version.tofile(f) # (int32) version info: major, minor, revision
self.seen.tofile(f) # (int64) number of images seen during training
# Iterate through layers
for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if mdef['type'] == 'convolutional':
conv_layer = module[0]
# If batch norm, load bn first
if mdef['batch_normalize']:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(f)
bn_layer.weight.data.cpu().numpy().tofile(f)
bn_layer.running_mean.data.cpu().numpy().tofile(f)
bn_layer.running_var.data.cpu().numpy().tofile(f)
# Load conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(f)
# Load conv weights
conv_layer.weight.data.cpu().numpy().tofile(f)
elif mdef['type'] == 'depthwise':
depthwise_layer = module[0]
# If batch norm, load bn first
if mdef['batch_normalize']:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(f)
bn_layer.weight.data.cpu().numpy().tofile(f)
bn_layer.running_mean.data.cpu().numpy().tofile(f)
bn_layer.running_var.data.cpu().numpy().tofile(f)
# Load conv bias
else:
depthwise_layer.bias.data.cpu().numpy().tofile(f)
# Load conv weights
depthwise_layer.weight.data.cpu().numpy().tofile(f)
elif mdef['type'] == 'se':
se_layer = module[0]
fc = se_layer.fc
fc1 = fc[0]
fc2 = fc[2]
fc1.weight.data.cpu().numpy().tofile(f)
fc2.weight.data.cpu().numpy().tofile(f)
def convert(cfg='cfg/yolov3-spp.cfg', weights='weights/yolov3-spp.weights'):
# Converts between PyTorch and Darknet format per extension (i.e. *.weights convert to *.pt and vice versa)
# from models import *; convert('cfg/yolov3-spp.cfg', 'weights/yolov3-spp.weights')
# Initialize model
model = Darknet(cfg)
# Load weights and save
if weights.endswith('.pt'): # if PyTorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'])
target = weights.rsplit('.', 1)[0] + '.weights'
save_weights(model, path=target, cutoff=-1)
print("Success: converted '%s' to '%s'" % (weights, target))
elif weights.endswith('.weights'): # darknet format
_ = load_darknet_weights(model, weights)
chkpt = {'epoch': -1,
'best_fitness': None,
'training_results': None,
'model': model.state_dict(),
'optimizer': None}
target = weights.rsplit('.', 1)[0] + '.pt'
torch.save(chkpt, target)
print("Success: converted '%s' to '%'" % (weights, target))
else:
print('Error: extension not supported.')
def attempt_download(weights):
# Attempt to download pretrained weights if not found locally
weights = weights.strip().replace("'", '')
msg = weights + ' missing, try downloading from https://drive.google.com/open?id=1LezFG5g3BCW6iYaV89B2i64cqEUZD7e0'
if len(weights) > 0 and not os.path.isfile(weights):
d = {'yolov3-spp.weights': '16lYS4bcIdM2HdmyJBVDOvt3Trx6N3W2R',
'yolov3.weights': '1uTlyDWlnaqXcsKOktP5aH_zRDbfcDp-y',
'yolov3-tiny.weights': '1CCF-iNIIkYesIDzaPvdwlcf7H9zSsKZQ',
'yolov3-spp.pt': '1f6Ovy3BSq2wYq4UfvFUpxJFNDFfrIDcR',
'yolov3.pt': '1SHNFyoe5Ni8DajDNEqgB2oVKBb_NoEad',
'yolov3-tiny.pt': '10m_3MlpQwRtZetQxtksm9jqHrPTHZ6vo',
'darknet53.conv.74': '1WUVBid-XuoUBmvzBVUCBl_ELrzqwA8dJ',
'yolov3-tiny.conv.15': '1Bw0kCpplxUqyRYAJr9RY9SGnOJbo9nEj',
'yolov3-spp-ultralytics.pt': '1UcR-zVoMs7DH5dj3N1bswkiQTA4dmKF4'}
file = Path(weights).name
if file in d:
r = gdrive_download(id=d[file], name=weights)
else: # download from pjreddie.com
url = 'https://pjreddie.com/media/files/' + file
print('Downloading ' + url)
r = os.system('curl -f ' + url + ' -o ' + weights)
# Error check
if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
os.system('rm ' + weights) # remove partial downloads
raise Exception(msg)
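# --- Illustrative sketch (added for exposition, not part of the original pipeline) ---
# The .weights layout handled by load_darknet_weights/save_weights above is a 20-byte
# header (three int32 version fields plus an int64 image counter), then for each conv
# block either [bn_bias, bn_weight, running_mean, running_var] or [conv_bias], followed
# by the flattened conv weights. The demo below round-trips one tiny BN+conv block;
# all sizes are hypothetical.
def _demo_weights_roundtrip(path='demo.weights'):
    import numpy as np
    bn = [np.zeros(4, np.float32) for _ in range(4)]  # bias, weight, running_mean, running_var
    conv_w = np.ones((4, 3, 3, 3), np.float32)        # (out_c, in_c, k, k)
    with open(path, 'wb') as f:
        np.array([0, 2, 5], np.int32).tofile(f)       # version: major, minor, revision
        np.array([0], np.int64).tofile(f)             # number of images seen
        for p in bn:
            p.tofile(f)
        conv_w.tofile(f)
    weights = np.fromfile(path, np.float32, offset=20)  # skip the 20-byte header
    return weights.size == 4 * 4 + conv_w.size          # True: 16 BN floats + 108 conv floats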
================================================
FILE: normal_prune.py
================================================
from models import *
from utils.utils import *
import torch
import numpy as np
from copy import deepcopy
from test import test
from terminaltables import AsciiTable
import time
from utils.utils import *
from utils.prune_utils import *
import os
import argparse
def prune_and_eval(model, sorted_bn, percent=.0):
model_copy = deepcopy(model)
thre_index = int(len(sorted_bn) * percent)
# Threshold for the gamma parameters: channels whose gamma falls below it are all pruned
thre = sorted_bn[thre_index]
print(f'Channels with Gamma value less than {thre:.4f} are pruned!')
remain_num = 0
for idx in prune_idx:
bn_module = model_copy.module_list[idx][1]
# Build the channel mask from the BN threshold
mask = obtain_bn_mask(bn_module, thre)
remain_num += int(mask.sum())
bn_module.weight.data.mul_(mask)
# with torch.no_grad():
# mAP = eval_model(model_copy)[1].mean()
print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')
print(f'Prune ratio: {1 - remain_num / len(sorted_bn):.3f}')
# print(f'mAP of the pruned model is {mAP:.4f}')
return thre
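# Illustrative sketch of the masking above (hypothetical gamma values): channels whose
# BN gamma magnitude falls below the global-percentile threshold are zeroed out.
def _demo_threshold_mask():
    gamma = torch.tensor([0.9, 0.02, 0.4, 0.001, 0.7, 0.05])
    sorted_gamma = torch.sort(gamma.abs())[0]
    thre = sorted_gamma[int(len(sorted_gamma) * 0.5)]  # percent = 0.5
    mask = gamma.abs().ge(thre).float()
    return mask  # -> tensor([1., 0., 1., 0., 1., 0.]): 3 of 6 channels survive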
def obtain_filters_mask(model, thre, CBL_idx, prune_idx):
pruned = 0
total = 0
num_filters = []
filters_mask = []
# CBL_idx stores all conv layers followed by BN (the conv layer right before each YOLO layer has no BN)
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
mask = obtain_bn_mask(bn_module, thre).cpu().numpy()
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
if remain == 0:
print("Channels would be all pruned!")
raise Exception
print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
f'remaining channel: {remain:>4d}')
else:
mask = np.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.copy())
# prune_ratio computed here = pruned gamma parameters / all gamma parameters in CBL_idx
prune_ratio = pruned / total
print(f'Prune channels: {pruned}\tPrune ratio: {prune_ratio:.3f}')
return num_filters, filters_mask
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
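# Note (illustrative alternative, assuming a CUDA device): time.time() around
# asynchronous CUDA kernels can under-report latency; synchronizing before and after
# the loop gives a more faithful average.
def _demo_synced_forward_time(input, model, repeat=200):
    model.eval()
    with torch.no_grad():
        torch.cuda.synchronize()
        start = time.time()
        for _ in range(repeat):
            output = model(input)
        torch.cuda.synchronize()
    return (time.time() - start) / repeat, output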
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')
parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')
parser.add_argument('--img-size', type=int, default=608, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
opt = parser.parse_args()
print(opt)
percent = opt.percent
# Select a specific GPU
# torch.cuda.set_device(2)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg).to(device)
if opt.weights:
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
data_config = parse_data_cfg(opt.data)
valid_path = data_config["valid"]
class_names = load_classes(data_config["names"])
# test model
eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size,
imgsz=opt.img_size, rank=-1)
# Count the number of parameters
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
with torch.no_grad():
origin_model_metric = eval_model(model)
origin_nparameters = obtain_num_parameters(model)
# CBL denotes conv layers followed by BN; Other_idx holds conv layers without BN and all other layers
CBL_idx, Other_idx, prune_idx = parse_module_defs(model.module_defs)
# Copy the gamma parameters of all BN layers to be pruned into the bn_weights list
bn_weights = gather_bn_weights(model.module_list, prune_idx)
# torch.sort returns (sorted values, indices); keep the sorted gamma values
sorted_bn = torch.sort(bn_weights)[0]
# Upper bound on the threshold, so that no layer has all of its channels pruned
# (the minimum over layers of each BN layer's maximum gamma)
highest_thre = []
for idx in prune_idx:
# .item() extracts the scalar value from a one-element tensor
# Max gamma of each layer to be pruned
highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())
# Minimum of those per-layer maxima
highest_thre = min(highest_thre)
# Percentile of sorted_bn that corresponds to highest_thre
percent_limit = (sorted_bn == highest_thre).nonzero(as_tuple=False).item() / len(bn_weights)
print(f'Threshold should be less than {highest_thre:.4f}.')
print(f'The corresponding prune ratio is {percent_limit:.3f}.')
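# Illustrative sketch of the threshold ceiling above (hypothetical gamma values):
def _demo_percent_limit():
    pooled = torch.tensor([0.9, 0.1, 0.8, 0.2, 0.7, 0.3])  # gammas pooled from 3 layers
    per_layer_max = [0.9, 0.8, 0.7]                         # max gamma of each layer
    ceiling = min(per_layer_max)       # any higher threshold would empty some layer
    rank = (torch.sort(pooled)[0] == ceiling).nonzero(as_tuple=False).item()
    return rank / len(pooled)          # -> 0.5, i.e. prune at most 50% globally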
# Pruning threshold at the target percentile
threshold = prune_and_eval(model, sorted_bn, percent)
# Number of filters kept per layer and the corresponding masks
num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, prune_idx)
# CBLidx2mask maps each BN layer index in CBL_idx to its mask, tying the layers to be pruned to their post-prune masks
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
# Obtain the pruned model
pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)
with torch.no_grad():
mAP = eval_model(pruned_model)[1].mean()
print('after prune_model_keep_size map is {}'.format(mAP))
# Copy the original model's module_defs and update the filter counts
compact_module_defs = deepcopy(model.module_defs)
for idx, num in zip(CBL_idx, num_filters):
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(num)
# Build the compact model
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)
compact_nparameters = obtain_num_parameters(compact_model)
# Copy the weights into the compact model
init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Other_idx, CBLidx2mask)
# Benchmark inference speed
random_input = torch.rand((16, 3, 416, 416)).to(device)
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)
# Evaluate the pruned model on the test set and count its parameters
with torch.no_grad():
compact_model_metric = eval_model(compact_model)
# Compare parameter counts and metrics before and after pruning
metric_table = [
["Metric", "Before", "After"],
["mAP", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
# Generate the pruned cfg file and save the model
pruned_cfg_name = opt.cfg.replace('/', f'/normal_prune_{percent}_')
# Create the output directory
dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
# compact_module_defs parsed the anchors from a string into an array, so convert them back to a string here
file = open(opt.cfg, 'r')
lines = file.read().split('\n')
for line in lines:
if line.split(' = ')[0] == 'anchors':
anchor = line.split(' = ')[1]
break
if line.split('=')[0] == 'anchors':
anchor = line.split('=')[1]
break
file.close()
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + f'/normal_prune_{str(percent)}_percent.weights'
save_weights(compact_model, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
================================================
FILE: regular_prune.py
================================================
from models import *
from utils.utils import *
import torch
import numpy as np
from copy import deepcopy
from test import test
from terminaltables import AsciiTable
import time
from utils.utils import *
from utils.prune_utils import *
import os
import argparse
# This function is instructive:
# 1) deep-copy the original model into model_copy
# 2) in model_copy, zero out the BN gamma parameters that fall below the threshold
# 3) a BN layer outputs y = gamma*x + beta, so with gamma zeroed the channel contributes only the bias beta
# 4) interestingly, network slimming zeroes both gamma and beta, while here only gamma is zeroed, yet the
# result is still good; as another paper notes, the effect of beta can first be folded into the next conv
# layer before pruning this layer's gamma (see the sketch after this function)
# In short, this is the quickest way to preview the effect of pruning
def prune_and_eval(model, sorted_bn, percent=.0):
model_copy = deepcopy(model)
thre_index = int(len(sorted_bn) * percent)
# Threshold for the gamma parameters: channels whose gamma falls below it are all pruned
thre = sorted_bn[thre_index]
print(f'Channels with Gamma value less than {thre:.4f} are pruned!')
remain_num = 0
for idx in prune_idx:
bn_module = model_copy.module_list[idx][1]
mask = obtain_bn_mask(bn_module, thre)
mask_cnt = int(mask.sum())
if mask_cnt == 0:
this_layer_sort_bn = bn_module.weight.data.abs().clone()
sort_bn_values = torch.sort(this_layer_sort_bn)[0]
bn_cnt = bn_module.weight.shape[0]
this_layer_thre = sort_bn_values[bn_cnt - 8]
mask = obtain_bn_mask(bn_module, this_layer_thre)
else:
for i in range(len(filter_switch)):
if mask_cnt <= filter_switch[i]:
mask_cnt = filter_switch[i]
break
this_layer_sort_bn = bn_module.weight.data.abs().clone()
sort_bn_values = torch.sort(this_layer_sort_bn)[0]
bn_cnt = bn_module.weight.shape[0]
this_layer_thre = sort_bn_values[bn_cnt - mask_cnt]
mask = obtain_bn_mask(bn_module, this_layer_thre)
remain_num += int(mask.sum())
bn_module.weight.data.mul_(mask)
with torch.no_grad():
mAP = eval_model(model_copy)[1].mean()
print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')
print(f'Prune ratio: {1 - remain_num / len(sorted_bn):.3f}')
print(f'mAP of the pruned model is {mAP:.4f}')
return thre
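# Sketch of the beta-folding trick mentioned above (assumed shapes and activation; the
# repo's actual logic lives in prune_utils.prune_model_keep_size): a pruned channel's BN
# output collapses to the constant beta, and activation(beta) can be absorbed into the
# next convolution as a bias offset.
def _demo_absorb_beta(next_conv_weight, beta, mask):
    # next_conv_weight: (out_c, in_c, k, k); beta and mask: (in_c,), mask 1 = keep
    act = torch.nn.functional.leaky_relu(beta * (1 - mask), 0.1)  # constant activations of pruned channels
    return next_conv_weight.sum(dim=(2, 3)).matmul(act)           # offset to add to the next layer's bias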
def obtain_filters_mask(model, thre, CBL_idx, prune_idx):
pruned = 0
total = 0
num_filters = []
filters_mask = []
# CBL_idx stores all conv layers followed by BN (the conv layer right before each YOLO layer has no BN)
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
mask = obtain_bn_mask(bn_module, thre).cpu().numpy()
mask_cnt = int(mask.sum())
if mask_cnt == 0:
this_layer_sort_bn = bn_module.weight.data.abs().clone()
sort_bn_values = torch.sort(this_layer_sort_bn)[0]
bn_cnt = bn_module.weight.shape[0]
this_layer_thre = sort_bn_values[bn_cnt - 8]
mask = obtain_bn_mask(bn_module, this_layer_thre).cpu().numpy()
else:
for i in range(len(filter_switch)):
if mask_cnt <= filter_switch[i]:
mask_cnt = filter_switch[i]
break
this_layer_sort_bn = bn_module.weight.data.abs().clone()
sort_bn_values = torch.sort(this_layer_sort_bn)[0]
bn_cnt = bn_module.weight.shape[0]
this_layer_thre = sort_bn_values[bn_cnt - mask_cnt]
mask = obtain_bn_mask(bn_module, this_layer_thre).cpu().numpy()
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
if remain == 0:
print("Channels would be all pruned!")
raise Exception
print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
f'remaining channel: {remain:>4d}')
else:
mask = np.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.copy())
# prune_ratio computed here = pruned gamma parameters / all gamma parameters in CBL_idx
prune_ratio = pruned / total
print(f'Prune channels: {pruned}\tPrune ratio: {prune_ratio:.3f}')
return num_filters, filters_mask
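# Illustrative sketch of the filter_switch rounding above (hypothetical counts): the
# surviving channel count is rounded up to the next multiple of 32 (and a layer that
# would otherwise lose every channel keeps its top 8), keeping layer widths hardware-friendly.
def _demo_round_channels(mask_cnt, step=32):
    filter_switch = [c for c in range(2048) if c % step == 0]
    for c in filter_switch:
        if mask_cnt <= c:
            return c
    return filter_switch[-1]
# e.g. _demo_round_channels(37) -> 64 and _demo_round_channels(64) -> 64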
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')
parser.add_argument('--percent', type=float, default=0.5, help='global channel prune percent')
parser.add_argument('--img-size', type=int, default=608, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
opt = parser.parse_args()
print(opt)
percent = opt.percent
# Select a specific GPU
# torch.cuda.set_device(2)
filter_switch = [each for each in range(2048) if (each % 32 == 0)]  # allowed channel counts: multiples of 32
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg).to(device)
if opt.weights:
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
data_config = parse_data_cfg(opt.data)
valid_path = data_config["valid"]
class_names = load_classes(data_config["names"])
eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size,
imgsz=opt.img_size, rank=-1)
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
# This must not be commented out; the result is needed later
with torch.no_grad():
origin_model_metric = eval_model(model)
origin_nparameters = obtain_num_parameters(model)
CBL_idx, Conv_idx, prune_idx = parse_module_defs(model.module_defs)
# Copy the gamma parameters of all BN layers to be pruned into the bn_weights list
bn_weights = gather_bn_weights(model.module_list, prune_idx)
# torch.sort returns (sorted values, indices)
sorted_bn = torch.sort(bn_weights)[0]
# Upper bound on the threshold, so that no layer has all of its channels pruned (the minimum over layers of each BN layer's maximum gamma)
highest_thre = []
for idx in prune_idx:
# .item() extracts the scalar value from a one-element tensor
highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())
highest_thre = min(highest_thre)
# Percentile of sorted_bn that corresponds to highest_thre
percent_limit = (sorted_bn == highest_thre).nonzero(as_tuple=False).item() / len(bn_weights)
print(f'Threshold should be less than {highest_thre:.4f}.')
print(f'The corresponding prune ratio is {percent_limit:.3f}.')
threshold = prune_and_eval(model, sorted_bn, percent)
# ****************************************************************
# The effect of pruning is already visible above, but the pruned network structure has not been built yet;
# the code below builds the new structure and copies the old model's parameters into it
# %%
num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, prune_idx)
# CBLidx2mask maps each BN layer index in CBL_idx to its mask
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)
with torch.no_grad():
mAP = eval_model(pruned_model)[1].mean()
print('after prune_model_keep_size map is {}'.format(mAP))
# Copy the original model's module_defs and update the filter counts
compact_module_defs = deepcopy(model.module_defs)
for idx, num in zip(CBL_idx, num_filters):
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(num)
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)
compact_nparameters = obtain_num_parameters(compact_model)
init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)
random_input = torch.rand((16, 3, 416, 416)).to(device)
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)
# Evaluate the pruned model on the test set and count its parameters
with torch.no_grad():
compact_model_metric = eval_model(compact_model)
# Compare parameter counts and metrics before and after pruning
metric_table = [
["Metric", "Before", "After"],
["mAP", f'{origin_model_metric[1].mean():.6f}', f'{compact_model_metric[1].mean():.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
# Generate the pruned cfg file and save the model
pruned_cfg_name = opt.cfg.replace('/', f'/regular_prune_{percent}_')
# Create the output directory
dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
# compact_module_defs parsed the anchors from a string into an array, so convert them back to a string here
file = open(opt.cfg, 'r')
lines = file.read().split('\n')
for line in lines:
if line.split(' = ')[0] == 'anchors':
anchor = line.split(' = ')[1]
break
if line.split('=')[0] == 'anchors':
anchor = line.split('=')[1]
break
file.close()
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + f'/regular_prune_{str(percent)}_percent.weights'
save_weights(compact_model, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
================================================
FILE: requirements.txt
================================================
# pip install -U -r requirements.txt
Cython
numpy==1.17
opencv-python
torch>=1.5.1
matplotlib
pillow
tensorboard
torchvision
scipy
tqdm
git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI
# Conda commands (in lieu of pip) ---------------------------------------------
# conda update -yn base -c defaults conda
# conda install -yc anaconda numpy opencv matplotlib tqdm pillow ipython
# conda install -yc conda-forge scikit-image pycocotools tensorboard
# conda install -yc spyder-ide spyder-line-profiler
# conda install -yc pytorch pytorch torchvision
# conda install -yc conda-forge protobuf numpy && pip install onnx==1.6.0 # https://github.com/onnx/onnx#linux-and-macos
================================================
FILE: shortcut_prune.py
================================================
import argparse
from models import *
from utils.utils import *
import torch
import numpy as np
from copy import deepcopy
from test import test
from terminaltables import AsciiTable
import time
from utils.utils import *
from utils.prune_utils import *
import os
# Shortcut pruning
# This function is instructive:
# 1) deep-copy the original model into model_copy
# 2) in model_copy, zero out the BN gamma parameters that fall below the threshold
# 3) a BN layer outputs y = gamma*x + beta, so with gamma zeroed the channel contributes only the bias beta
# 4) interestingly, network slimming zeroes both gamma and beta, while here only gamma is zeroed, yet the
# result is still good; as another paper notes, the effect of beta can first be folded into the next conv
# layer before pruning this layer's gamma
# In short, this is the quickest way to preview the effect of pruning
def prune_and_eval(model, sorted_bn, shortcut_idx, percent=.0):
model_copy = deepcopy(model)
thre_index = int(len(sorted_bn) * percent)
# Threshold for the gamma parameters: channels whose gamma falls below it are all pruned
thre1 = sorted_bn[thre_index]
print(f'Channels with Gamma value less than {thre1:.8f} are pruned!')
remain_num = 0
idx_new = dict()
for idx in prune_idx:
if idx not in shortcut_idx:
bn_module = model_copy.module_list[idx][1]
mask = obtain_bn_mask(bn_module, thre1)
# Record the post-prune mask of each conv layer
# idx_new[idx]=mask.cpu().numpy()
idx_new[idx] = mask
remain_num += int(mask.sum())
bn_module.weight.data.mul_(mask)
# bn_module.bias.data.mul_(mask*0.0001)
else:
bn_module = model_copy.module_list[idx][1]
mask = idx_new[shortcut_idx[idx]]
idx_new[idx] = mask
remain_num += int(mask.sum())
bn_module.weight.data.mul_(mask)
# print(int(mask.sum()))
# with torch.no_grad():
# mAP = eval_model(model_copy)[0][2]
print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')
print(f'Prune ratio: {1 - remain_num / len(sorted_bn):.3f}')
# print(f'mAP of the pruned model is {mAP:.4f}')
return thre1
def obtain_filters_mask(model, thre, CBL_idx, shortcut_idx, prune_idx):
pruned = 0
total = 0
num_filters = []
filters_mask = []
idx_new = dict()
# CBL_idx stores all conv layers followed by BN (the conv layer right before each YOLO layer has no BN)
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
if idx not in shortcut_idx:
mask = obtain_bn_mask(bn_module, thre).cpu().numpy()
idx_new[idx] = mask
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
# if remain == 0:
# print("Channels would be all pruned!")
# raise Exception
# print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
# f'remaining channel: {remain:>4d}')
else:
# If idx is in shortcut_idx, force the two layers joined by the shortcut to share the same mask
mask = idx_new[shortcut_idx[idx]]
idx_new[idx] = mask
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
if remain == 0:
print("Channels would be all pruned!")
raise Exception
print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
f'remaining channel: {remain:>4d}')
else:
mask = np.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.copy())
# prune_ratio computed here = pruned gamma parameters / all gamma parameters in CBL_idx
prune_ratio = pruned / total
print(f'Prune channels: {pruned}\tPrune ratio: {prune_ratio:.3f}')
return num_filters, filters_mask
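# Illustrative sketch (hypothetical layer indices): layers joined by a shortcut must
# keep identical channel masks, which is what the shortcut_idx lookup above enforces.
def _demo_shared_shortcut_mask():
    shortcut_idx = {6: 2}                    # conv 6 is added to conv 2 by a shortcut
    masks = {2: np.array([1., 0., 1., 1.])}  # mask chosen for conv 2 by thresholding
    masks[6] = masks[shortcut_idx[6]]        # conv 6 reuses the very same mask
    return masks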
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')
parser.add_argument('--percent', type=float, default=0.6, help='channel prune percent')
parser.add_argument('--img-size', type=int, default=608, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
opt = parser.parse_args()
print(opt)
# Select a specific GPU
# torch.cuda.set_device(2)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg).to(device)
if opt.weights:
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
data_config = parse_data_cfg(opt.data)
valid_path = data_config["valid"]
class_names = load_classes(data_config["names"])
# Evaluate the model
eval_model = lambda model: test(model=model, imgsz=opt.img_size, cfg=opt.cfg, data=opt.data,
batch_size=opt.batch_size, rank=-1)
# Count the total number of parameters
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
with torch.no_grad():
origin_model_metric = eval_model(model)
origin_nparameters = obtain_num_parameters(model)
# Unlike normal_prune, here we also need shortcut_idx and shortcut_all:
# shortcut_idx stores the correspondence, i.e. shortcut_idx[x] is the index of the layer added together
# with the (x-1)-th conv layer
# shortcut_all stores all layers that take part in the additions
CBL_idx, Conv_idx, prune_idx, shortcut_idx, shortcut_all = parse_module_defs2(model.module_defs)
# Copy the gamma parameters of all BN layers to be pruned into the bn_weights list
bn_weights = gather_bn_weights(model.module_list, prune_idx)
# Sort the BN gamma parameters
# torch.sort returns (sorted values, indices)
sorted_bn = torch.sort(bn_weights)[0]
# Upper bound on the threshold, so that no layer has all of its channels pruned (the minimum over layers of each BN layer's maximum gamma)
highest_thre = []
for idx in prune_idx:
# .item() extracts the scalar value from a one-element tensor
highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())
highest_thre = min(highest_thre)
# Percentile of sorted_bn that corresponds to highest_thre
percent_limit = (sorted_bn == highest_thre).nonzero(as_tuple=False).item() / len(bn_weights)
print(f'Threshold should be less than {highest_thre:.8f}.')
print(f'The corresponding prune ratio is {percent_limit:.3f}.')
percent = opt.percent
threshold = prune_and_eval(model, sorted_bn, shortcut_idx, percent)
num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, shortcut_idx, prune_idx)
# CBLidx2mask maps each BN layer index in CBL_idx to its mask
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)
with torch.no_grad():
mAP = eval_model(pruned_model)[0][2]
print('after prune_model_keep_size map is {}'.format(mAP))
# Copy the original model's module_defs and update the filter counts
compact_module_defs = deepcopy(model.module_defs)
for idx, num in zip(CBL_idx, num_filters):
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(num)
# for item_def in compact_module_defs:
# print(item_def)
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs).to(device)
compact_nparameters = obtain_num_parameters(compact_model)
init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)
random_input = torch.rand((16, 3, 416, 416)).to(device)
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)
# Evaluate the pruned model on the test set and count its parameters
with torch.no_grad():
compact_model_metric = eval_model(compact_model)
# Compare parameter counts and metrics before and after pruning
metric_table = [
["Metric", "Before", "After"],
["mAP", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric[0][2]:.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
# Generate the pruned cfg file and save the model
pruned_cfg_name = opt.cfg.replace('/', f'/shortcut_prune_{percent}_')
# Create the output directory
dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
# compact_module_defs parsed the anchors from a string into an array, so convert them back to a string here
file = open(opt.cfg, 'r')
lines = file.read().split('\n')
for line in lines:
if line.split(' = ')[0] == 'anchors':
anchor = line.split(' = ')[1]
break
if line.split('=')[0] == 'anchors':
anchor = line.split('=')[1]
break
file.close()
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + f'/shortcut_prune_{str(percent)}_percent.weights'
save_weights(compact_model, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
================================================
FILE: slim_prune.py
================================================
from models import *
from utils.utils import *
import numpy as np
from copy import deepcopy
from test import test
from terminaltables import AsciiTable
import time
from utils.prune_utils import *
import argparse
# %%
def obtain_filters_mask(model, thre, CBL_idx, prune_idx):
pruned = 0
total = 0
num_filters = []
filters_mask = []
for idx in CBL_idx:
bn_module = model.module_list[idx][1]
if idx in prune_idx:
weight_copy = bn_module.weight.data.abs().clone()
channels = weight_copy.shape[0]
min_channel_num = int(channels * opt.layer_keep) if int(channels * opt.layer_keep) > 0 else 1
mask = weight_copy.gt(thre).float()
if int(torch.sum(mask)) < min_channel_num:
_, sorted_index_weights = torch.sort(weight_copy, descending=True)
mask[sorted_index_weights[:min_channel_num]] = 1.
remain = int(mask.sum())
pruned = pruned + mask.shape[0] - remain
print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
f'remaining channel: {remain:>4d}')
else:
mask = torch.ones(bn_module.weight.data.shape)
remain = mask.shape[0]
total += mask.shape[0]
num_filters.append(remain)
filters_mask.append(mask.clone())
prune_ratio = pruned / total
print(f'Prune channels: {pruned}\tPrune ratio: {prune_ratio:.3f}')
return num_filters, filters_mask
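# Illustrative sketch of the layer_keep floor above (hypothetical values): even if the
# global threshold would strip a layer bare, its strongest channels are forced back on.
def _demo_layer_keep_floor():
    import torch
    gamma = torch.tensor([0.01, 0.02, 0.03, 0.9])
    thresh, layer_keep = 0.5, 0.5
    mask = gamma.gt(thresh).float()                 # only one channel survives
    min_keep = max(int(len(gamma) * layer_keep), 1)
    if int(mask.sum()) < min_keep:
        _, order = torch.sort(gamma, descending=True)
        mask[order[:min_keep]] = 1.
    return mask                                     # -> tensor([0., 0., 1., 1.])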
def prune_and_eval(model, CBL_idx, CBLidx2mask):
model_copy = deepcopy(model)
for idx in CBL_idx:
bn_module = model_copy.module_list[idx][1]
mask = CBLidx2mask[idx].cuda()
bn_module.weight.data.mul_(mask)
with torch.no_grad():
mAP = eval_model(model_copy)[0][2]
print(f'mask the gamma as zero, mAP of the model is {mAP:.4f}')
def obtain_avg_forward_time(input, model, repeat=200):
model.eval()
start = time.time()
with torch.no_grad():
for i in range(repeat):
output = model(input)
avg_infer_time = (time.time() - start) / repeat
return avg_infer_time, output
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/last.pt', help='sparse model weights')
parser.add_argument('--percent', type=float, default=0.8, help='global channel prune percent')
parser.add_argument('--layer_keep', type=float, default=0.01, help='channel keep percent per layer')
parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16, help='batch-size')
opt = parser.parse_args()
print(opt)
img_size = opt.img_size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(opt.cfg, (img_size, img_size)).to(device)
if opt.weights.endswith(".pt"):
model.load_state_dict(torch.load(opt.weights, map_location=device)['model'])
else:
_ = load_darknet_weights(model, opt.weights)
print('\nloaded weights from ', opt.weights)
eval_model = lambda model: test(model=model, cfg=opt.cfg, data=opt.data, batch_size=opt.batch_size,
imgsz=img_size, rank=-1)
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
print("\nlet's test the original model first:")
with torch.no_grad():
origin_model_metric = eval_model(model)
origin_nparameters = obtain_num_parameters(model)
CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2(model.module_defs)
bn_weights = gather_bn_weights(model.module_list, prune_idx)
sorted_bn, sorted_index = torch.sort(bn_weights)
thresh_index = int(len(bn_weights) * opt.percent)
thresh = sorted_bn[thresh_index].cuda()
print(f'Global Threshold should be less than {thresh:.4f}.')
num_filters, filters_mask = obtain_filters_mask(model, thresh, CBL_idx, prune_idx)
CBLidx2mask = {idx: mask for idx, mask in zip(CBL_idx, filters_mask)}
CBLidx2filters = {idx: filters for idx, filters in zip(CBL_idx, num_filters)}
for i in model.module_defs:
if i['type'] == 'shortcut':
i['is_access'] = False
print('merge the masks of layers connected by shortcuts!')
merge_mask(model, CBLidx2mask, CBLidx2filters)
prune_and_eval(model, CBL_idx, CBLidx2mask)
for i in CBLidx2mask:
CBLidx2mask[i] = CBLidx2mask[i].clone().cpu().numpy()
pruned_model = prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask)
print(
    "\nnow prune the model but keep its size (actually fold the BN beta offset into the following layers); let's see how the mAP goes")
with torch.no_grad():
eval_model(pruned_model)
for i in model.module_defs:
if i['type'] == 'shortcut':
i.pop('is_access')
compact_module_defs = deepcopy(model.module_defs)
for idx in CBL_idx:
assert compact_module_defs[idx]['type'] == 'convolutional'
compact_module_defs[idx]['filters'] = str(CBLidx2filters[idx])
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs, (img_size, img_size)).to(device)
compact_nparameters = obtain_num_parameters(compact_model)
init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)
random_input = torch.rand((1, 3, img_size, img_size)).to(device)
print('testing inference time...')
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)
print('testing the final model...')
with torch.no_grad():
compact_model_metric = eval_model(compact_model)
metric_table = [
["Metric", "Before", "After"],
["mAP", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric[0][2]:.6f}'],
["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
pruned_cfg_name = opt.cfg.replace('/', f'/slim_prune_{opt.percent}')
# Create the output directory
dir_name = pruned_cfg_name.split('/')[0] + '/' + pruned_cfg_name.split('/')[1]
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
# compact_module_defs parsed the anchors from a string into an array, so convert them back to a string here
file = open(opt.cfg, 'r')
lines = file.read().split('\n')
for line in lines:
if line.split(' = ')[0] == 'anchors':
anchor = line.split(' = ')[1]
break
if line.split('=')[0] == 'anchors':
anchor = line.split('=')[1]
break
file.close()
for item in compact_module_defs:
if item['type'] == 'shortcut':
item['from'] = str(item['from'][0])
elif item['type'] == 'route':
item['layers'] = ",".join('%s' % i for i in item['layers'])
elif item['type'] == 'yolo':
item['mask'] = ",".join('%s' % i for i in item['mask'])
item['anchors'] = anchor
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
weights_dir_name = dir_name.replace('cfg', 'weights')
if not os.path.isdir(weights_dir_name):
os.makedirs(weights_dir_name)
compact_model_name = weights_dir_name + f'/slim_prune_{str(opt.percent)}_percent.weights'
save_weights(compact_model, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
================================================
FILE: test.py
================================================
import argparse
import json
from torch.utils.data import DataLoader
from models import *
from utils.datasets import *
from utils.utils import *
def test(cfg,
data,
weights=None,
batch_size=16,
imgsz=416,
conf_thres=0.001,
iou_thres=0.6, # for nms
save_json=False,
augment=False,
model=None,
dataloader=None,
multi_label=True,
quantized=-1,
a_bit=8,
w_bit=8,
rank=-1,
plot=True,
is_gray_scale=False,
maxabsscaler=False,
shortcut_way=-1):
# Initialize/load model and set device
if model is None:
device = torch_utils.select_device(opt.device, batch_size=batch_size)
verbose = opt.task == 'test'
# Remove previous
for f in glob.glob('test_batch*.jpg'):
os.remove(f)
# Initialize model
model = Darknet(cfg, imgsz, quantized=quantized, a_bit=a_bit, w_bit=w_bit,
is_gray_scale=is_gray_scale, maxabsscaler=maxabsscaler, shortcut_way=shortcut_way)
# Load weights
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location=device)['model'])
else: # darknet format
load_darknet_weights(model, weights, quant=(quantized != -1))
# Fuse
if quantized == -1:
model.fuse()
model.to(device)
if device.type != 'cpu' and torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
# summary(model, input_size=(3, imgsz, imgsz))
else: # called by train.py
device = next(model.parameters()).device # get model device
verbose = False
# Configure run
data = parse_data_cfg(data)
nc = int(data['classes']) # number of classes
path = data['valid'] # path to test images
names = load_classes(data['names']) # class names
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
iouv = iouv[0].view(1) # comment for mAP@0.5:0.95
niou = iouv.numel()
# Dataloader
if dataloader is None:
dataset = LoadImagesAndLabels(path, imgsz, batch_size, rect=True,
is_gray_scale=is_gray_scale)
batch_size = min(batch_size, len(dataset))
dataloader = DataLoader(dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]),
pin_memory=True,
collate_fn=dataset.collate_fn)
seen = 0
model.eval()
# _ = model(torch.zeros((1, 3, imgsz, imgsz), device=device)) if device.type != 'cpu' else None # run once
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@0.5', 'F1')
p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
pbar = tqdm(dataloader, desc=s) if rank in [-1, 0] else dataloader
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (imgs, targets, paths, shapes) in enumerate(pbar):
if maxabsscaler:
imgs = imgs.to(device).float() / 256.0
imgs = imgs * 2 - 1
else:
imgs = imgs.to(device).float() / 256.0 # uint8 to float32, 0 - 255 to 0.0 - 1.0
if quantized != -1:
if a_bit == 16:
imgs = imgs * (2 ** 14)
sign = torch.sign(imgs)
imgs = sign * torch.floor(torch.abs(imgs) + 0.5)
imgs = imgs / (2 ** 14)
targets = targets.to(device)
nb, _, height, width = imgs.shape # batch size, channels, height, width
whwh = torch.Tensor([width, height, width, height]).to(device)
# Disable gradients
with torch.no_grad():
# Run model
t = torch_utils.time_synchronized()
inf_out, train_out, _ = model(imgs, augment=augment) # inference and training outputs
t0 += torch_utils.time_synchronized() - t
# Compute loss
if hasattr(model, 'hyp'): # if model has loss hyperparameters
loss += compute_loss(train_out, targets, model)[1][:3] # GIoU, obj, cls
# Run NMS
t = torch_utils.time_synchronized()
output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, multi_label=multi_label)
t1 += torch_utils.time_synchronized() - t
# Statistics per image
for si, pred in enumerate(output):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
seen += 1
if pred is None:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Append to text file
# with open('test.txt', 'a') as file:
# [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]
# Clip boxes to image bounds
clip_coords(pred, (height, width))
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(Path(paths[si]).stem.split('_')[-1])
box = pred[:, :4].clone() # xyxy
scale_coords(imgs[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape
box = xyxy2xywh(box) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5]) * whwh
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d not in detected:
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if batch_i < 1 and plot:
f = 'test_batch%g_gt.jpg' % batch_i # filename
plot_images(imgs, targets, paths=paths, names=names, fname=f, is_gray_scale=is_gray_scale) # ground truth
f = 'test_batch%g_pred.jpg' % batch_i
plot_images(imgs, output_to_target(output, width, height), paths=paths, names=names, fname=f,
is_gray_scale=is_gray_scale) # predictions
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats):
p, r, ap, f1, ap_class = ap_per_class(*stats)
if niou > 1:
p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0] # [P, R, AP@0.5:0.95, AP@0.5]
mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%10.3g' * 6 # print format
if rank in [-1, 0]:
print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))
# Print results per class
if verbose and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))
# Print speeds
if verbose or save_json:
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Save JSON
if save_json and map and len(jdict):
print('\nCOCO mAP with pycocotools...')
imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataloader.dataset.img_files]
with open('results.json', 'w') as file:
json.dump(jdict, file)
try:
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
# cocovision = opt.data.split('\\')[-1].split('.')[0]
# print(cocovision)
# cocoGt = COCO(glob.glob('data/'+cocovision+'/instances_val*.json')[0]) # initialize COCO ground truth api
cocoGt = COCO(glob.glob('data/coco2014/instances_val*.json')[0]) # initialize COCO ground truth api
cocoDt = cocoGt.loadRes('results.json') # initialize COCO pred api
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.params.imgIds = imgIds # [:32] # only evaluate these images
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
# mf1, map = cocoEval.stats[:2] # update to pycocotools results (mAP@0.5:0.95, mAP@0.5)
except Exception:
print('WARNING: pycocotools must be installed with numpy==1.17 to run correctly. '
'See https://github.com/cocodataset/cocoapi/issues/356')
# Return results
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map, mf1, *(loss.cpu() / len(dataloader)).tolist()), maps
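# Illustrative sketch of the 16-bit input rounding used above: inputs are snapped to a
# fixed-point grid with 14 fractional bits, rounding half away from zero.
def _demo_fixed_point_round(x, frac_bits=14):
    scaled = x * (2 ** frac_bits)
    rounded = torch.sign(scaled) * torch.floor(torch.abs(scaled) + 0.5)
    return rounded / (2 ** frac_bits)  # nearest multiple of 2 ** -frac_bits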
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--task', default='test', help="'test', 'study', 'benchmark'")
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--quantized', type=int, default=-1, help='quantization way')
parser.add_argument('--shortcut_way', type=int, default=1, help='--shortcut quantization way')
parser.add_argument('--a-bit', type=int, default=8, help='a-bit')
parser.add_argument('--w-bit', type=int, default=8, help='w-bit')
parser.add_argument('--gray-scale', action='store_true', help='gray scale training')
parser.add_argument('--maxabsscaler', '-mas', action='store_true', help='Standarize input to (-1,1)')
opt = parser.parse_args()
opt.save_json = opt.save_json or any([x in opt.data for x in ['coco.data', 'coco2014.data', 'coco2017.data']])
opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0] # find file
opt.data = list(glob.iglob('./**/' + opt.data, recursive=True))[0] # find file
print(opt)
# task = 'test', 'study', 'benchmark'
if opt.task == 'test': # (default) test normally
test(opt.cfg,
opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.augment,
quantized=opt.quantized,
a_bit=opt.a_bit,
w_bit=opt.w_bit,
rank=-1,
is_gray_scale=opt.gray_scale,
maxabsscaler=opt.maxabsscaler,
shortcut_way=opt.shortcut_way)
elif opt.task == 'benchmark': # mAPs at 256-640 at conf 0.5 and 0.7
y = []
for i in list(range(256, 640, 128)): # img-size
for j in [0.6, 0.7]: # iou-thres
t = time.time()
r = test(opt.cfg, opt.data, opt.weights, opt.batch_size, i, opt.conf_thres, j, opt.save_json)[0]
y.append(r + (time.time() - t,))
np.savetxt('benchmark.txt', y, fmt='%10.4g') # y = np.loadtxt('study.txt')
================================================
FILE: train.py
================================================
import argparse
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.tensorboard import SummaryWriter
import test # import test.py to get mAP after each epoch
from models import *
from utils.datasets import *
from utils.utils import *
from utils.prune_utils import *
import math
from torch.cuda import amp
from utils.torch_utils import ModelEMA, select_device # DDP import
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
wdir = 'weights' + os.sep # weights dir
last = wdir + 'last.pt'
best = wdir + 'best.pt'
results_file = 'results.txt'
# Hyperparameters
hyp = {'giou': 3.54, # giou loss gain
'cls': 37.4, # cls loss gain
'cls_pw': 1.0, # cls BCELoss positive_weight
'obj': 64.3, # obj loss gain (*=img_size/320 if img_size != 320)
'obj_pw': 1.0, # obj BCELoss positive_weight
'iou_t': 0.20, # iou training threshold
'lr0': 0.01, # initial learning rate (SGD=5E-3, Adam=5E-4)
'lrf': 0.0005, # final learning rate (with cos scheduler)
'momentum': 0.937, # SGD momentum
'weight_decay': 0.0005, # optimizer weight decay
'fl_gamma': 0.0, # focal loss gamma (efficientDet default is gamma=1.5)
'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction)
'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction)
'hsv_v': 0.36, # image HSV-Value augmentation (fraction)
'degrees': 1.98 * 0, # image rotation (+/- deg)
'translate': 0.05 * 0, # image translation (+/- fraction)
'scale': 0.05 * 0, # image scale (+/- gain)
'shear': 0.641 * 0} # image shear (+/- deg)
# Overwrite hyp with hyp*.txt (optional)
f = glob.glob('hyp*.txt')
if f:
print('Using %s' % f[0])
for k, v in zip(hyp.keys(), np.loadtxt(f[0])):
hyp[k] = v
# Print focal loss if gamma > 0
if hyp['fl_gamma']:
print('Using FocalLoss(gamma=%g)' % hyp['fl_gamma'])
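# Illustrative note (hypothetical filename): the optional hyp*.txt override is a plain
# text file with one value per line, in the same key order as the hyp dict above.
def _demo_write_hyp_override(path='hyp_demo.txt'):
    import numpy as np
    np.savetxt(path, np.array(list(hyp.values())))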
def train(hyp):
cfg = opt.cfg
t_cfg = opt.t_cfg # teacher model cfg for knowledge distillation
data = opt.data
epochs = opt.epochs # 500200 batches at bs 64, 117263 images = 273 epochs
batch_size = opt.batch_size
accumulate = max(round(64 / batch_size), 1) # accumulate n times before optimizer update (bs 64)
weights = opt.weights # initial training weights
t_weights = opt.t_weights # teacher model weights
imgsz_min, imgsz_max, imgsz_test = opt.img_size # img sizes (min, max, test)
# Image Sizes
gs = 32 # (pixels) grid size
start_epoch = 0
assert math.fmod(imgsz_min, gs) == 0, '--img-size %g must be a %g-multiple' % (imgsz_min, gs)
opt.multi_scale |= imgsz_min != imgsz_max # multi if different (min, max)
if opt.multi_scale:
if imgsz_min == imgsz_max:
imgsz_min //= 1.5
imgsz_max //= 0.667
grid_min, grid_max = imgsz_min // gs, imgsz_max // gs
imgsz_min, imgsz_max = int(grid_min * gs), int(grid_max * gs)
img_size = imgsz_max # initialize with max size
# Configure run
init_seeds()
data_dict = parse_data_cfg(data)
train_path = data_dict['train']
test_path = data_dict['valid']
nc = int(data_dict['classes']) # number of classes
hyp['cls'] *= nc / 80 # update coco-tuned hyp['cls'] to current dataset
# Remove previous results
for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
os.remove(f)
# DDP init
if opt.local_rank != -1:
if opt.local_rank == 0:
print("--------------using ddp---------------")
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
opt.batch_size = opt.batch_size // opt.world_size
else:
dist.init_process_group(backend='nccl', # 'distributed backend'
init_method='tcp://127.0.0.1:9999', # distributed training init method
world_size=1, # number of nodes for distributed training
rank=0) # distributed training node rank
# Initialize model
steps = math.ceil(len(open(train_path).readlines()) / batch_size) * epochs
model = Darknet(cfg, quantized=opt.quantized, a_bit=opt.a_bit, w_bit=opt.w_bit,
steps=steps, is_gray_scale=opt.gray_scale, maxabsscaler=opt.maxabsscaler,
shortcut_way=opt.shortcut_way).to(device)
if t_cfg:
t_model = Darknet(t_cfg).to(device)
# print('<.....................using gridmask.......................>')
# gridmask = GridMask(d1=96, d2=224, rotate=360, ratio=0.6, mode=1, prob=0.8)
# Optimizer
if opt.quantized == 2:
pg0, pg1, pg2, pg3 = [], [], [], [] # optimizer parameter groups
else:
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in dict(model.named_parameters()).items():
if '.bias' in k:
pg2 += [v] # biases
elif 'Conv2d.weight' in k:
pg1 += [v] # apply weight_decay
elif 'scale' in k and opt.quantized == 2:
pg3 += [v]
else:
pg0 += [v] # all else
if opt.adam or opt.quantized != -1:
# hyp['lr0'] *= 0.1 # reduce lr (i.e. SGD=5E-3, Adam=5E-4)
optimizer = optim.Adam(pg0, lr=hyp['lr0'] * 0.005)
if opt.quantized == 2:
optimizer.add_param_group({'params': pg3})
# optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
if opt.quantized == 2:
print('Optimizer groups: %g .scale, %g .bias, %g Conv2d.weight, %g other' % (
len(pg3), len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2, pg3
else:
print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
best_fitness = 0.0
if weights != 'None':
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
# possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
chkpt = torch.load(weights, map_location=device)
# load model
try:
chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
model.load_state_dict(chkpt['model'], strict=False)
except KeyError as e:
s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
"See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
raise KeyError(s) from e
# load optimizer
if chkpt['optimizer'] is not None:
optimizer.load_state_dict(chkpt['optimizer'])
if chkpt.get('best_fitness') is not None:
best_fitness = chkpt['best_fitness']
# load results
if chkpt.get('training_results') is not None:
with open(results_file, 'w') as file:
file.write(chkpt['training_results']) # write results.txt
if chkpt.get('epoch') is not None:
start_epoch = chkpt['epoch'] + 1
del chkpt
elif len(weights) > 0: # darknet format
# possible weights are '*.weights', 'yolov3-tiny.conv.15', 'darknet53.conv.74' etc.
load_darknet_weights(model, weights, pt=opt.pt, quant=(opt.quantized != -1))
if t_cfg:
if t_weights.endswith('.pt'):
t_model.load_state_dict(torch.load(t_weights, map_location=device)['model'])
elif t_weights.endswith('.weights'):
load_darknet_weights(t_model, t_weights)
else:
raise Exception('please provide proper teacher weights for knowledge distillation')
t_model.eval()
print('<.....................using knowledge distillation.......................>')
print('teacher model:', t_weights, '\n')
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
if opt.quantized != -1:
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[epochs // 5, epochs // 2, epochs // 1.25],
gamma=0.3)
else:
lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.95 + 0.05 # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
scheduler.last_epoch = start_epoch - 1 # see link below
# https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
# # Plot lr schedule
# y = []
# for _ in range(epochs):
# scheduler.step()
# y.append(optimizer.param_groups[0]['lr'])
# plt.plot(y, '.-', label='LambdaLR')
# plt.xlabel('epoch')
# plt.ylabel('LR')
# plt.tight_layout()
# plt.savefig('LR.png', dpi=300)
# Initialize distributed training
if opt.local_rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, find_unused_parameters=True)
else:
model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
model.yolo_layers = model.module.yolo_layers # move yolo layer indices to top level
# Dataset
dataset = LoadImagesAndLabels(train_path, img_size, batch_size,
augment=True,
hyp=hyp, # augmentation hyperparameters
rect=opt.rect, # rectangular training
cache_images=True,
rank=opt.local_rank,
is_gray_scale=True if opt.gray_scale else False)
testset = LoadImagesAndLabels(test_path, imgsz_test, batch_size // 4,
hyp=hyp,
rect=True,
cache_images=True,
rank=opt.local_rank,
is_gray_scale=True if opt.gray_scale else False)
# get the layers to be pruned
if hasattr(model, 'module'):
print('multi-GPU sparse')
if opt.prune == 0:
print('normal sparse training')
_, _, prune_idx = parse_module_defs(model.module.module_defs)
elif opt.prune == 1:
print('shortcut sparse training')
_, _, prune_idx, _, _ = parse_module_defs2(model.module.module_defs)
elif opt.prune == 2:
print('layer sparse training')
_, _, prune_idx = parse_module_defs4(model.module.module_defs)
else:
print('single-gpu sparse')
if opt.prune == 0:
print('normal sparse training')
_, _, prune_idx = parse_module_defs(model.module_defs)
elif opt.prune == 1:
print('shortcut sparse training')
_, _, prune_idx, _, _ = parse_module_defs2(model.module_defs)
elif opt.prune == 2:
print('layer sparse training')
_, _, prune_idx = parse_module_defs4(model.module_defs)
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) # ddp sampler
test_sampler = torch.utils.data.distributed.DistributedSampler(testset)
# Dataloader
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=int(batch_size / opt.world_size),
num_workers=nw,
shuffle=False if (opt.local_rank != -1) else not opt.rect,
pin_memory=True,
collate_fn=dataset.collate_fn,
sampler=train_sampler if (opt.local_rank != -1) else None
)
# Testloader
testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size // 4,
hyp=hyp,
rect=True,
cache_images=True,
rank=opt.local_rank,
is_gray_scale=True if opt.gray_scale else False),
batch_size=batch_size // 4,
num_workers=nw,
pin_memory=True,
collate_fn=dataset.collate_fn)
if opt.prune != -1:
for idx in prune_idx:
if hasattr(model, 'module'):
bn_weights = gather_bn_weights(model.module.module_list, [idx])
else:
bn_weights = gather_bn_weights(model.module_list, [idx])
tb_writer.add_histogram('before_train_perlayer_bn_weights/hist', bn_weights.numpy(), idx, bins='doane')
# Model parameters
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
# Model EMA
if opt.ema:
ema = torch_utils.ModelEMA(model)
# Start training
nb = len(dataloader) # number of batches
n_burn = max(3 * nb, 500) # burn-in iterations, max(3 epochs, 500 iterations)
maps = np.zeros(nc) # mAP per class
# torch.autograd.set_detect_anomaly(True)
results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
t0 = time.time()
if opt.local_rank == -1 or opt.local_rank == 0:
print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
print('Using %g dataloader workers' % nw)
print('Starting training for %g epochs...' % epochs)
if opt.mpt:
cuda = device.type != 'cpu'
scaler = amp.GradScaler(enabled=cuda)
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
if opt.local_rank != -1:
dataloader.sampler.set_epoch(epoch) # DDP set seed
# gridmask.set_prob(epoch, max_epoch)
model.train()
# sparse-training (sr) flag
if opt.prune == -1:
sr_flag = False
else:
sr_flag = True
# Update image weights (optional)
if dataset.image_weights:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n) # rand weighted idx
mloss = torch.zeros(4).to(device) # mean losses
if opt.local_rank == -1 or opt.local_rank == 0:
print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
pbar = tqdm(enumerate(dataloader), total=nb) # progress bar
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
if opt.maxabsscaler:
imgs = imgs.to(device).float() / 256.0 # uint8 to float32, 0 - 255 to roughly 0.0 - 1.0
imgs = imgs * 2 - 1 # rescale to (-1, 1)
else:
imgs = imgs.to(device).float() / 256.0 # uint8 to float32, 0 - 255 to roughly 0.0 - 1.0
if opt.quantized != -1:
if opt.a_bit == 16:
# simulate 16-bit fixed-point inputs (14 fractional bits): scale up,
# round to nearest, then scale back down
imgs = imgs * (2 ** 14)
sign = torch.sign(imgs)
imgs = sign * torch.floor(torch.abs(imgs) + 0.5)
imgs = imgs / (2 ** 14)
# Burn-in
if ni <= n_burn and opt.quantized == -1:
xi = [0, n_burn] # x interp
model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])
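# A worked sketch of the burn-in interpolation above (hypothetical numbers):
# with batch_size = 16, accumulate ramps from 1 up to round(64 / 16) = 4 over
# the first n_burn iterations, so gradients are accumulated toward an
# effective batch size of 64.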
# Multi-Scale
if opt.multi_scale:
if ni / accumulate % 1 == 0: # adjust img_size (67% - 150%) once per accumulate batches
img_size = random.randrange(grid_min, grid_max + 1) * gs
sf = img_size / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to 32-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Forward
if opt.mpt:
with amp.autocast(enabled=cuda):
targets = targets.to(device)
pred, feature_s = model(imgs)
# Loss
loss, loss_items = compute_loss(pred, targets, model)
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss_items)
return results
soft_target = 0
if t_cfg:
_, output_t, feature_t = t_model(imgs)
if opt.KDstr == 1:
soft_target = compute_lost_KD(pred, output_t, model.nc, imgs.size(0))
elif opt.KDstr == 2:
soft_target, reg_ratio = compute_lost_KD2(model, targets, pred, output_t)
elif opt.KDstr == 3:
soft_target = compute_lost_KD3(model, targets, pred, output_t)
elif opt.KDstr == 4:
soft_target = compute_lost_KD4(model, targets, pred, output_t, feature_s, feature_t,
imgs.size(0))
elif opt.KDstr == 5:
soft_target = compute_lost_KD5(model, targets, pred, output_t, feature_s, feature_t,
imgs.size(0),
img_size)
else:
print("please select KD strategy!")
loss += soft_target
else:
targets = targets.to(device)
pred, feature_s = model(imgs)
# Loss
loss, loss_items = compute_loss(pred, targets, model)
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss_items)
return results
soft_target = 0
if t_cfg:
_, output_t, feature_t = t_model(imgs)
if opt.KDstr == 1:
soft_target = compute_lost_KD(pred, output_t, model.nc, imgs.size(0))
elif opt.KDstr == 2:
soft_target, reg_ratio = compute_lost_KD2(model, targets, pred, output_t)
elif opt.KDstr == 3:
soft_target = compute_lost_KD3(model, targets, pred, output_t)
elif opt.KDstr == 4:
soft_target = compute_lost_KD4(model, targets, pred, output_t, feature_s, feature_t,
imgs.size(0))
elif opt.KDstr == 5:
soft_target = compute_lost_KD5(model, targets, pred, output_t, feature_s, feature_t,
imgs.size(0),
img_size)
else:
print("please select KD strategy!")
loss += soft_target
# Backward
loss *= batch_size / 64 # scale loss
if opt.mpt:
scaler.scale(loss).backward()
else:
loss.backward()
# sparsify the BN gamma (scale) parameters of the layers to be pruned
if hasattr(model, 'module'):
if opt.prune != -1:
BNOptimizer.updateBN(sr_flag, model.module.module_list, opt.s, prune_idx)
else:
if opt.prune != -1:
BNOptimizer.updateBN(sr_flag, model.module_list, opt.s, prune_idx)
# Optimize
if ni % accumulate == 0:
if opt.mpt:
scaler.step(optimizer) # optimizer.step
scaler.update()
else:
optimizer.step()
optimizer.zero_grad()
if opt.ema:
ema.update(model)
# Print
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
pbar.set_description(s)
# Plot
if i == 0:
if not os.path.isdir('train_sample/'):
os.makedirs('train_sample/')
f = 'train_sample/train_batch%g.jpg' % epoch # filename
res = plot_images(images=imgs, targets=targets, paths=paths, fname=f, is_gray_scale=opt.gray_scale)
if tb_writer:
tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Update scheduler
scheduler.step()
# Process epoch results
if opt.ema:
ema.update_attr(model)
if hasattr(model, 'module'):
module_defs, module_list = ema.ema.module.module_defs, ema.ema.module.module_list
else:
module_defs, module_list = ema.ema.module_defs, ema.ema.module_list
for i, (mdef, module) in enumerate(zip(module_defs, module_list)):
if mdef['type'] == 'yolo':
yolo_layer = module
yolo_layer.nx, yolo_layer.ny = 0, 0
if hasattr(model, 'module'):
module_defs, module_list = model.module.module_defs, model.module.module_list
else:
module_defs, module_list = model.module_defs, model.module_list
for i, (mdef, module) in enumerate(zip(module_defs, module_list)):
if mdef['type'] == 'yolo':
yolo_layer = module
yolo_layer.nx, yolo_layer.ny = 0, 0
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
results, maps = test.test(cfg,
data,
batch_size=batch_size // 4,
imgsz=imgsz_test,
model=ema.ema if opt.ema else model,
save_json=final_epoch and is_coco,
dataloader=testloader,
multi_label=ni > n_burn,
quantized=opt.quantized,
a_bit=opt.a_bit,
w_bit=opt.w_bit,
rank=opt.local_rank,
plot=True,
maxabsscaler=opt.maxabsscaler,
shortcut_way=opt.shortcut_way)
torch.cuda.empty_cache()
# Write
if opt.local_rank in [-1, 0]:
with open(results_file, 'a') as f:
f.write(s + '%10.3g' * 7 % results + '\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))
# Tensorboard
if tb_writer:
tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
if opt.prune != -1:
if hasattr(model, 'module'):
bn_weights = gather_bn_weights(model.module.module_list, [idx])
else:
bn_weights = gather_bn_weights(model.module_list, [idx])
tb_writer.add_histogram('bn_weights/hist', bn_weights.numpy(), epoch, bins='doane')
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if opt.ema:
if hasattr(model, 'module'):
model_temp = ema.ema.module.state_dict()
else:
model_temp = ema.ema.state_dict()
else:
if hasattr(model, 'module'):
model_temp = model.module.state_dict()
else:
model_temp = model.state_dict()
if save and dist.get_rank() == 0: # DDP save model only once
with open(results_file, 'r') as f: # create checkpoint
chkpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': f.read(),
'model': model_temp,
'optimizer': None if final_epoch else optimizer.state_dict()}
# Save last, best and delete
torch.save(chkpt, last)
if (best_fitness == fi) and not final_epoch:
torch.save(chkpt, best)
del chkpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
n = opt.name
if len(n):
n = '_' + n if not n.isnumeric() else n
fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith('.pt') # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None # upload
if not opt.evolve:
plot_results() # save as results.png
if opt.local_rank in [-1, 0]:
print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
torch.cuda.empty_cache()
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=300) # 500200 batches at bs 16, 117263 COCO images = 273 epochs
parser.add_argument('--batch-size', type=int, default=16) # effective bs = batch_size * accumulate = 16 * 4 = 64
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--t_cfg', type=str, default='', help='teacher model cfg file path for knowledge distillation')
parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data path')
parser.add_argument('--multi-scale', action='store_true', help='adjust (67%% - 150%%) img_size during training')
parser.add_argument('--img-size', nargs='+', type=int, default=[320, 640], help='[min_train, max_train, test]')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', action='store_true', help='resume training from last.pt')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='initial weights path')
parser.add_argument('--t_weights', type=str, default='', help='teacher model weights')
parser.add_argument('--KDstr', type=int, default=-1, help='KD strategy')
parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--adam', action='store_true', help='use adam optimizer')
parser.add_argument('--ema', action='store_true', help='use ema')
parser.add_argument('--pretrain', '-pt', dest='pt', action='store_true',
help='use pretrain model')
parser.add_argument('--mixedprecision', '-mpt', dest='mpt', action='store_true',
help='use mixed precision training')
parser.add_argument('--s', type=float, default=0.001, help='scale sparse rate')
parser.add_argument('--prune', type=int, default=-1,
help='0: normal or regular prune, 1: shortcut prune, 2: layer prune')
parser.add_argument('--quantized', type=int, default=-1, help='quantization way')
parser.add_argument('--shortcut_way', type=int, default=1, help='--shortcut quantization way')
parser.add_argument('--a-bit', type=int, default=8, help='a-bit')
parser.add_argument('--w-bit', type=int, default=8, help='w-bit')
parser.add_argument('--gray-scale', action='store_true', help='gray scale training')
parser.add_argument('--maxabsscaler', '-mas', action='store_true', help='standardize input to (-1, 1)')
# DDP get local-rank
parser.add_argument('--rank', default=0, help='rank of current process')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
opt = parser.parse_args()
opt.weights = last if opt.resume else opt.weights
opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0] # find file
# opt.data = list(glob.iglob(' ./**/' + opt.data, recursive=True))[0] # find file
if opt.local_rank in [-1, 0]:
print(opt)
opt.img_size.extend([opt.img_size[-1]] * (3 - len(opt.img_size))) # extend to 3 sizes (min, max, test)
# DDP set variables
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
# scale hyp['obj'] by img_size (evolved at 320)
# hyp['obj'] *= opt.img_size[0] / 320.
# DDP set device
if opt.local_rank != -1:
if opt.local_rank == 0:
device = select_device(opt.device, batch_size=opt.batch_size)
device = torch.device('cuda', opt.local_rank)
else:
device = torch_utils.select_device(opt.device, batch_size=opt.batch_size)
tb_writer = None
if not opt.evolve: # Train normally
if opt.local_rank in [-1, 0]:
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter(comment=opt.name)
train(hyp) # train normally
else: # Evolve hyperparameters (optional)
opt.notest, opt.nosave = True, True # only test/save final epoch
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
for _ in range(1): # generations to evolve
if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
method, mp, s = 3, 0.9, 0.2 # method, mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([1, 1, 1, 1, 1, 1, 1, 0, .1, 1, 0, 1, 1, 1, 1, 1, 1, 1]) # gains
ng = len(g)
if method == 1:
v = (npr.randn(ng) * npr.random() * g * s + 1) ** 2.0
elif method == 2:
v = (npr.randn(ng) * npr.random(ng) * g * s + 1) ** 2.0
elif method == 3:
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
# v = (g * (npr.random(ng) < mp) * npr.randn(ng) * s + 1) ** 2.0
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = x[i + 7] * v[i] # mutate
# Clip to limits
keys = ['lr0', 'iou_t', 'momentum', 'weight_decay', 'hsv_s', 'hsv_v', 'translate', 'scale', 'fl_gamma']
limits = [(1e-5, 1e-2), (0.00, 0.70), (0.60, 0.98), (0, 0.001), (0, .9), (0, .9), (0, .9), (0, .9), (0, 3)]
for k, v in zip(keys, limits):
hyp[k] = np.clip(hyp[k], v[0], v[1])
# Train mutation
results = train(hyp.copy())
# Write mutation results
print_mutation(hyp, results, opt.bucket)
# Plot results
# plot_evolution_results(hyp)
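# A worked sketch of the method-3 mutation above (hypothetical numbers): each
# hyperparameter with gain g[i] == 1 mutates with probability mp = 0.9 by a
# multiplicative factor v[i] clipped to [0.3, 3.0]; e.g. a parent lr0 of 0.01
# with a sampled v[i] = 0.8 yields a child lr0 of 0.008, which the
# clip-to-limits loop then keeps inside (1e-5, 1e-2).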
================================================
FILE: utils/__init__.py
================================================
================================================
FILE: utils/adabound.py
================================================
import math
import torch
from torch.optim.optimizer import Optimizer
class AdaBound(Optimizer):
"""Implements AdaBound algorithm.
It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): Adam learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
final_lr (float, optional): final (SGD) learning rate (default: 0.1)
gamma (float, optional): convergence speed of the bound functions (default: 1e-3)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm
.. Adaptive Gradient Methods with Dynamic Bound of Learning Rate:
https://openreview.net/forum?id=Bkg3g2R9FX
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,
eps=1e-8, weight_decay=0, amsbound=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= final_lr:
raise ValueError("Invalid final learning rate: {}".format(final_lr))
if not 0.0 <= gamma < 1.0:
raise ValueError("Invalid gamma parameter: {}".format(gamma))
defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,
weight_decay=weight_decay, amsbound=amsbound)
super(AdaBound, self).__init__(params, defaults)
self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))
def __setstate__(self, state):
super(AdaBound, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsbound', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group, base_lr in zip(self.param_groups, self.base_lrs):
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'AdaBound does not support sparse gradients, please consider SparseAdam instead')
amsbound = group['amsbound']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsbound:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsbound:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsbound:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
# Applies bounds on actual learning rate
# lr_scheduler cannot affect final_lr, this is a workaround to apply lr decay
final_lr = group['final_lr'] * group['lr'] / base_lr
lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))
upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))
step_size = torch.full_like(denom, step_size)
step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)
p.data.add_(-step_size)
return loss
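# A minimal usage sketch (hypothetical model, criterion and loader, not part
# of this repo):
# optimizer = AdaBound(model.parameters(), lr=1e-3, final_lr=0.1)
# for inputs, targets in loader:
#     optimizer.zero_grad()
#     loss = criterion(model(inputs), targets)
#     loss.backward()
#     optimizer.step()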
class AdaBoundW(Optimizer):
"""Implements AdaBound algorithm with Decoupled Weight Decay (arxiv.org/abs/1711.05101)
It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): Adam learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
final_lr (float, optional): final (SGD) learning rate (default: 0.1)
gamma (float, optional): convergence speed of the bound functions (default: 1e-3)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm
.. Adaptive Gradient Methods with Dynamic Bound of Learning Rate:
https://openreview.net/forum?id=Bkg3g2R9FX
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,
eps=1e-8, weight_decay=0, amsbound=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= final_lr:
raise ValueError("Invalid final learning rate: {}".format(final_lr))
if not 0.0 <= gamma < 1.0:
raise ValueError("Invalid gamma parameter: {}".format(gamma))
defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,
weight_decay=weight_decay, amsbound=amsbound)
super(AdaBoundW, self).__init__(params, defaults)
self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))
def __setstate__(self, state):
super(AdaBoundW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsbound', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group, base_lr in zip(self.param_groups, self.base_lrs):
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'AdaBound does not support sparse gradients, please consider SparseAdam instead')
amsbound = group['amsbound']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsbound:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsbound:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsbound:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
# Applies bounds on actual learning rate
# lr_scheduler cannot affect final_lr, this is a workaround to apply lr decay
final_lr = group['final_lr'] * group['lr'] / base_lr
lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))
upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))
step_size = torch.full_like(denom, step_size)
step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)
if group['weight_decay'] != 0:
decayed_weights = torch.mul(p.data, group['weight_decay'])
p.data.add_(-step_size)
p.data.sub_(decayed_weights)
else:
p.data.add_(-step_size)
return loss
================================================
FILE: utils/datasets.py
================================================
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
class LoadImages: # for inference
def __init__(self, path, img_size=416, is_gray_scale=False, rect=False):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
self.is_gray_scale = is_gray_scale
self.rect = rect
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in ' + path
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
if self.is_gray_scale:
img0 = cv2.imread(path, flags=cv2.IMREAD_GRAYSCALE) # gray scale
img0 = np.expand_dims(img0, axis=-1)
else:
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
if self.rect:
img = letterbox(img0, new_shape=self.img_size, is_gray_scale=self.is_gray_scale)[0]
else:
img = letterbox(img0, new_shape=self.img_size, auto=False, is_gray_scale=self.is_gray_scale)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1).copy() # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, rank=-1, is_gray_scale=False, subset_len=-1):
path = str(Path(path)) # os-agnostic
assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)
with open(path, 'r') as f:
self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic
if os.path.splitext(x)[-1].lower() in img_formats]
if subset_len != -1:
assert subset_len <= len(self.img_files)
self.img_files = random.sample(self.img_files, subset_len)
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.is_gray_scale = is_gray_scale
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = path.replace('.txt', '.shapes') # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
i = ar.argsort()
self.img_files = [self.img_files[j] for j in i]
self.label_files = [self.label_files[j] for j in i]
self.shapes = s[i] # wh
ar = ar[i]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(np.int) * 32
# Cache labels
self.imgs = [None] * n
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
extract_bounding_boxes = False
create_datasubset = False
pbar = tqdm(self.label_files, desc='Caching labels')
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
for i, file in enumerate(pbar):
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
nf, nm, ne, nd, n)
assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index, self.is_gray_scale)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index, self.is_gray_scale)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
if not self.is_gray_scale:
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
if not self.is_gray_scale:
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
if self.is_gray_scale:
img = np.expand_dims(img, axis=0)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
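# e.g. a batch of 16 images with 40 boxes in total collates to imgs of shape
# (16, C, H, W) and labels of shape (40, 6), where column 0 holds each box's
# image index within the batch (consumed later by build_targets()).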
def load_image(self, index, is_gray_scale=False):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
if is_gray_scale:
img = cv2.imread(path, flags=cv2.IMREAD_GRAYSCALE) # gray scale
img = np.expand_dims(img, axis=-1)
else:
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r < 1 or (self.augment and r != 1): # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
if is_gray_scale:
img = np.expand_dims(img, axis=-1)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
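# A short note on the LUT math above: with gains of 0.5 each channel multiplier
# is drawn from [0.5, 1.5]; hue wraps modulo 180 (OpenCV's hue range for uint8
# images) while saturation and value are clipped to [0, 255].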
def load_mosaic(self, index, is_gray_scale=False):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index, is_gray_scale)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=-s // 2) # border to remove
return img4, labels4
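# A worked sketch of the tile placement above (hypothetical numbers): with
# s = 416, xc = yc = 500 and a 416x416 source image, tile 0 (top left) gives
# x1a..x2a = 84..500 and x1b..x2b = 0..416, so padw = padh = 84 and the
# pixel-space label coordinates are shifted by +84 on both axes.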
def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True,
is_gray_scale=False):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
if is_gray_scale:
img = np.expand_dims(img, axis=-1)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
if is_gray_scale:
img = np.expand_dims(img, axis=-1)
return img, ratio, (dw, dh)
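# A worked sketch of the padding arithmetic above (hypothetical 1280x720 frame,
# new_shape = 416): r = min(416 / 720, 416 / 1280) = 0.325, new_unpad = (416, 234),
# dw, dh = 0, 182; with auto=True the padding shrinks to dh = 182 % 64 = 54,
# split as 27 px on top and 27 px on the bottom.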
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
if targets is None: # targets = [cls, xyxy]
targets = []
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
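# e.g. a random mask covering 70% of a label's box area gives ioa = 0.7 > 0.60,
# so that label is dropped as mostly obscured.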
# class FenceMask(torch.nn.Module):
# def __init__(self, img_size, mean, probability=0.8):
# super(FenceMask, self).__init__()
# self.x = torch.nn.Parameter((0.25 - 0.05) * torch.rand(1) + 0.05, requires_grad=True)
# self.y = torch.nn.Parameter((0.25 - 0.05) * torch.rand(1) + 0.05, requires_grad=True)
# self.l1 = torch.nn.Parameter((0.25 - 0.05) * torch.rand(1) + 0.05, requires_grad=True)
# self.l2 = torch.nn.Parameter((0.25 - 0.05) * torch.rand(1) + 0.05, requires_grad=True)
# self.mean = mean
# self.probability = probability
# self.st_prob = self.prob = probability
# self.img_size = img_size
#
# def set_prob(self, epoch, max_epoch):
# self.prob = self.st_prob * min(1, epoch / max_epoch)
#
# def forward(self, x):
# if not self.training:
# return x
# n, c, h, w = x.size()
# imgs = []
# masks = []
# for i in range(n):
# img, mask = self.Fence(x[i])
# imgs.append(img)
# masks.append(mask)
# imgs = torch.cat(imgs).view(n, c, h, w)
# masks = torch.cat(masks).view(n, c, h, w)
# return imgs, masks
#
# def Fence(self, img):
# if random.uniform(0, 1) > self.prob:
# mask = img.new_ones(img.shape)
# return img, mask
#
# sp = img.shape
# height, width = sp[1], sp[2]
#
# # mask_1 holds the horizontal stripes, mask_2 the vertical stripes
# mask_1 = np.ones(shape=(sp[1], sp[2], 3))
# mask_2 = np.ones(shape=(sp[1], sp[2], 3))
# x, y, l1, l2 = int(self.x * self.img_size), int(self.y * self.img_size), int(self.l1 * self.img_size), int(self.l2 * self.img_size)
# for i in range(1, height // (l1 + x) + 1):
# mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 0] = self.mean[0]
# mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 1] = self.mean[1]
# mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 2] = self.mean[2]
# for i in range(1, width // (l2 + y) + 1):
# mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 0] = self.mean[0]
# mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 1] = self.mean[1]
# mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 2] = self.mean[2]
#
# # randomly rotate the two generated masks by some angle
# center = (width / 2, height / 2)
# rotation_1, rotation_2 = random.randint(0, 360), random.randint(0, 360)
# M_1 = cv2.getRotationMatrix2D(center, rotation_1, 2)
# M_2 = cv2.getRotationMatrix2D(center, rotation_2, 2)
# mask_1 = cv2.warpAffine(mask_1, M_1, (width, height))
# mask_2 = cv2.warpAffine(mask_2, M_2, (width, height))
#
# mask = (mask_1 * mask_2)
# # cv2.imwrite('mask.png', mask * 255)
# mask = mask.transpose(2, 0, 1)
# mask = torch.from_numpy(mask).float().cuda()
# img = img * mask
# return img, mask
class FenceMask(torch.nn.Module):
def __init__(self, batch_size, img_size, probability):
super(FenceMask, self).__init__()
self.img_size = img_size
self.batch_size = batch_size
self.group_size = 10
self.group_number = None
group_masks = []
for j in range(self.group_size):
masks = []
for k in range(batch_size):
x = random.randint(self.img_size / 32, self.img_size / 16)
y = random.randint(self.img_size / 32, self.img_size / 16)
l1 = random.randint(self.img_size / 16, self.img_size / 8)
l2 = random.randint(self.img_size / 16, self.img_size / 8)
# mask_1 holds the horizontal stripes, mask_2 the vertical stripes
mask_1 = np.ones(shape=(self.img_size, self.img_size, 3))
mask_2 = np.ones(shape=(self.img_size, self.img_size, 3))
height = self.img_size
width = self.img_size
for i in range(1, height // (l1 + x) + 1):
mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 0] = 0
mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 1] = 0
mask_1[i * l1 + (i - 1) * x:i * (l1 + x):, 0:, 2] = 0
for i in range(1, width // (l2 + y) + 1):
mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 0] = 0
mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 1] = 0
mask_2[0:, i * l2 + (i - 1) * y:i * (l2 + y), 2] = 0
# randomly rotate the two generated masks by some angle
center = (width / 2, height / 2)
rotation_1, rotation_2 = random.randint(0, 360), random.randint(0, 360)
M_1 = cv2.getRotationMatrix2D(center, rotation_1, 2)
M_2 = cv2.getRotationMatrix2D(center, rotation_2, 2)
mask_1 = cv2.warpAffine(mask_1, M_1, (width, height))
mask_2 = cv2.warpAffine(mask_2, M_2, (width, height))
mask = (mask_1 * mask_2)
# cv2.imwrite('mask.png', mask * 255)
mask = mask.transpose(2, 0, 1)
mask = torch.from_numpy(mask).unsqueeze(0)
masks.append(mask)
masks = torch.cat(masks, dim=0).int()
mask_white = (0.5 * torch.rand((batch_size, 3, img_size, img_size)) + 0.5) * masks
mask_black = (0.5 * torch.rand((batch_size, 3, img_size, img_size))) * (1 - masks)
masks = mask_black + mask_white
group_masks.append(masks.unsqueeze(0))
group_masks = torch.cat(group_masks, dim=0)
self.group_masks = torch.nn.Parameter(group_masks, requires_grad=True)
self.st_prob = self.prob = probability
def forward(self, x):
masks = None
if random.uniform(0, 1) > self.prob:
return x, masks
if x.size(0) != self.group_masks.size(1):
return x, masks
# for img in x:
# img =img.cpu().detach().numpy()
# image = img.transpose(1, 2, 0)
# cv2.imshow('image', image)
# cv2.waitKey(500)
# masks = binarize(self.masks)
self.group_number = random.randrange(self.group_size)
masks = self.group_masks[self.group_number]
# for img in (x*masks):
# img = img.cpu().detach().numpy()
# image = img.transpose(1, 2, 0)
# cv2.imshow('image', image)
# cv2.waitKey(500)
return x * masks, masks
def set_prob(self, epoch, max_epoch):
self.prob = self.st_prob * min(1, epoch / max_epoch)
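# Usage sketch (an assumption about the intended wiring, not code from this repo):
# FenceMask pre-builds group_size batches of learnable striped masks and multiplies
# a randomly chosen group onto the input batch:
#   fence = FenceMask(batch_size=16, img_size=416, probability=0.7)
#   fence.set_prob(epoch, max_epoch)   # anneal the apply probability
#   imgs, masks = fence(imgs)          # imgs: (16, 3, 416, 416); masks is None when skipped
# Note the forward pass is a no-op when the incoming batch size differs from batch_size.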
class Grid(object):
def __init__(self, d1, d2, rotate=1, ratio=0.5, mode=0, prob=1.):
self.d1 = d1
self.d2 = d2
self.rotate = rotate
self.ratio = ratio
self.mode = mode
self.st_prob = self.prob = prob
def set_prob(self, epoch, max_epoch):
self.prob = self.st_prob * min(1, epoch / max_epoch)
def __call__(self, img):
if np.random.rand() > self.prob:
return img
h = img.shape[1]
w = img.shape[2]
# 1.5 * h, 1.5 * w works fine with square images,
# but with rectangular input the mask might not recover the full input image shape.
# A square mask with edge length equal to the diagonal of the input image
# can cover every spot of the image after rotation; it is also the minimum such square.
hh = math.ceil((math.sqrt(h * h + w * w)))
d = np.random.randint(self.d1, self.d2)
# d = self.d
# maybe use ceil? but it likely makes no big difference
self.l = math.ceil(d * self.ratio)
mask = np.ones((hh, hh), np.float32)
st_h = np.random.randint(d)
st_w = np.random.randint(d)
for i in range(-1, hh // d + 1):
s = d * i + st_h
t = s + self.l
s = max(min(s, hh), 0)
t = max(min(t, hh), 0)
mask[s:t, :] *= 0
for i in range(-1, hh // d + 1):
s = d * i + st_w
t = s + self.l
s = max(min(s, hh), 0)
t = max(min(t, hh), 0)
mask[:, s:t] *= 0
r = np.random.randint(self.rotate)
mask = Image.fromarray(np.uint8(mask))
mask = mask.rotate(r)
mask = np.asarray(mask)
mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (hh - w) // 2:(hh - w) // 2 + w]
mask = torch.from_numpy(mask).float().cuda()
if self.mode == 1:
mask = 1 - mask
mask = mask.expand_as(img)
img = img * mask
return img
class GridMask(torch.nn.Module):
def __init__(self, d1, d2, rotate=1, ratio=0.5, mode=0, prob=1.):
super(GridMask, self).__init__()
self.rotate = rotate
self.ratio = ratio
self.mode = mode
self.st_prob = prob
self.grid = Grid(d1, d2, rotate, ratio, mode, prob)
def set_prob(self, epoch, max_epoch):
self.grid.set_prob(epoch, max_epoch)
def forward(self, x):
if not self.training:
return x
n, c, h, w = x.size()
y = []
for i in range(n):
y.append(self.grid(x[i]))
y = torch.cat(y).view(n, c, h, w)
return y
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
================================================
FILE: utils/gcp.sh
================================================
#!/usr/bin/env bash
# New VM
rm -rf sample_data yolov3
git clone https://github.com/ultralytics/yolov3
# git clone -b test --depth 1 https://github.com/ultralytics/yolov3 test # branch
# sudo apt-get install zip
#git clone https://github.com/NVIDIA/apex && cd apex && pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . --user && cd .. && rm -rf apex
sudo conda install -yc conda-forge scikit-image pycocotools
# python3 -c "from yolov3.utils.google_utils import gdrive_download; gdrive_download('193Zp_ye-3qXMonR1nZj3YyxMtQkMy50k','coco2014.zip')"
python3 -c "from yolov3.utils.google_utils import gdrive_download; gdrive_download('1WQT6SOktSe8Uw6r10-2JhbEhMY5DJaph','coco2017.zip')"
python3 -c "from yolov3.utils.google_utils import gdrive_download; gdrive_download('1C3HewOG9akA3y456SZLBJZfNDPkBwAto','knife.zip')"
python3 -c "from yolov3.utils.google_utils import gdrive_download; gdrive_download('13g3LqdpkNE8sPosVJT6KFXlfoMypzRP4','sm4.zip')"
sudo shutdown
# Mount local SSD
lsblk
sudo mkfs.ext4 -F /dev/nvme0n1
sudo mkdir -p /mnt/disks/nvme0n1
sudo mount /dev/nvme0n1 /mnt/disks/nvme0n1
sudo chmod a+w /mnt/disks/nvme0n1
cp -r coco /mnt/disks/nvme0n1
# Kill All
t=ultralytics/yolov3:v1
docker kill $(docker ps -a -q --filter ancestor=$t)
# Evolve coco
sudo -s
t=ultralytics/yolov3:evolve
# docker kill $(docker ps -a -q --filter ancestor=$t)
for i in 0 1 6 7
do
docker pull $t && docker run --gpus all -d --ipc=host -v "$(pwd)"/coco:/usr/src/coco $t bash utils/evolve.sh $i
sleep 30
done
#COCO training
n=131 && t=ultralytics/coco:v131 && sudo docker pull $t && sudo docker run -it --gpus all --ipc=host -v "$(pwd)"/coco:/usr/src/coco $t python3 train.py --data coco2014.data --img-size 320 640 --epochs 300 --batch 16 --weights '' --device 0 --cfg yolov3-spp.cfg --bucket ult/coco --name $n && sudo shutdown
n=132 && t=ultralytics/coco:v131 && sudo docker pull $t && sudo docker run -it --gpus all --ipc=host -v "$(pwd)"/coco:/usr/src/coco $t python3 train.py --data coco2014.data --img-size 320 640 --epochs 300 --batch 64 --weights '' --device 0 --cfg yolov3-tiny.cfg --bucket ult/coco --name $n && sudo shutdown
================================================
FILE: utils/google_utils.py
================================================
# This file contains google utils: https://cloud.google.com/storage/docs/reference/libraries
# pip install --upgrade google-cloud-storage
import os
import time
# from google.cloud import storage
def gdrive_download(id='1HaXkef9z6y5l4vUnCYgdmEAj61c6bfWO', name='coco.zip'):
# https://gist.github.com/tanaikech/f0f2d122e05bf5f971611258c22c110f
# Downloads a file from Google Drive, handling the confirmation cookie for large files
# from utils.google_utils import *; gdrive_download()
t = time.time()
print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')
os.remove(name) if os.path.exists(name) else None # remove existing
os.remove('cookie') if os.path.exists('cookie') else None
# Attempt file download
os.system("curl -c ./cookie -s -L \"https://drive.google.com/uc?export=download&id=%s\" > /dev/null" % id)
if os.path.exists('cookie'): # large file
s = "curl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=%s\" -o %s" % (
id, name)
else: # small file
s = "curl -s -L -o %s 'https://drive.google.com/uc?export=download&id=%s'" % (name, id)
r = os.system(s) # execute, capture return values
os.remove('cookie') if os.path.exists('cookie') else None
# Error check
if r != 0:
os.remove(name) if os.path.exists(name) else None # remove partial
print('Download error ') # raise Exception('Download error')
return r
# Unzip if archive
if name.endswith('.zip'):
print('unzipping... ', end='')
os.system('unzip -q %s' % name) # unzip
os.remove(name) # remove zip to free space
print('Done (%.1fs)' % (time.time() - t))
return r
def upload_blob(bucket_name, source_file_name, destination_blob_name):
# Uploads a file to a bucket
# https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
print('File {} uploaded to {}.'.format(
source_file_name,
destination_blob_name))
def download_blob(bucket_name, source_blob_name, destination_file_name):
# Downloads a blob from a bucket
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
print('Blob {} downloaded to {}.'.format(
source_blob_name,
destination_file_name))
================================================
FILE: utils/layers.py
================================================
from utils.utils import *
def make_divisible(v, divisor):
# Function ensures all layers have a channel number that is divisible by divisor
# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
return math.ceil(v / divisor) * divisor
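# Worked example: make_divisible rounds up to the next multiple of divisor,
# e.g. make_divisible(30, 8) == 32 and make_divisible(32, 8) == 32.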
class Flatten(nn.Module):
# Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
def forward(self, x):
return x.view(x.size(0), -1)
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
class FeatureConcat(nn.Module):
def __init__(self, layers, groups):
super(FeatureConcat, self).__init__()
self.layers = layers # layer indices
self.groups = groups
self.multiple = len(layers) > 1 # multiple layers flag
def forward(self, x, outputs):
if self.multiple:
return torch.cat([outputs[i] for i in self.layers], 1)
else:
if self.groups:
return x[:, (x.shape[1] // 2):]
else:
return outputs[self.layers[0]]
class Shortcut(nn.Module): # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, layers, weight=False):
super(Shortcut, self).__init__()
self.layers = layers # layer indices
self.weight = weight # apply weights boolean
self.n = len(layers) + 1 # number of layers
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True) # layer weights
def forward(self, x, outputs):
# Weights
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
x = x * w[0]
# Fusion
nx = x.shape[1] # input channels
for i in range(self.n - 1):
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]] # feature to add
na = a.shape[1] # feature channels
# Adjust channels
if nx == na: # same shape
x = x + a
elif nx > na: # slice input
x[:, :na] = x[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
x = x + a[:, :nx]
return x
class MixConv2d(nn.Module): # MixConv: Mixed Depthwise Convolutional Kernels https://arxiv.org/abs/1907.09595
def __init__(self, in_ch, out_ch, k=(3, 5, 7), stride=1, dilation=1, bias=True, method='equal_params'):
super(MixConv2d, self).__init__()
groups = len(k)
if method == 'equal_ch': # equal channels per group
i = torch.linspace(0, groups - 1E-6, out_ch).floor() # out_ch indices
ch = [(i == g).sum() for g in range(groups)]
else: # 'equal_params': equal parameter count per group
b = [out_ch] + [0] * groups
a = np.eye(groups + 1, groups, k=-1)
a -= np.roll(a, 1, axis=1)
a *= np.array(k) ** 2
a[0] = 1
ch = np.linalg.lstsq(a, b, rcond=None)[0].round().astype(int) # solve for equal weight indices, ax = b
self.m = nn.ModuleList([nn.Conv2d(in_channels=in_ch,
out_channels=ch[g],
kernel_size=k[g],
stride=stride,
padding=k[g] // 2, # 'same' pad
dilation=dilation,
bias=bias) for g in range(groups)])
def forward(self, x):
return torch.cat([m(x) for m in self.m], 1)
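# Usage sketch (hypothetical shapes): MixConv2d splits the output channels across
# parallel 3x3/5x5/7x7 convolutions and concatenates the results along dim 1:
#   m = MixConv2d(in_ch=32, out_ch=64, k=(3, 5, 7), stride=1)
#   y = m(torch.randn(1, 32, 56, 56))   # y.shape == (1, 64, 56, 56)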
# Activation functions below -------------------------------------------------------------------------------------------
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x * torch.sigmoid(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
sx = torch.sigmoid(x) # sigmoid(ctx)
return grad_output * (sx * (1 + x * (1 - sx)))
class MishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
sx = torch.sigmoid(x)
fx = F.softplus(x).tanh()
return grad_output * (fx + x * sx * (1 - fx * fx))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
class MemoryEfficientMish(nn.Module):
def forward(self, x):
return MishImplementation.apply(x)
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
class Mish(nn.Module): # https://github.com/digantamisra98/Mish
def forward(self, x):
return x * F.softplus(x).tanh()
class ReLU6(nn.Module):
def __init__(self):
super(ReLU6, self).__init__()
def forward(self, x):
return F.relu6(x, inplace=True)
class HardSwish(nn.Module):
def __init__(self):
super(HardSwish, self).__init__()
def forward(self, x):
return x * (F.relu6(x + 3.0, inplace=True) / 6.0)
class HardSigmoid(nn.Module):
def __init__(self):
super(HardSigmoid, self).__init__()
def forward(self, x):
out = F.relu6(x + 3.0, inplace=True) / 6.0
return out
class SE(nn.Module):
def __init__(self, channel, reduction=4):
super(SE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
HardSigmoid()
# nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
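# Usage sketch (hypothetical shapes): SE squeezes each channel to a scalar via global
# average pooling, runs it through the bottleneck fc, and rescales the input channel-wise:
#   se = SE(channel=64, reduction=4)    # fc: 64 -> 16 -> 64, HardSigmoid gate
#   y = se(torch.randn(2, 64, 28, 28))  # same shape as the input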
================================================
FILE: utils/output_upsample.py
================================================
import numpy as np
import torch
import torch.nn.functional as F
import os
from utils.parse_config import *
# cfg = './cfg/prune_regular_0.8_keep_0.01_10_shortcut_yolov3-ship.cfg'
def Val_upsample(cfg, TN):
# if not os.path.isdir('./validation'):
# os.makedirs('./validation')
module_defs = parse_model_cfg(cfg)
#_ = module_defs.pop(0) # cfg training hyperparams (unused)
upsample_times = 0 # upsample counter (which upsample layer we are at)
for i, mdef in enumerate(module_defs):
if mdef['type'] == 'net':
width = mdef['width']
height = mdef['height']
channels = mdef['channels']
elif mdef['type'] == 'upsample':
upsample_times = upsample_times + 1
layer_idx = i - 1
activation_input = np.loadtxt('./quantizer_output/q_activation_out/q_activation_00%d_conv.txt'%(layer_idx-1))
input_scale = np.loadtxt('./quantizer_output/a_scale_out/a_scale_00%d_conv.txt'%(layer_idx-1))
Up_channels = int(256 / upsample_times)
Up_width = int((width * upsample_times) /32)
Up_height = int((height * upsample_times) /32)
activation_input = torch.from_numpy(activation_input).view(1, Up_channels, Up_height, Up_width)
# upsample; stride is the upsampling factor
stride = 2
temp_out = F.upsample(input=activation_input,scale_factor=stride)
# reorder
a_para = temp_out
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / TN)
remainder_TN = shape_input % TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * TN:(k + 1) * TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/%d_upsample_reorder.txt' % layer_idx),
q_activation_reorder, delimiter='\n')
### save the reordered binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%d_upsample_q_bin' % layer_idx, "wb")
writer.write(activation_flat)
writer.close()
########## end of feature-map reordering
input_scale = torch.from_numpy(input_scale)
# save the upsampled txt file
# val_results = np.array(temp_out.cpu()).reshape(1, -1)
# np.savetxt(('./quantizer_output/q_activation_reorder/%d_upsample_output.txt'%layer_idx), val_results,delimiter='\n')
#
output_scale = input_scale
output_scale = np.array(output_scale.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/%d_upsample_scale.txt'%layer_idx), output_scale,delimiter='\n')
#
# ### save the binary file
# activation_flat = val_results.astype(np.int8)
# writer = open('./quantizer_output/q_activation_reorder/%d_upsample_q_bin'%layer_idx, "wb")
# writer.write(activation_flat)
# writer.close()
# Val_upsample(cfg,32)
# import argparse
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
# opt = parser.parse_args()
# opt.cfg = list(glob.iglob('./**/' + opt.cfg, recursive=True))[0] # find file
================================================
FILE: utils/parse_config.py
================================================
import os
import numpy as np
def parse_model_cfg(path):
# Parses the yolo *.cfg file and returns module definitions; path may be 'cfg/yolov3.cfg', 'yolov3.cfg', or 'yolov3'
if not path.endswith('.cfg'): # add .cfg suffix if omitted
path += '.cfg'
if not os.path.exists(path) and os.path.exists('cfg' + os.sep + path): # add cfg/ prefix if omitted
path = 'cfg' + os.sep + path
with open(path, 'r') as f:
lines = f.read().split('\n')
lines = [x for x in lines if x and not x.startswith('#')]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
mdefs = [] # module definitions
for line in lines:
if line.startswith('['): # This marks the start of a new block
mdefs.append({})
mdefs[-1]['type'] = line[1:-1].rstrip()
if mdefs[-1]['type'] == 'convolutional':
mdefs[-1]['batch_normalize'] = 0 # pre-populate with zeros (may be overwritten later)
else:
key, val = line.split("=")
key = key.rstrip()
if key == 'anchors': # return nparray
mdefs[-1][key] = np.array([float(x) for x in val.split(',')]).reshape((-1, 2)) # np anchors
elif (key in ['from', 'layers', 'mask']) or (key == 'size' and ',' in val): # return array
mdefs[-1][key] = [int(x) for x in val.split(',')]
else:
val = val.strip()
if val.isnumeric(): # return int or float
mdefs[-1][key] = int(val) if (int(val) - float(val)) == 0 else float(val)
else:
mdefs[-1][key] = val # return string
# Check all fields are supported
supported = ['type', 'batch_normalize', 'filters', 'size', 'stride', 'pad', 'activation', 'layers', 'groups',
'reduction', 'from', 'mask', 'anchors', 'classes', 'num', 'jitter', 'ignore_thresh', 'truth_thresh',
'random', 'stride_x', 'stride_y', 'weights_type', 'weights_normalization', 'scale_x_y', 'beta_nms',
'nms_kind', 'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh', 'group_id', 'resize']
f = [] # fields
for x in mdefs[1:]:
[f.append(k) for k in x if k not in f]
u = [x for x in f if x not in supported] # unsupported fields
assert not any(u), "Unsupported fields %s in %s. See https://github.com/ultralytics/yolov3/issues/631" % (u, path)
return mdefs
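# Usage sketch: each cfg block becomes one dict, with the leading [net] block at index 0:
#   mdefs = parse_model_cfg('cfg/yolov3/yolov3.cfg')
#   mdefs[0]['type']   # 'net' (training hyperparameters)
#   mdefs[1]['type']   # 'convolutional', with 'batch_normalize' pre-populated to 0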
def parse_data_cfg(path):
# Parses the data configuration file
if not os.path.exists(path) and os.path.exists('data' + os.sep + path): # add data/ prefix if omitted
path = 'data' + os.sep + path
with open(path, 'r') as f:
lines = f.readlines()
options = dict()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
key, val = line.split('=')
options[key.strip()] = val.strip()
return options
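# Usage sketch: parse_data_cfg returns a plain dict of the key=value pairs; values stay
# strings, so callers cast as needed. For a file like data/coco2017.data one would
# expect keys such as 'classes', 'train', 'valid' and 'names':
#   data_dict = parse_data_cfg('data/coco2017.data')
#   nc = int(data_dict['classes'])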
================================================
FILE: utils/prune_utils.py
================================================
import torch
from terminaltables import AsciiTable
from copy import deepcopy
import numpy as np
import torch.nn.functional as F
def parse_module_defs2(module_defs):
CBL_idx = []
Other_idx = []
shortcut_idx = dict()
shortcut_all = set()
ignore_idx = set()
for i, module_def in enumerate(module_defs):
if module_def['type'] == 'convolutional':
if module_def['batch_normalize']:
CBL_idx.append(i)
else:
Other_idx.append(i)
if module_defs[i + 1]['type'] == 'maxpool' and module_defs[i + 2]['type'] == 'route':
# do not prune the CBL before SPP (distinguishes SPP from tiny)
ignore_idx.add(i)
if module_defs[i + 1]['type'] == 'route' and 'groups' in module_defs[i + 1]:
ignore_idx.add(i)
elif module_def['type'] == 'depthwise':
Other_idx.append(i)
# do not prune the layer preceding a depthwise convolution
ignore_idx.add(i - 1)
elif module_def['type'] == 'se':
Other_idx.append(i)
# do not prune the conv layer before an upsample layer
elif module_def['type'] == 'upsample':
ignore_idx.add(i - 1)
elif module_def['type'] == 'shortcut':
identity_idx = (i + int(module_def['from'][0]))
if module_defs[identity_idx]['type'] == 'convolutional':
# ignore_idx.add(identity_idx)
shortcut_idx[i - 1] = identity_idx
shortcut_all.add(identity_idx)
elif module_defs[identity_idx]['type'] == 'shortcut':
# ignore_idx.add(identity_idx - 1)
shortcut_idx[i - 1] = identity_idx - 1
shortcut_all.add(identity_idx - 1)
shortcut_all.add(i - 1)
prune_idx = [idx for idx in CBL_idx if idx not in ignore_idx]
return CBL_idx, Other_idx, prune_idx, shortcut_idx, shortcut_all
def parse_module_defs(module_defs):
CBL_idx = []
Other_idx = []
ignore_idx = set()
for i, module_def in enumerate(module_defs):
if module_def['type'] == 'convolutional':
if module_def['batch_normalize']:
CBL_idx.append(i)
else:
Other_idx.append(i)
if module_defs[i + 1]['type'] == 'maxpool' and module_defs[i + 2]['type'] == 'route':
# do not prune the CBL before SPP (distinguishes from tiny)
ignore_idx.add(i)
if module_defs[i + 1]['type'] == 'route' and 'groups' in module_defs[i + 1]:
ignore_idx.add(i)
elif module_def['type'] == 'depthwise':
Other_idx.append(i)
# do not prune the layer preceding a depthwise convolution
ignore_idx.add(i - 1)
elif module_def['type'] == 'se':
Other_idx.append(i)
# do not prune the layer before a shortcut, nor the shortcut's source layer
elif module_def['type'] == 'shortcut':
ignore_idx.add(i - 1)
identity_idx = (i + int(module_def['from'][0]))
if module_defs[identity_idx]['type'] == 'convolutional':
ignore_idx.add(identity_idx)
elif module_defs[identity_idx]['type'] == 'shortcut':
ignore_idx.add(identity_idx - 1)
# do not prune the conv layer before an upsample layer
elif module_def['type'] == 'upsample':
ignore_idx.add(i - 1)
prune_idx = [idx for idx in CBL_idx if idx not in ignore_idx]
return CBL_idx, Other_idx, prune_idx
def parse_module_defs4(module_defs):
CBL_idx = []
Conv_idx = []
shortcut_idx = []
for i, module_def in enumerate(module_defs):
if module_def['type'] == 'convolutional':
if module_def['batch_normalize']:
CBL_idx.append(i)
else:
Conv_idx.append(i)
elif module_def['type'] == 'shortcut':
shortcut_idx.append(i - 1)
return CBL_idx, Conv_idx, shortcut_idx
def gather_bn_weights(module_list, prune_idx):
size_list = [module_list[idx][1].weight.data.shape[0] for idx in prune_idx]
bn_weights = torch.zeros(sum(size_list))
index = 0
for idx, size in zip(prune_idx, size_list):
bn_weights[index:(index + size)] = module_list[idx][1].weight.data.abs().clone()
index += size
return bn_weights
def write_cfg(cfg_file, module_defs):
with open(cfg_file, 'w') as f:
for module_def in module_defs:
f.write(f"[{module_def['type']}]\n")
for key, value in module_def.items():
if key != 'type':
f.write(f"{key}={value}\n")
f.write("\n")
return cfg_file
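# Round-trip sketch (illustrative only): after the pruning code edits the 'filters'
# entries to match the channel masks, write_cfg serializes the module definitions back
# into darknet cfg syntax:
#   module_defs = parse_model_cfg('cfg/yolov3/yolov3.cfg')
#   ...adjust module_defs[i]['filters'] from the masks...
#   write_cfg('pruned.cfg', module_defs)   # keep the leading [net] block in the list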
class BNOptimizer():
@staticmethod
def updateBN(sr_flag, module_list, s, prune_idx):
if sr_flag:
for idx in prune_idx:
# Squential(Conv, BN, Lrelu)
bn_module = module_list[idx][1]
bn_module.weight.grad.data.add_(s * torch.sign(bn_module.weight.data)) # L1
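# Call-site sketch (an assumption about how train.py wires sparsity training): after
# loss.backward() and before optimizer.step(), add the L1 subgradient s * sign(gamma)
# to the BN-weight gradients of the prunable layers:
#   BNOptimizer.updateBN(sr_flag=opt.sr, module_list=model.module_list, s=0.001, prune_idx=prune_idx)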
def obtain_quantiles(bn_weights, num_quantile=5):
sorted_bn_weights, i = torch.sort(bn_weights)
total = sorted_bn_weights.shape[0]
quantiles = sorted_bn_weights.tolist()[-1::-total // num_quantile][::-1]
print("\nBN weights quantile:")
quantile_table = [
[f'{i}/{num_quantile}' for i in range(1, num_quantile + 1)],
["%.3f" % quantile for quantile in quantiles]
]
print(AsciiTable(quantile_table).table)
return quantiles
def get_input_mask(module_defs, idx, CBLidx2mask, is_gray_scale=False):
if idx == 0:
if not is_gray_scale:
return np.ones(3)
else:
return np.ones(1)
if module_defs[idx - 1]['type'] == 'convolutional':
return CBLidx2mask[idx - 1]
# for tiny
elif module_defs[idx - 1]['type'] == 'maxpool':
if module_defs[idx - 2]['type'] == 'route': # v4 tiny
return get_input_mask(module_defs, idx - 1, CBLidx2mask)
else: # v3 tiny
return CBLidx2mask[idx - 2]
# for mobilenet
elif module_defs[idx - 1]['type'] == 'se':
return CBLidx2mask[idx - 3]
elif module_defs[idx - 1]['type'] == 'depthwise':
return CBLidx2mask[idx - 2]
elif module_defs[idx - 1]['type'] == 'shortcut':
return CBLidx2mask[idx - 2]
elif module_defs[idx - 1]['type'] == 'route':
route_in_idxs = []
for layer_i in module_defs[idx - 1]['layers']:
if int(layer_i) < 0:
route_in_idxs.append(idx - 1 + int(layer_i))
else:
route_in_idxs.append(int(layer_i))
if len(route_in_idxs) == 1:
mask = CBLidx2mask[route_in_idxs[0]]
if 'groups' in module_defs[idx - 1]:
return mask[(mask.shape[0] // 2):]
return mask
elif len(route_in_idxs) == 2:
# used when pruning tiny models
if module_defs[route_in_idxs[1] - 1]['type'] == 'maxpool':
return np.concatenate([CBLidx2mask[route_in_idxs[0] - 1], CBLidx2mask[route_in_idxs[1]]])
else:
if module_defs[route_in_idxs[0]]['type'] == 'upsample':
mask1 = CBLidx2mask[route_in_idxs[0] - 1]
elif module_defs[route_in_idxs[0]]['type'] == 'convolutional':
mask1 = CBLidx2mask[route_in_idxs[0]]
if module_defs[route_in_idxs[1]]['type'] == 'convolutional':
mask2 = CBLidx2mask[route_in_idxs[1]]
else:
mask2 = CBLidx2mask[route_in_idxs[1] - 1]
return np.concatenate([mask1, mask2])
elif len(route_in_idxs) == 4:
# the last route inside the SPP block
mask = CBLidx2mask[route_in_idxs[-1]]
return np.concatenate([mask, mask, mask, mask])
else:
print("Something wrong with route module!")
raise Exception
def init_weights_from_loose_model(compact_model, loose_model, CBL_idx, Other_idx, CBLidx2mask, is_gray_scale=False):
for idx in CBL_idx:
compact_CBL = compact_model.module_list[idx]
loose_CBL = loose_model.module_list[idx]
out_channel_idx = np.argwhere(CBLidx2mask[idx])[:, 0].tolist()
compact_bn, loose_bn = compact_CBL[1], loose_CBL[1]
compact_bn.weight.data = loose_bn.weight.data[out_channel_idx].clone()
compact_bn.bias.data = loose_bn.bias.data[out_channel_idx].clone()
compact_bn.running_mean.data = loose_bn.running_mean.data[out_channel_idx].clone()
compact_bn.running_var.data = loose_bn.running_var.data[out_channel_idx].clone()
input_mask = get_input_mask(loose_model.module_defs, idx, CBLidx2mask, is_gray_scale=is_gray_scale)
in_channel_idx = np.argwhere(input_mask)[:, 0].tolist()
compact_conv, loose_conv = compact_CBL[0], loose_CBL[0]
tmp = loose_conv.weight.data[:, in_channel_idx, :, :].clone()
compact_conv.weight.data = tmp[out_channel_idx, :, :, :].clone()
for idx in Other_idx:
compact_conv = compact_model.module_list[idx][0]
loose_conv = loose_model.module_list[idx][0]
input_mask = get_input_mask(loose_model.module_defs, idx, CBLidx2mask)
in_channel_idx = np.argwhere(input_mask)[:, 0].tolist()
# Copying the non-pruned layers covers three cases:
# case 1: convolutional layer, the bias must be copied as well
# case 2: se layer, fc1 and fc2 are copied separately
# case 3: depthwise layer, the conv and BN are copied directly
if loose_model.module_defs[idx]['type'] == 'convolutional':
compact_conv.weight.data = loose_conv.weight.data[:, in_channel_idx, :, :].clone()
compact_conv.bias.data = loose_conv.bias.data.clone()
elif loose_model.module_defs[idx]['type'] == 'se':
compact_fc1 = compact_conv.fc[0]
loose_fc1 = loose_conv.fc[0]
compact_fc1.weight.data = loose_fc1.weight.data.clone()
compact_fc2 = compact_conv.fc[2]
loose_fc2 = loose_conv.fc[2]
compact_fc2.weight.data = loose_fc2.weight.data.clone()
else:
compact_conv.weight.data = loose_conv.weight.data.clone()
compact_bn = compact_model.module_list[idx][1]
loose_bn = loose_model.module_list[idx][1]
compact_bn.weight.data = loose_bn.weight.data.clone()
compact_bn.bias.data = loose_bn.bias.data.clone()
compact_bn.running_mean.data = loose_bn.running_mean.data.clone()
compact_bn.running_var.data = loose_bn.running_var.data.clone()
def prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask):
pruned_model = deepcopy(model)
activations = []
for i, model_def in enumerate(model.module_defs):
if model_def['type'] == 'convolutional' or model_def['type'] == 'depthwise' or model_def['type'] == 'se':
activation = torch.zeros(int(model_def['filters'])).cuda()
if i in prune_idx:
mask = torch.from_numpy(CBLidx2mask[i]).cuda()
bn_module = pruned_model.module_list[i][1]
bn_module.weight.data.mul_(mask)
if hasattr(pruned_model.module_list[i], 'activation'):
ac_module = pruned_model.module_list[i][2]
if ac_module.__class__.__name__ == "LeakyReLU":
activation = F.leaky_relu((1 - mask) * bn_module.bias.data, 0.1)
elif ac_module.__class__.__name__ == "ReLU6":
activation = F.relu6((1 - mask) * bn_module.bias.data, inplace=True)
elif ac_module.__class__.__name__ == "HardSwish":
x = (1 - mask) * bn_module.bias.data
activation = x * (F.relu6(x + 3.0, inplace=True) / 6.0)
elif ac_module.__class__.__name__ == "ReLU":
activation = F.relu((1 - mask) * bn_module.bias.data)
elif ac_module.__class__.__name__ == "Mish":
x = (1 - mask) * bn_module.bias.data
activation = x * F.softplus(x).tanh()
else:
activation = (1 - mask) * bn_module.bias.data
else:
activation = (1 - mask) * bn_module.bias.data
update_activation(i, pruned_model, activation, CBL_idx)
bn_module.bias.data.mul_(mask)
activations.append(activation)
elif model_def['type'] == 'shortcut':
actv1 = activations[i - 1]
from_layer = int(model_def['from'][0])
actv2 = activations[i + from_layer]
activation = actv1 + actv2
update_activation(i, pruned_model, activation, CBL_idx)
activations.append(activation)
elif model_def['type'] == 'route':
# SPP is not pruned; its route needs no update and is only a placeholder
from_layers = [int(s) for s in model_def['layers']]
activation = None
if len(from_layers) == 1:
activation = activations[i + from_layers[0] if from_layers[0] < 0 else from_layers[0]]
if 'groups' in model_def:
activation = activation[(activation.shape[0] // 2):]
update_activation(i, pruned_model, activation, CBL_idx)
elif len(from_layers) == 2:
actv1 = activations[i + from_layers[0]]
actv2 = activations[i + from_layers[1] if from_layers[1] < 0 else from_layers[1]]
activation = torch.cat((actv1, actv2))
update_activation(i, pruned_model, activation, CBL_idx)
activations.append(activation)
elif model_def['type'] == 'upsample':
# activation = torch.zeros(int(model.module_defs[i - 1]['filters'])).cuda()
activations.append(activations[i - 1])
elif model_def['type'] == 'yolo':
activations.append(None)
elif model_def['type'] == 'maxpool': # distinguishes SPP from tiny
if model.module_defs[i + 1]['type'] == 'route':
activations.append(None)
else:
activation = activations[i - 1]
update_activation(i, pruned_model, activation, CBL_idx)
activations.append(activation)
return pruned_model
def obtain_bn_mask(bn_module, thre):
thre = thre.to(bn_module.weight.device)
mask = bn_module.weight.data.abs().ge(thre).float()
return mask
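# Worked example: with thre=torch.tensor(0.01), a BN layer whose |weight| values are
# [0.5, 0.002, 0.03] yields the mask [1., 0., 1.]; channels whose BN scale has been
# driven toward zero by sparsity training are the ones pruned.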
def get_nearest_multiple(num, base):
down = num % base
up = base - down
if down >= up:
near_multi_base = num + up
else:
near_multi_base = num - down
return near_multi_base
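# Worked example: get_nearest_multiple rounds to the closest multiple of base, with
# ties rounding up: get_nearest_multiple(13, 8) == 16, get_nearest_multiple(11, 8) == 8,
# get_nearest_multiple(12, 8) == 16.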
def merge_mask(model, CBLidx2mask, CBLidx2filters, base=1):
for i in range(len(model.module_defs) - 1, -1, -1):
mtype = model.module_defs[i]['type']
if mtype == 'shortcut':
if model.module_defs[i]['is_access']:
continue
Merge_masks = []
layer_i = i
while mtype == 'shortcut': # walk the layer before each shortcut and its 'from' layer: if it is a conv with BN=1, unsqueeze that layer's CBLidx2mask and append it to Merge_masks
model.module_defs[layer_i]['is_access'] = True
if model.module_defs[layer_i - 1]['type'] == 'convolutional':
bn = int(model.module_defs[layer_i - 1]['batch_normalize'])
if bn:
Merge_masks.append(CBLidx2mask[layer_i - 1].unsqueeze(0))
layer_i = int(model.module_defs[layer_i]['from'][0]) + layer_i
mtype = model.module_defs[layer_i]['type']
if mtype == 'convolutional':
bn = int(model.module_defs[layer_i]['batch_normalize'])
if bn:
Merge_masks.append(CBLidx2mask[layer_i].unsqueeze(0))
if len(Merge_masks) > 1: # if several shortcut layers contributed masks
Merge_masks = torch.cat(Merge_masks, 0) # stack row-wise into a 2-D tensor
if base == 1:
merge_mask = (torch.sum(Merge_masks, dim=0) > 0).float() # column-wise sum, giving a 1-D tensor
else:
sum_mask = (torch.sum(Merge_masks, dim=0)).float()
merge_num = int(torch.sum(torch.sum(Merge_masks, dim=0) > 0).item())
merge_num_multi = get_nearest_multiple(merge_num, base)
_, y = torch.topk(sum_mask, merge_num_multi)
merge_mask = torch.zeros(sum_mask.size(), dtype=torch.float32)
merge_mask[y] = 1
else:
if base == 1:
merge_mask = Merge_masks[0].float()
else:
merge_num = int(torch.sum(Merge_masks[0]).item()) # single mask: pad the kept-channel count up to a multiple of base
merge_num_multi = get_nearest_multiple(merge_num, base)
_, y = torch.topk(Merge_masks[0].squeeze(0), merge_num_multi)
merge_mask = torch.zeros(Merge_masks[0].squeeze(0).size(), dtype=torch.float32)
merge_mask[y] = 1
layer_i = i
mtype = 'shortcut'
while mtype == 'shortcut': # walk the layer before each shortcut and its 'from' layer: if it is a conv with BN=1, set that layer's CBLidx2mask to merge_mask and CBLidx2filters to the sum of merge_mask's elements
if model.module_defs[layer_i - 1]['type'] == 'convolutional':
bn = int(model.module_defs[layer_i - 1]['batch_normalize'])
if bn:
CBLidx2mask[layer_i - 1] = merge_mask
CBLidx2filters[layer_i - 1] = int(torch.sum(merge_mask).item()) # sum all elements and take the scalar value
layer_i = int(model.module_defs[layer_i]['from'][0]) + layer_i
mtype = model.module_defs[layer_i]['type']
if mtype == 'convolutional':
bn = int(model.module_defs[layer_i]['batch_normalize'])
if bn:
CBLidx2mask[layer_i] = merge_mask
CBLidx2filters[layer_i] = int(torch.sum(merge_mask).item())
def update_activation(i, pruned_model, activation, CBL_idx):
next_idx = i + 1
if pruned_model.module_defs[next_idx]['type'] == 'convolutional':
next_conv = pruned_model.module_list[next_idx][0]
conv_sum = next_conv.weight.data.sum(dim=(2, 3))
offset = conv_sum.matmul(activation.reshape(-1, 1)).reshape(-1)
if next_idx in CBL_idx:
next_bn = pruned_model.module_list[next_idx][1]
next_bn.running_mean.data.sub_(offset)
else:
next_conv.bias.data.add_(offset)
def prune_model_keep_size_forEagleEye(model, prune_idx, CBLidx2mask):
pruned_model = deepcopy(model)
for i, model_def in enumerate(model.module_defs):
if model_def['type'] == 'convolutional' or model_def['type'] == 'depthwise' or model_def['type'] == 'se':
if i in prune_idx:
mask = torch.from_numpy(CBLidx2mask[i]).cuda()
bn_module = pruned_model.module_list[i][1]
bn_module.weight.data.mul_(mask)
bn_module.bias.data.mul_(mask)
return pruned_model
================================================
FILE: utils/quantized/__init__.py
================================================
# Author:LiPu
================================================
FILE: utils/quantized/quantized_TPSQ.py
================================================
# Author:LiPu
import numpy as np
import os
import time
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.autograd import Function
# ********************* quantizers *********************
class Round(Function):
@staticmethod
def forward(self, input):
sign = torch.sign(input)
output = sign * torch.floor(torch.abs(input) + 0.5)
return output
@staticmethod
def backward(self, grad_output):
grad_input = grad_output.clone()
return grad_input
class Search_Pow2(Function):
@staticmethod
def forward(self, input):
input_data = input.data.clone()
output = input
# replace negative entries with 2 ** -5 and cap at 2 ** (8 + 5); boolean-mask indexing
# returns a copy, so assign through .data instead of copy_ on the selection
output.data[output.data < 0] = 2 ** -5
output.data[output.data > 2 ** (8 + 5)] = 2 ** (8 + 5)
ceil_float_range = 2 ** output.log2().ceil()
floor_float_range = 2 ** output.log2().floor()
if abs(ceil_float_range - output) < abs(floor_float_range - output):
output.data = ceil_float_range.data
else:
output.data = floor_float_range.data
output_data = output.data.clone()
self.save_for_backward(input_data, output_data)
return output
@staticmethod
def backward(self, grad_output):
input, output = self.saved_tensors
scale = output / input
grad_input = scale * grad_output.clone()
# linear
# grad_input = 0.8985 * (grad_output.clone())
# polynomial
# grad_input = -0.668 * grad_output.clone() + 1.335
# grad_input[grad_input.ge(0.5)] = 0
# grad_input[grad_input.le(-0.5)] = 0
# exponential
# grad_input = 0.2379145 * torch.exp(2.2235 * grad_output.clone())
# grad_input[grad_input.ge(0.1)] = 0
# grad_input[grad_input.le(-0.1)] = 0
return grad_input
class Quantizer(nn.Module):
def __init__(self, bits, out_channels, warmup=False):
super().__init__()
self.first = True
self.momentum = 0.1
self.bits = bits
if warmup:
self.register_buffer('warmup', torch.ones(1))
else:
self.register_buffer('warmup', torch.zeros(1))
# clamp
def clamp(self, input):
# print('==============')
# print((Search_Pow2.apply(self.scale)).size())
# print(input.size())
# print('==============')
output = 0.5 * (
torch.abs(input + Search_Pow2.apply(self.scale)) - torch.abs(input - Search_Pow2.apply(self.scale)))
return output
# quantize
def quantize(self, input):
quantized_range = torch.tensor((1 << (self.bits - 1)) - 1)
output = (input * quantized_range) / Search_Pow2.apply(self.scale)
return output
def round(self, input):
output = Round.apply(input)
return output
# dequantize
def dequantize(self, input):
quantized_range = torch.tensor((1 << (self.bits - 1)))
output = (input * Search_Pow2.apply(self.scale)) / quantized_range
return output
def forward(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported !')
assert self.bits != 1
else:
output = self.clamp(input) # clamp
output = self.quantize(output) # quantize
output = self.round(output)
output = self.dequantize(output) # dequantize
return output
def get_quantize_value(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported!')
assert self.bits != 1
else:
output = self.quantize(input) # quantize
output = self.round(output)
output = self.clamp(output) # clamp
return output
class RangeTracker(nn.Module):
def __init__(self):
super().__init__()
def update_range(self, min_val, max_val):
raise NotImplementedError
@torch.no_grad()
def forward(self, input):
min_val = torch.min(input)
max_val = torch.max(input)
self.update_range(min_val, max_val)
class GlobalRangeTracker(RangeTracker): # for W, min_max_shape=(N, 1, 1, 1), channel level; keeps the running min/max across batches, input shape (N, C, W, H)
def __init__(self):
super().__init__()
self.register_buffer('min_val', torch.zeros(1))
self.register_buffer('max_val', torch.zeros(1))
self.register_buffer('first_w', torch.zeros(1))
def update_range(self, min_val, max_val):
temp_minval = self.min_val
temp_maxval = self.max_val
if self.first_w == 0:
self.first_w.add_(1)
self.min_val.add_(min_val)
self.max_val.add_(max_val)
else:
self.min_val.add_(-temp_minval).add_(torch.min(temp_minval, min_val))
self.max_val.add_(-temp_maxval).add_(torch.max(temp_maxval, max_val))
class Bias_Quantizer(nn.Module):
def __init__(self, bits, range_tracker):
super().__init__()
self.bits = bits
self.range_tracker = range_tracker
self.register_buffer('scale', torch.zeros(1)) # quantization scale factor
def update_params(self):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val)) # range after quantization
float_max = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val)) # float range before quantization
floor_float_range = 2 ** float_max.log2().floor()
ceil_float_range = 2 ** float_max.log2().ceil()
if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):
float_range = ceil_float_range
else:
float_range = floor_float_range
self.scale = float_range / quantized_range # quantization scale factor
# quantize
def quantize(self, input):
output = input / self.scale
return output
def round(self, input):
output = Round.apply(input)
return output
# clamp
def clamp(self, input):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
output = torch.clamp(input, min_val, max_val)
return output
# dequantize
def dequantize(self, input):
output = (input) * self.scale
return output
def forward(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported !')
assert self.bits != 1
else:
if self.training:
self.range_tracker(input)
self.update_params()
output = self.quantize(input) # quantize
output = self.round(output)
output = self.clamp(output) # clamp
output = self.dequantize(output) # dequantize
return output
def get_quantize_value(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported !')
assert self.bits != 1
else:
output = self.quantize(input) # quantize
output = self.round(output)
output = self.clamp(output) # clamp
return output
################ get the bit-shift corresponding to the quantization scale factor
def get_scale(self):
############# shift correction
move_scale = math.log2(self.scale)
move_scale = np.array(move_scale).reshape(1, -1)
return move_scale
class Weight_Quantizer(Quantizer):
def __init__(self, bits, out_channels, warmup):
super().__init__(bits, out_channels, warmup)
self.out_channels = out_channels
if self.out_channels == -1:
self.scale = Parameter(torch.Tensor(1)) # quantization scale factor
else:
self.scale = Parameter(torch.Tensor(self.out_channels, 1, 1, 1)) # quantization scale factor
init.ones_(self.scale)
def forward(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported !')
assert self.bits != 1
else:
if self.warmup:
with torch.no_grad():
max_metrics = -1
max_step = -5
step = (torch.max(input)) / 100
for i in range(1, 100):
self.scale.data.copy_(torch.Tensor([step * i]))
output = self.clamp(input) # clamp
output = self.quantize(output) # quantize
output = self.round(output)
output = self.dequantize(output) # dequantize
cosine_similarity = torch.cosine_similarity(input.view(-1), output.view(-1), dim=0)
if cosine_similarity > max_metrics:
max_metrics = cosine_similarity
max_step = i
del output
torch.cuda.empty_cache()
# print("max_step:", max_step)
# print("max_metrics:", max_metrics)
self.scale.data.copy_(torch.Tensor([step * max_step]))
self.warmup.add_(-1)
output = self.clamp(input) # clamp
output = self.quantize(output) # quantize
output = self.round(output)
output = self.dequantize(output) # dequantize
return output
class Activattion_Quantizer(Quantizer):
def __init__(self, bits, out_channels, warmup):
super().__init__(bits, out_channels, warmup)
self.out_channels = out_channels
if self.out_channels == -1:
self.scale = Parameter(torch.Tensor(1)) # quantization scale factor
else:
self.scale = Parameter(torch.Tensor(1, self.out_channels, 1, 1)) # quantization scale factor
init.ones_(self.scale)
def forward(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported !')
assert self.bits != 1
else:
if self.warmup:
with torch.no_grad():
max_metrics = -1
max_step = -5
step = (torch.max(input)) / 100
# if self.out_channels == -1:
for i in range(1, 100):
self.scale.data.copy_(torch.Tensor([step * i]))
output = self.clamp(input) # clamp
output = self.quantize(output) # quantize
output = self.round(output)
output = self.dequantize(output) # dequantize
cosine_similarity = torch.cosine_similarity(input.view(-1), output.view(-1), dim=0)
if cosine_similarity > max_metrics:
max_metrics = cosine_similarity
max_step = i
del output
torch.cuda.empty_cache()
# print("max_step:", max_step)
# print("max_metrics:", max_metrics)
self.scale.data.copy_(torch.Tensor([step * max_step]))
self.warmup.add_(-1)
output = self.clamp(input) # clamp
output = self.quantize(output) # quantize
output = self.round(output)
output = self.dequantize(output) # dequantize
return output
def reshape_to_activation(input):
return input.reshape(1, -1, 1, 1)
def reshape_to_weight(input):
return input.reshape(-1, 1, 1, 1)
def reshape_to_bias(input):
return input.reshape(-1)
# ********************* BN-fold quantized conv (fold BN, then quantize A/W and convolve) *********************
class TPSQ_BNFold_QuantizedConv2d_For_FPGA(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
eps=1e-5,
momentum=0.01, # lowered from 0.1 to 0.01 to damp the jitter quantization introduces: per-batch statistics get less weight, which in experiments improves quantized training (about 1% higher acc)
a_bits=8,
w_bits=8,
bn=0,
activate='leaky',
steps=0,
quantizer_output=False,
maxabsscaler=False,
warmup=True
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias
)
self.bn = bn
self.activate = activate
self.eps = eps
self.momentum = momentum
self.freeze_step = int(steps * 0.9)
self.gamma = Parameter(torch.Tensor(out_channels))
self.beta = Parameter(torch.Tensor(out_channels))
self.register_buffer('running_mean', torch.zeros(out_channels))
self.register_buffer('running_var', torch.zeros(out_channels))
self.register_buffer('batch_mean', torch.zeros(out_channels))
self.register_buffer('batch_var', torch.zeros(out_channels))
self.register_buffer('first_bn', torch.zeros(1))
self.register_buffer('step', torch.zeros(1))
self.quantizer_output = quantizer_output
self.maxabsscaler = maxabsscaler
init.normal_(self.gamma, 1, 0.5)
init.zeros_(self.beta)
self.activation_quantizer = Activattion_Quantizer(bits=a_bits, out_channels=-1,
warmup=warmup)
self.weight_quantizer = Weight_Quantizer(bits=w_bits, out_channels=-1, warmup=warmup)
self.bias_quantizer = Bias_Quantizer(bits=w_bits, range_tracker=GlobalRangeTracker())
def forward(self, input):
# training mode
if self.training:
self.step += 1
if self.bn:
# run a plain convolution first to obtain activations for the BN statistics
output = F.conv2d(
input=input,
weight=self.weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
# update the BN statistics (batch and running)
dims = [dim for dim in range(4) if dim != 1]
self.batch_mean = torch.mean(output, dim=dims)
self.batch_var = torch.var(output, dim=dims)
with torch.no_grad():
if self.first_bn == 0 and torch.equal(self.running_mean, torch.zeros_like(
self.running_mean)) and torch.equal(self.running_var, torch.zeros_like(self.running_var)):
self.first_bn.add_(1)
self.running_mean.add_(self.batch_mean)
self.running_var.add_(self.batch_var)
else:
self.running_mean.mul_(1 - self.momentum).add_(self.batch_mean * self.momentum)
self.running_var.mul_(1 - self.momentum).add_(self.batch_var * self.momentum)
# BN folding
if self.step < self.freeze_step:
if self.bias is not None:
bias = reshape_to_bias(
self.beta + (self.bias - self.batch_mean) * (
self.gamma / torch.sqrt(self.batch_var + self.eps)))
else:
bias = reshape_to_bias(
self.beta - self.batch_mean * (
self.gamma / torch.sqrt(self.batch_var + self.eps))) # fold b with batch stats
weight = self.weight * reshape_to_weight(
self.gamma / torch.sqrt(self.batch_var + self.eps)) # fold w with batch stats
else:
if self.bias is not None:
bias = reshape_to_bias(
self.beta + (self.bias - self.running_mean) * (
self.gamma / torch.sqrt(self.running_var + self.eps)))
else:
bias = reshape_to_bias(
self.beta - self.running_mean * (
self.gamma / torch.sqrt(self.running_var + self.eps))) # fold b with running stats
weight = self.weight * reshape_to_weight(
self.gamma / torch.sqrt(self.running_var + self.eps)) # fold w with running stats
else:
bias = self.bias
weight = self.weight
# eval mode
else:
# print(self.running_mean, self.running_var)
if self.bn:
# BN folding
if self.bias is not None:
bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
self.gamma / torch.sqrt(self.running_var + self.eps)))
else:
bias = reshape_to_bias(
self.beta - self.running_mean * self.gamma / torch.sqrt(
self.running_var + self.eps)) # fold b with running stats
weight = self.weight * reshape_to_weight(
self.gamma / torch.sqrt(self.running_var + self.eps)) # fold w with running stats
else:
bias = self.bias
weight = self.weight
# quantize the BN-folded W and bias
q_weight = self.weight_quantizer(weight)
q_bias = self.bias_quantizer(bias)
if self.quantizer_output: # dump quantization parameters to txt files
# create the quantizer_output folders
if not os.path.isdir('./quantizer_output'):
os.makedirs('./quantizer_output')
if not os.path.isdir('./quantizer_output/q_weight_out'):
os.makedirs('./quantizer_output/q_weight_out')
if not os.path.isdir('./quantizer_output/w_scale_out'):
os.makedirs('./quantizer_output/w_scale_out')
if not os.path.isdir('./quantizer_output/q_weight_max'):
os.makedirs('./quantizer_output/q_weight_max')
if not os.path.isdir('./quantizer_output/max_weight_count'):
os.makedirs('./quantizer_output/max_weight_count')
####################### dump this layer's weight scale factor
weight_scale = self.weight_quantizer.get_scale()
np.savetxt(('./quantizer_output/w_scale_out/%f.txt' % time.time()), weight_scale, delimiter='\n')
####################### dump this layer's quantized weights
q_weight_txt = self.weight_quantizer.get_quantize_value(weight)
q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)
q_weight_max = [np.max(q_weight_txt)]
# q_weight_max = np.argmax(q_weight_txt)
max_weight_count = [np.sum(abs(q_weight_txt) >= 127)] # count the overflowed values in this layer
np.savetxt(('./quantizer_output/max_weight_count/%f.txt' % time.time()), max_weight_count)
np.savetxt(('./quantizer_output/q_weight_max/%f.txt' % time.time()), q_weight_max)
np.savetxt(('./quantizer_output/q_weight_out/%f.txt' % time.time()), q_weight_txt, delimiter='\n')
# io.savemat('save.mat',{'q_weight_txt':q_weight_txt})
####################### create the folders for the bias txt dumps
if not os.path.isdir('./quantizer_output/q_bias_out'):
os.makedirs('./quantizer_output/q_bias_out')
if not os.path.isdir('./quantizer_output/b_scale_out'):
os.makedirs('./quantizer_output/b_scale_out')
####################### dump this layer's bias scale factor
bias_scale = self.bias_quantizer.get_scale()
np.savetxt(('./quantizer_output/b_scale_out/%f.txt' % time.time()), bias_scale, delimiter='\n')
####################### dump this layer's quantized bias
q_bias_txt = self.bias_quantizer.get_quantize_value(bias)
q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_out/%f.txt' % time.time()), q_bias_txt, delimiter='\n')
# quantized convolution
output = F.conv2d(
input=input,
weight=q_weight,
bias=q_bias, # note: the bias is added here so the conv performs the full conv+bn
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
if self.activate == 'leaky':
output = F.leaky_relu(output, 0.1 if not self.maxabsscaler else 0.25, inplace=True)
elif self.activate == 'relu6':
output = F.relu6(output, inplace=True)
elif self.activate == 'h_swish':
output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)
elif self.activate == 'relu':
output = F.relu(output, inplace=True)
elif self.activate == 'mish':
output = output * F.softplus(output).tanh()
elif self.activate == 'linear':
# return output
pass
else:
print("%s is not supported !" % self.activate)
if self.quantizer_output:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
################## dump the current activation scale factor
activation_scale = self.activation_quantizer.get_scale()
np.savetxt(('./quantizer_output/a_scale_out/%f.txt' % time.time()), activation_scale, delimiter='\n')
################## dump this layer's quantized activations
q_activation_txt = self.activation_quantizer.get_quantize_value(output)
q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)
q_activation_max = [np.max(q_activation_txt)] # record this layer's max value (to check for overflow)
max_activation_count = [np.sum(abs(q_activation_txt) >= 127)] # count the overflowed values in this layer
# q_weight_max = np.argmax(q_weight_txt)
np.savetxt(('./quantizer_output/max_activation_count/%f.txt' % time.time()),
max_activation_count)
np.savetxt(('./quantizer_output/q_activation_max/%f.txt' % time.time()), q_activation_max)
np.savetxt(('./quantizer_output/q_activation_out/%f.txt' % time.time()), q_activation_txt,
delimiter='\n')
output = self.activation_quantizer(output)
return output
def BN_fuse(self):
if self.bn:
# BN folding
if self.bias is not None:
bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
self.gamma / torch.sqrt(self.running_var + self.eps)))
else:
bias = reshape_to_bias(
self.beta - self.running_mean * self.gamma / torch.sqrt(
self.running_var + self.eps)) # fold b with running stats
weight = self.weight * reshape_to_weight(
self.gamma / torch.sqrt(self.running_var + self.eps)) # fold w with running stats
else:
bias = self.bias
weight = self.weight
return weight, bias
================================================
FILE: utils/quantized/quantized_dorefa.py
================================================
# Author:LiPu
import time
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.autograd import Function
class Round(Function):
@staticmethod
def forward(self, input):
sign = torch.sign(input)
output = sign * torch.floor(torch.abs(input) + 0.5)
return output
@staticmethod
def backward(self, grad_output):
grad_input = grad_output.clone()
return grad_input
# ********************* activation (A) quantization ***********************
class activation_quantize(nn.Module):
def __init__(self, a_bits):
super().__init__()
self.a_bits = a_bits
def round(self, input):
output = Round.apply(input)
return output
def get_quantize_value(self, input):
output = torch.clamp(input * 0.1, 0, 1) # scale A by 0.1 before clamping to reduce the clipping error
scale = float(2 ** self.a_bits - 1)
output = output * scale
output = self.round(output)
return output
################ get the bit-shift corresponding to the quantization scale factor
def get_scale(self):
############# shift correction
# scale = float(2 ** self.a_bits - 1)
# move_scale = math.log2(scale)
scale = np.array(self.a_bits).reshape(1, -1)
return scale
def forward(self, input):
if self.a_bits == 32:
output = input
elif self.a_bits == 1:
print('!Binary quantization is not supported !')
assert self.a_bits != 1
else:
output = torch.clamp(input * 0.1, 0, 1) # scale A by 0.1 before clamping to reduce the clipping error
scale = float(2 ** self.a_bits - 1)
output = output * scale
output = self.round(output)
output = output / scale
return output
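# Worked example (a_bits=8): an activation value of 4.0 is scaled to 0.4, clamped to
# [0, 1], multiplied by 255, rounded to 102, then divided back by 255, giving ~0.4;
# i.e. activations are fake-quantized onto a 256-level grid over [0, 1].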
# ********************* weight (W) quantization ***********************
class weight_quantize(nn.Module):
def __init__(self, w_bits):
super().__init__()
self.w_bits = w_bits
def round(self, input):
output = Round.apply(input)
return output
def get_quantize_value(self, input):
output = torch.tanh(input)
output = output / 2 / torch.max(torch.abs(output)) + 0.5 # normalize to [0, 1]
scale = float(2 ** self.w_bits - 1)
output = output * scale
output = self.round(output)
# output = 2 * output - 1
return output
################ get the bit-shift corresponding to the quantization scale factor
def get_scale(self):
############# shift correction
# scale = float(2 ** self.w_bits - 1)
# scale = math.log2(scale)
scale = np.array(self.w_bits).reshape(1, -1)
return scale
def forward(self, input):
if self.w_bits == 32:
output = input
elif self.w_bits == 1:
print('!Binary quantization is not supported !')
assert self.w_bits != 1
else:
output = torch.tanh(input)
output = output / 2 / torch.max(torch.abs(output)) + 0.5 # normalize to [0, 1]
scale = float(2 ** self.w_bits - 1)
output = output * scale
output = self.round(output)
output = output / scale
output = 2 * output - 1
return output
def get_weights(self, input):
if self.w_bits == 32:
output = input
elif self.w_bits == 1:
print('!Binary quantization is not supported !')
assert self.w_bits != 1
else:
output = torch.tanh(input)
            output = output / 2 / torch.max(torch.abs(output)) + 0.5  # normalize to [0, 1]
scale = float(2 ** self.w_bits - 1)
output = output * scale
output = self.round(output)
return output
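# A short sketch of the DoReFa weight path above (editor's addition): weights
# are squashed with tanh, normalized to [0, 1] via tanh(w) / (2 * max|tanh(w)|) + 0.5,
# rounded onto 2**w_bits - 1 levels, then mapped back to [-1, 1] by 2*q - 1 in
# forward(); get_quantize_value() stops at the integer codes for export.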
# ********************* quantized convolution (quantize A and W, then convolve) ***********************
class DorefaConv2d(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
a_bits=8,
w_bits=8,
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias
)
        # instantiate the A and W quantizers
self.activation_quantizer = activation_quantize(a_bits=a_bits)
self.weight_quantizer = weight_quantize(w_bits=w_bits)
def forward(self, input):
        # quantize A and W
if input.shape[1] != 3:
input = self.activation_quantizer(input)
q_weight = self.weight_quantizer(self.weight)
        # quantized convolution
output = F.conv2d(
input=input,
weight=q_weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
return output
def reshape_to_activation(input):
return input.reshape(1, -1, 1, 1)
def reshape_to_weight(input):
return input.reshape(-1, 1, 1, 1)
def reshape_to_bias(input):
return input.reshape(-1)
class BNFold_DorefaConv2d(DorefaConv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
eps=1e-5,
            momentum=0.01,  # momentum lowered from 0.1 to 0.01 to damp quantization jitter: weighting batch statistics less suppresses oscillation; in experiments QAT accuracy improved by roughly 1%
a_bits=8,
w_bits=8,
bn=0,
activate='leaky',
steps=0,
quantizer_output=False,
maxabsscaler=False
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias
)
self.bn = bn
self.activate = activate
self.eps = eps
self.momentum = momentum
self.freeze_step = int(steps * 0.9)
self.gamma = Parameter(torch.Tensor(out_channels))
self.beta = Parameter(torch.Tensor(out_channels))
self.register_buffer('running_mean', torch.zeros(out_channels))
self.register_buffer('running_var', torch.zeros(out_channels))
self.register_buffer('batch_mean', torch.zeros(out_channels))
self.register_buffer('batch_var', torch.zeros(out_channels))
self.register_buffer('first_bn', torch.zeros(1))
self.register_buffer('step', torch.zeros(1))
self.quantizer_output = quantizer_output
self.maxabsscaler = maxabsscaler
init.normal_(self.gamma, 1, 0.5)
init.zeros_(self.beta)
        # instantiate the quantizers (A at layer level, W at channel level)
self.activation_quantizer = activation_quantize(a_bits=a_bits)
self.weight_quantizer = weight_quantize(w_bits=w_bits)
self.bias_quantizer = weight_quantize(w_bits=w_bits)
def forward(self, input):
        # training mode
if self.training:
self.step += 1
if self.bn:
                # run a plain convolution first to obtain activations for the BN statistics
output = F.conv2d(
input=input,
weight=self.weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
                # update BN statistics (batch and running)
dims = [dim for dim in range(4) if dim != 1]
self.batch_mean = torch.mean(output, dim=dims)
self.batch_var = torch.var(output, dim=dims)
with torch.no_grad():
if self.first_bn == 0 and torch.equal(self.running_mean, torch.zeros_like(
self.running_mean)) and torch.equal(self.running_var, torch.zeros_like(self.running_var)):
self.first_bn.add_(1)
self.running_mean.add_(self.batch_mean)
self.running_var.add_(self.batch_var)
else:
self.running_mean.mul_(1 - self.momentum).add_(self.momentum * self.batch_mean)
self.running_var.mul_(1 - self.momentum).add_(self.momentum * self.batch_var)
                # BN fusion
                if self.step < self.freeze_step:
                    if self.bias is not None:
                        bias = reshape_to_bias(
                            self.beta + (self.bias - self.batch_mean) * (
                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))
                    else:
                        bias = reshape_to_bias(
                            self.beta - self.batch_mean * (
                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))  # fold bias with batch stats
                    weight = self.weight * reshape_to_weight(
                        self.gamma / torch.sqrt(self.batch_var + self.eps))  # fold weight with batch stats
                else:
                    if self.bias is not None:
                        bias = reshape_to_bias(
                            self.beta + (self.bias - self.running_mean) * (
                                    self.gamma / torch.sqrt(self.running_var + self.eps)))
                    else:
                        bias = reshape_to_bias(
                            self.beta - self.running_mean * (
                                    self.gamma / torch.sqrt(self.running_var + self.eps)))  # fold bias with running stats
                    weight = self.weight * reshape_to_weight(
                        self.gamma / torch.sqrt(self.running_var + self.eps))  # fold weight with running stats
else:
bias = self.bias
weight = self.weight
        # eval mode
        else:
            # print(self.running_mean, self.running_var)
            # BN fusion
            if self.bn:
                if self.bias is not None:
                    bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
                            self.gamma / torch.sqrt(self.running_var + self.eps)))
                else:
                    bias = reshape_to_bias(
                        self.beta - self.running_mean * (
                                self.gamma / torch.sqrt(self.running_var + self.eps)))  # fold bias with running stats
                weight = self.weight * reshape_to_weight(
                    self.gamma / torch.sqrt(self.running_var + self.eps))  # fold weight with running stats
            else:
                bias = self.bias
                weight = self.weight
        # quantize A and the BN-folded W and bias
q_weight = self.weight_quantizer(weight)
q_bias = self.bias_quantizer(bias)
        if self.quantizer_output == True:  # dump quantization parameters to txt files
            # create the quantizer_output directories
if not os.path.isdir('./quantizer_output'):
os.makedirs('./quantizer_output')
if not os.path.isdir('./quantizer_output/q_weight_out'):
os.makedirs('./quantizer_output/q_weight_out')
if not os.path.isdir('./quantizer_output/w_scale_out'):
os.makedirs('./quantizer_output/w_scale_out')
if not os.path.isdir('./quantizer_output/q_weight_max'):
os.makedirs('./quantizer_output/q_weight_max')
if not os.path.isdir('./quantizer_output/max_weight_count'):
os.makedirs('./quantizer_output/max_weight_count')
            ####################### dump this layer's weight quantization scale
weight_scale = self.weight_quantizer.get_scale()
np.savetxt(('./quantizer_output/w_scale_out/scale %f.txt' % time.time()), weight_scale, delimiter='\n')
            ####################### dump this layer's quantized weights
q_weight_txt = self.weight_quantizer.get_quantize_value(weight)
q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)
q_weight_max = [np.max(q_weight_txt)]
# q_weight_max = np.argmax(q_weight_txt)
            max_weight_count = [np.sum(abs(q_weight_txt) >= 255)]  # count overflowed values in this layer
np.savetxt(('./quantizer_output/max_weight_count/max_weight_count %f.txt' % time.time()), max_weight_count)
np.savetxt(('./quantizer_output/q_weight_max/max_weight %f.txt' % time.time()), q_weight_max)
np.savetxt(('./quantizer_output/q_weight_out/weight %f.txt' % time.time()), q_weight_txt, delimiter='\n')
# io.savemat('save.mat',{'q_weight_txt':q_weight_txt})
            ####################### create directories for the bias txt dumps
if not os.path.isdir('./quantizer_output/q_bias_out'):
os.makedirs('./quantizer_output/q_bias_out')
if not os.path.isdir('./quantizer_output/b_scale_out'):
os.makedirs('./quantizer_output/b_scale_out')
            ####################### dump this layer's bias quantization scale
bias_scale = self.bias_quantizer.get_scale()
np.savetxt(('./quantizer_output/b_scale_out/scale %f.txt' % time.time()), bias_scale, delimiter='\n')
            ####################### dump this layer's quantized bias
q_bias_txt = self.bias_quantizer.get_quantize_value(bias)
q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_out/bias %f.txt' % time.time()), q_bias_txt, delimiter='\n')
        # quantized convolution
        if self.training:  # training mode
            output = F.conv2d(
                input=input,
                weight=q_weight,
                # bias=self.bias,  # note: the raw bias is not applied here (self.bias is None)
                bias=q_bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups
            )
        else:  # eval mode
            output = F.conv2d(
                input=input,
                weight=q_weight,
                bias=q_bias,  # note: bias is applied here for the complete conv+bn
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups
            )
if self.activate == 'leaky':
output = F.leaky_relu(output, 0.125 if not self.maxabsscaler else 0.25, inplace=True)
elif self.activate == 'relu6':
output = F.relu6(output, inplace=True)
elif self.activate == 'h_swish':
output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)
elif self.activate == 'relu':
output = F.relu(output, inplace=True)
elif self.activate == 'mish':
output = output * F.softplus(output).tanh()
elif self.activate == 'linear':
return output
# pass
else:
print(self.activate + " is not supported !")
if self.quantizer_output == True:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
            ################## dump this activation's quantization scale
activation_scale = self.activation_quantizer.get_scale()
np.savetxt(('./quantizer_output/a_scale_out/scale %f.txt' % time.time()), activation_scale, delimiter='\n')
            ################## dump this layer's quantized activations
q_activation_txt = self.activation_quantizer.get_quantize_value(output)
q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)
            q_activation_max = [np.max(q_activation_txt)]  # record this layer's max (to check for overflow)
            max_activation_count = [np.sum(abs(q_activation_txt) >= 255)]  # count overflowed values in this layer
# q_weight_max = np.argmax(q_weight_txt)
np.savetxt(('./quantizer_output/max_activation_count/max_activation_count %f.txt' % time.time()),
max_activation_count)
np.savetxt(('./quantizer_output/q_activation_max/max_activation %f.txt' % time.time()), q_activation_max)
np.savetxt(('./quantizer_output/q_activation_out/activation %f.txt' % time.time()), q_activation_txt,
delimiter='\n')
output = self.activation_quantizer(output)
return output
    def BN_fuse(self):
        if self.bn:
            # BN fusion
            if self.bias is not None:
                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
                        self.gamma / torch.sqrt(self.running_var + self.eps)))
            else:
                bias = reshape_to_bias(
                    self.beta - self.running_mean * self.gamma / torch.sqrt(
                        self.running_var + self.eps))  # fold bias with running stats
            weight = self.weight * reshape_to_weight(
                self.gamma / torch.sqrt(self.running_var + self.eps))  # fold weight with running stats
        else:
            bias = self.bias
            weight = self.weight
        return weight, bias
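# Editor's note (added): BN_fuse above (and the per-step folding in forward)
# implements the standard BN-folding identity
#   BN(conv(x, W) + b) == conv(x, W') + b'
# with W' = W * gamma / sqrt(var + eps) and
#      b' = beta + (b - mean) * gamma / sqrt(var + eps),
# so a single quantized convolution can replace conv + BN at inference time.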
class DorefaLinear(nn.Linear):
def __init__(self, in_features, out_features, bias=True, a_bits=2, w_bits=2):
super().__init__(in_features=in_features, out_features=out_features, bias=bias)
self.activation_quantizer = activation_quantize(a_bits=a_bits)
self.weight_quantizer = weight_quantize(w_bits=w_bits)
def forward(self, input):
        # quantize A and W
        q_input = self.activation_quantizer(input)
        q_weight = self.weight_quantizer(self.weight)
        # quantized fully-connected layer
output = F.linear(input=q_input, weight=q_weight, bias=self.bias)
return output
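# --- Minimal usage sketch (editor's addition, not part of the original file;
# --- shapes and bit-widths are illustrative only) ---
if __name__ == "__main__":
    torch.manual_seed(0)
    conv = DorefaConv2d(16, 32, kernel_size=3, padding=1, a_bits=8, w_bits=8)
    x = torch.randn(1, 16, 8, 8)
    y = conv(x)  # A and W are fake-quantized, then a normal conv2d runs
    print(y.shape)  # expected: torch.Size([1, 32, 8, 8])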
================================================
FILE: utils/quantized/quantized_google.py
================================================
import copy
import math
import time
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.autograd import Function
# ********************* range trackers (record the pre-quantization value ranges) *********************
class RangeTracker(nn.Module):
def __init__(self, q_level):
super().__init__()
self.q_level = q_level
def update_range(self, min_val, max_val):
raise NotImplementedError
@torch.no_grad()
def forward(self, input):
        if self.q_level == 'L':  # A: min/max shape (1, 1, 1, 1), per-layer
min_val = torch.min(input)
max_val = torch.max(input)
        elif self.q_level == 'C':  # W: min/max shape (N, 1, 1, 1), per-channel
min_val = torch.min(torch.min(torch.min(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]
max_val = torch.max(torch.max(torch.max(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]
self.update_range(min_val, max_val)
class GlobalRangeTracker(RangeTracker):  # W: min/max shape (N, 1, 1, 1), per-channel; keeps the min/max over this and all previous updates of the (N, C, W, H) input
def __init__(self, q_level, out_channels):
super().__init__(q_level)
if self.q_level == 'L':
self.register_buffer('min_val', torch.zeros(1))
self.register_buffer('max_val', torch.zeros(1))
elif self.q_level == 'C':
self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('first_w', torch.zeros(1))
def update_range(self, min_val, max_val):
temp_minval = self.min_val
temp_maxval = self.max_val
if self.first_w == 0:
self.first_w.add_(1)
self.min_val.add_(min_val)
self.max_val.add_(max_val)
else:
self.min_val.add_(-temp_minval).add_(torch.min(temp_minval, min_val))
self.max_val.add_(-temp_maxval).add_(torch.max(temp_maxval, max_val))
class AveragedRangeTracker(RangeTracker):  # A: min/max shape (1, 1, 1, 1), per-layer; keeps a momentum-averaged running min/max of the (N, C, W, H) input
def __init__(self, q_level, out_channels, momentum=0.1):
super().__init__(q_level)
self.momentum = momentum
if self.q_level == 'L':
self.register_buffer('min_val', torch.zeros(1))
self.register_buffer('max_val', torch.zeros(1))
elif self.q_level == 'C':
self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('first_a', torch.zeros(1))
def update_range(self, min_val, max_val):
if self.first_a == 0:
self.first_a.add_(1)
self.min_val.add_(min_val)
self.max_val.add_(max_val)
else:
self.min_val.mul_(1 - self.momentum).add_(min_val * self.momentum)
self.max_val.mul_(1 - self.momentum).add_(max_val * self.momentum)
# ********************* quantizers *********************
class Round(Function):
@staticmethod
def forward(self, input):
sign = torch.sign(input)
output = sign * torch.floor(torch.abs(input) + 0.5)
return output
@staticmethod
def backward(self, grad_output):
grad_input = grad_output.clone()
return grad_input
class Quantizer(nn.Module):
def __init__(self, bits, range_tracker, out_channels, Scale_freeze_step, sign=True):
super().__init__()
self.bits = bits
self.range_tracker = range_tracker
self.register_buffer('step', torch.zeros(1))
self.Scale_freeze_step = Scale_freeze_step
self.sign = sign
        if out_channels == -1:
            self.register_buffer('scale', torch.zeros(1))  # quantization scale
            self.register_buffer('zero_point', torch.zeros(1))  # quantization zero point
        else:
            self.register_buffer('scale', torch.zeros(out_channels, 1, 1, 1))  # quantization scale
            self.register_buffer('zero_point', torch.zeros(out_channels, 1, 1, 1))  # quantization zero point
def update_params(self):
raise NotImplementedError
    # quantize
def quantize(self, input):
output = input / self.scale + self.zero_point
return output
def round(self, input):
output = Round.apply(input)
return output
    # clamp
    def clamp(self, input):
        if self.sign:
            min_val = torch.tensor(-(1 << (self.bits - 1)))
            max_val = torch.tensor((1 << (self.bits - 1)) - 1)
        else:
            min_val = torch.tensor(0)
            max_val = torch.tensor((1 << self.bits) - 1)
        output = torch.clamp(input, min_val, max_val)
        return output
    # dequantize
def dequantize(self, input):
output = (input - self.zero_point) * self.scale
return output
def forward(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported !')
assert self.bits != 1
else:
            if self.training == True and self.step < self.Scale_freeze_step:
                self.range_tracker(input)
                self.update_params()
            output = self.quantize(input)  # quantize
            output = self.round(output)
            output = self.clamp(output)  # clamp
            output = self.dequantize(output)  # dequantize
self.step += 1
return output
def get_quantize_value(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported !')
assert self.bits != 1
else:
            output = self.quantize(input)  # quantize
            output = self.round(output)
            output = self.clamp(output)  # clamp
return output
    ################ get the bit-shift corresponding to the quantization scale
    def get_scale(self):
        ############# shift correction
        move_scale = math.log2(self.scale)
        move_scale = np.array(move_scale).reshape(1, -1)
        return move_scale
# symmetric quantization
class SymmetricQuantizer(Quantizer):
def update_params(self):
if self.sign:
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
else:
min_val = torch.tensor(0)
max_val = torch.tensor((1 << self.bits) - 1)
        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # quantized range
        float_max = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val))  # float (pre-quantization) range
        floor_float_range = 2 ** float_max.log2().floor()
        ceil_float_range = 2 ** float_max.log2().ceil()
        if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):
            float_range = ceil_float_range
        else:
            float_range = floor_float_range
        self.scale = float_range / quantized_range  # quantization scale
        self.zero_point = torch.zeros_like(self.scale)  # quantization zero point
# asymmetric quantization
class AsymmetricQuantizer(Quantizer):
def update_params(self):
if self.sign:
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
else:
min_val = torch.tensor(0)
max_val = torch.tensor((1 << self.bits) - 1)
        quantized_range = max_val - min_val  # quantized range
        float_range = self.range_tracker.max_val - self.range_tracker.min_val  # float (pre-quantization) range
        ceil_float_range = 2 ** float_range.log2().ceil()
        floor_float_range = 2 ** float_range.log2().floor()
        if abs(ceil_float_range - float_range) < abs(floor_float_range - float_range):
            float_range = ceil_float_range
        else:
            float_range = floor_float_range
        self.scale = float_range / quantized_range  # quantization scale
        self.zero_point = torch.round(max_val - self.range_tracker.max_val / self.scale)  # quantization zero point
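# A worked example of the power-of-two range selection above (editor's
# addition): with bits=8 and sign=True the quantized range is 127. If the
# tracked float max-abs is 0.9, floor/ceil of log2(0.9) give candidate ranges
# 0.5 and 1.0; 1.0 is closer to 0.9, so scale = 1.0 / 127. Snapping the float
# range to a power of two is what lets get_scale() report log2(scale) as an
# (approximate) bit-shift for the FPGA backend.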
def reshape_to_activation(input):
return input.reshape(1, -1, 1, 1)
def reshape_to_weight(input):
return input.reshape(-1, 1, 1, 1)
def reshape_to_bias(input):
return input.reshape(-1)
# ********************* BN-fold quantized convolution (fold BN, then quantize A/W and convolve) *********************
class BNFold_QuantizedConv2d_For_FPGA(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
eps=1e-5,
            momentum=0.01,  # momentum lowered from 0.1 to 0.01 to damp quantization jitter: weighting batch statistics less suppresses oscillation; in experiments QAT accuracy improved by roughly 1%
a_bits=8,
w_bits=8,
q_type=0,
bn=0,
activate='leaky',
steps=0,
quantizer_output=False,
reorder=False, TM=32, TN=32,
name='', layer_idx=-1, maxabsscaler=False
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias
)
self.bn = bn
self.activate = activate
self.eps = eps
self.momentum = momentum
self.BN_freeze_step = int(steps * 0.9)
self.Scale_freeze_step = int(steps * 0.1)
self.gamma = Parameter(torch.Tensor(out_channels))
self.beta = Parameter(torch.Tensor(out_channels))
self.register_buffer('running_mean', torch.zeros(out_channels))
self.register_buffer('running_var', torch.zeros(out_channels))
self.register_buffer('batch_mean', torch.zeros(out_channels))
self.register_buffer('batch_var', torch.zeros(out_channels))
self.register_buffer('first_bn', torch.zeros(1))
self.register_buffer('step', torch.zeros(1))
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
self.w_bits = w_bits
self.a_bits = a_bits
self.maxabsscaler = maxabsscaler
init.normal_(self.gamma, 1, 0.5)
init.zeros_(self.beta)
        # instantiate the quantizers (A at layer level, W at channel level)
if q_type == 0:
self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L',
out_channels=-1),
out_channels=-1, Scale_freeze_step=self.Scale_freeze_step)
self.weight_quantizer = SymmetricQuantizer(bits=w_bits,
range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),
out_channels=-1, Scale_freeze_step=self.Scale_freeze_step)
self.bias_quantizer = SymmetricQuantizer(bits=w_bits,
range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),
out_channels=-1, Scale_freeze_step=self.Scale_freeze_step)
else:
self.activation_quantizer = AsymmetricQuantizer(bits=a_bits,
range_tracker=AveragedRangeTracker(q_level='L',
out_channels=-1),
out_channels=-1, Scale_freeze_step=self.Scale_freeze_step,
sign=False)
self.weight_quantizer = AsymmetricQuantizer(bits=w_bits,
range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),
out_channels=-1, Scale_freeze_step=self.Scale_freeze_step,
sign=False)
self.bias_quantizer = AsymmetricQuantizer(bits=w_bits,
range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),
out_channels=-1, Scale_freeze_step=self.Scale_freeze_step,
sign=False)
def forward(self, input):
        # training mode
if self.training:
self.step += 1
if self.bn:
                # run a plain convolution first to obtain activations for the BN statistics
output = F.conv2d(
input=input,
weight=self.weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
                # update BN statistics (batch and running)
dims = [dim for dim in range(4) if dim != 1]
self.batch_mean = torch.mean(output, dim=dims)
self.batch_var = torch.var(output, dim=dims)
with torch.no_grad():
if self.first_bn == 0 and torch.equal(self.running_mean, torch.zeros_like(
self.running_mean)) and torch.equal(self.running_var, torch.zeros_like(self.running_var)):
self.first_bn.add_(1)
self.running_mean.add_(self.batch_mean)
self.running_var.add_(self.batch_var)
else:
self.running_mean.mul_(1 - self.momentum).add_(self.batch_mean * self.momentum)
self.running_var.mul_(1 - self.momentum).add_(self.batch_var * self.momentum)
                # BN fusion
                if self.step < self.BN_freeze_step:
                    if self.bias is not None:
                        bias = reshape_to_bias(
                            self.beta + (self.bias - self.batch_mean) * (
                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))
                    else:
                        bias = reshape_to_bias(
                            self.beta - self.batch_mean * (
                                    self.gamma / torch.sqrt(self.batch_var + self.eps)))  # fold bias with batch stats
                    weight = self.weight * reshape_to_weight(
                        self.gamma / torch.sqrt(self.batch_var + self.eps))  # fold weight with batch stats
                else:
                    if self.bias is not None:
                        bias = reshape_to_bias(
                            self.beta + (self.bias - self.running_mean) * (
                                    self.gamma / torch.sqrt(self.running_var + self.eps)))
                    else:
                        bias = reshape_to_bias(
                            self.beta - self.running_mean * (
                                    self.gamma / torch.sqrt(self.running_var + self.eps)))  # fold bias with running stats
                    weight = self.weight * reshape_to_weight(
                        self.gamma / torch.sqrt(self.running_var + self.eps))  # fold weight with running stats
else:
bias = self.bias
weight = self.weight
        # eval mode
        else:
            # print(self.running_mean, self.running_var)
            if self.bn:
                # BN fusion
                if self.bias is not None:
                    bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
                            self.gamma / torch.sqrt(self.running_var + self.eps)))
                else:
                    bias = reshape_to_bias(
                        self.beta - self.running_mean * self.gamma / torch.sqrt(
                            self.running_var + self.eps))  # fold bias with running stats
                weight = self.weight * reshape_to_weight(
                    self.gamma / torch.sqrt(self.running_var + self.eps))  # fold weight with running stats
            else:
                bias = self.bias
                weight = self.weight
        # quantize A and the BN-folded W and bias
q_weight = self.weight_quantizer(weight)
q_bias = self.bias_quantizer(bias)
        if self.quantizer_output == True:  # dump quantization parameters to txt files
            # create the quantizer_output directories
if not os.path.isdir('./quantizer_output'):
os.makedirs('./quantizer_output')
if not os.path.isdir('./quantizer_output/q_weight_out'):
os.makedirs('./quantizer_output/q_weight_out')
if not os.path.isdir('./quantizer_output/w_scale_out'):
os.makedirs('./quantizer_output/w_scale_out')
if not os.path.isdir('./quantizer_output/q_weight_max'):
os.makedirs('./quantizer_output/q_weight_max')
if not os.path.isdir('./quantizer_output/max_weight_count'):
os.makedirs('./quantizer_output/max_weight_count')
if not os.path.isdir('./quantizer_output/q_weight_reorder'):
os.makedirs('./quantizer_output/q_weight_reorder')
if not os.path.isdir('./quantizer_output/q_bias_reorder'):
os.makedirs('./quantizer_output/q_bias_reorder')
if self.layer_idx == -1:
                ####################### dump this layer's weight quantization scale
                weight_scale = - self.weight_quantizer.get_scale()
                np.savetxt(('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name), weight_scale, delimiter='\n')
                ####################### dump this layer's quantized weights
                q_weight_txt = self.weight_quantizer.get_quantize_value(weight)
                ############# weight reordering
                w_para = q_weight_txt  # tensor to reorder
                if self.reorder == True:
# print("use weights reorder!")
shape_output = w_para.shape[0]
shape_input = w_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
num_TM = int(shape_output / self.TM)
remainder_TM = shape_output % self.TM
first = True
reorder_w_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
for k in range(num_TN):
temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for j in range(num_TM):
                            if shape_input == 3 or shape_input == 1:  # first layer
print('The first layer~~~~~~~~~~~~')
temp = w_para[j * self.TM:(j + 1) * self.TM,
num_TN * self.TN:num_TN * self.TN + remainder_TN, :,
:]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)
fill[:, 0:remainder_TN, :] = temp
temp = fill.permute(2, 0, 1).contiguous().view(-1)
                                if first:  # allocate the storage array
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]
                                    # merge into the TM*TN*(K^2) tensor layout of Fig. 10(a) in the paper
                                    temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
                                    # convert to the reordered layout of Fig. 10(b)
                                    temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
w_para_flatten = reorder_w_para
# print(reorder_w_para.size)
                    ##### sanity-check the reordering result
                    '''if w_para_flatten.size == w_para.shape[0] * w_para.shape[1] * w_para.shape[2] * w_para.shape[3]:
                        print("weights convert correctly!")
                    else:
                        print("weights convert mismatchingly!")'''
q_weight_reorder = w_para_flatten
q_weight_reorder = np.array(q_weight_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name), q_weight_reorder,
delimiter='\n')
                ################ end of weight reordering
q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)
q_weight_max = [np.max(q_weight_txt)]
# q_weight_max = np.argmax(q_weight_txt)
                max_weight_count = [np.sum(abs(q_weight_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count overflowed values in this layer
np.savetxt(('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name), max_weight_count)
np.savetxt(('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name), q_weight_max)
np.savetxt(('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name), q_weight_txt,
delimiter='\n')
# io.savemat('save.mat',{'q_weight_txt':q_weight_txt})
                ####################### create directories for the bias txt dumps
if not os.path.isdir('./quantizer_output/q_bias_out'):
os.makedirs('./quantizer_output/q_bias_out')
if not os.path.isdir('./quantizer_output/b_scale_out'):
os.makedirs('./quantizer_output/b_scale_out')
                ####################### dump this layer's bias quantization scale
bias_scale = - self.bias_quantizer.get_scale()
np.savetxt(('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name), bias_scale, delimiter='\n')
                ####################### dump this layer's quantized bias
q_bias_txt = self.bias_quantizer.get_quantize_value(bias)
q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name), q_bias_txt, delimiter='\n')
                ############# bias reordering
if self.reorder == True:
b_para = np.zeros(2048, dtype=int)
b_para[0:q_bias_txt.size] = q_bias_txt
# print(b_para.shape)
# b_para = np.array(b_para.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name), b_para,
delimiter='\n')
                    ###### save the reordered weights and bias as a binary file
bias_weight_reorder = np.append(b_para, q_weight_reorder)
wb_flat = bias_weight_reorder.astype(np.int8)
writer = open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, "wb")
writer.write(wb_flat)
writer.close()
                ################ end of bias reordering
            elif int(self.name[1:4]) == self.layer_idx:
                ####################### dump this layer's weight quantization scale
                weight_scale = - self.weight_quantizer.get_scale()
                np.savetxt(('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name), weight_scale, delimiter='\n')
                ####################### dump this layer's quantized weights
                q_weight_txt = self.weight_quantizer.get_quantize_value(weight)
                ############# weight reordering
                w_para = q_weight_txt  # tensor to reorder
                if self.reorder == True:
# print("use weights reorder!")
shape_output = w_para.shape[0]
shape_input = w_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
num_TM = int(shape_output / self.TM)
remainder_TM = shape_output % self.TM
first = True
reorder_w_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
for k in range(num_TN):
temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for j in range(num_TM):
                            if shape_input == 3 or shape_input == 1:  # first layer
print('The first layer~~~~~~~~~~~~')
temp = w_para[j * self.TM:(j + 1) * self.TM,
num_TN * self.TN:num_TN * self.TN + remainder_TN, :,
:]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)
fill[:, 0:remainder_TN, :] = temp
temp = fill.permute(2, 0, 1).contiguous().view(-1)
                                if first:  # allocate the storage array
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]
                                    # merge into the TM*TN*(K^2) tensor layout of Fig. 10(a) in the paper
                                    temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
                                    # convert to the reordered layout of Fig. 10(b)
                                    temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
w_para_flatten = reorder_w_para
# print(reorder_w_para.size)
                    ##### sanity-check the reordering result
                    '''if w_para_flatten.size == w_para.shape[0] * w_para.shape[1] * w_para.shape[2] * w_para.shape[3]:
                        print("weights convert correctly!")
                    else:
                        print("weights convert mismatchingly!")'''
q_weight_reorder = w_para_flatten
q_weight_reorder = np.array(q_weight_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name), q_weight_reorder,
delimiter='\n')
                ################ end of weight reordering
q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)
q_weight_max = [np.max(q_weight_txt)]
# q_weight_max = np.argmax(q_weight_txt)
                max_weight_count = [np.sum(abs(q_weight_txt) >= 127)]  # count overflowed values in this layer
np.savetxt(('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name), max_weight_count)
np.savetxt(('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name), q_weight_max)
np.savetxt(('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name), q_weight_txt,
delimiter='\n')
# io.savemat('save.mat',{'q_weight_txt':q_weight_txt})
                ####################### create directories for the bias txt dumps
if not os.path.isdir('./quantizer_output/q_bias_out'):
os.makedirs('./quantizer_output/q_bias_out')
if not os.path.isdir('./quantizer_output/b_scale_out'):
os.makedirs('./quantizer_output/b_scale_out')
                ####################### dump this layer's bias quantization scale
bias_scale = - self.bias_quantizer.get_scale()
np.savetxt(('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name), bias_scale, delimiter='\n')
                ####################### dump this layer's quantized bias
q_bias_txt = self.bias_quantizer.get_quantize_value(bias)
q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name), q_bias_txt, delimiter='\n')
                ############# bias reordering
if self.reorder == True:
b_para = np.zeros(2048, dtype=int)
b_para[0:q_bias_txt.size] = q_bias_txt
# print(b_para.shape)
# b_para = np.array(b_para.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name), b_para,
delimiter='\n')
                    ###### save the reordered weights and bias as a binary file
bias_weight_reorder = np.append(b_para, q_weight_reorder)
wb_flat = bias_weight_reorder.astype(np.int8)
writer = open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, "wb")
writer.write(wb_flat)
writer.close()
                ################ end of bias reordering
        # quantized convolution
output = F.conv2d(
input=input,
weight=q_weight,
bias=q_bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
if self.activate == 'leaky':
output = F.leaky_relu(output, 0.1 if not self.maxabsscaler else 0.25, inplace=True)
elif self.activate == 'relu6':
output = F.relu6(output, inplace=True)
elif self.activate == 'h_swish':
output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)
elif self.activate == 'relu':
output = F.relu(output, inplace=True)
elif self.activate == 'mish':
output = output * F.softplus(output).tanh()
elif self.activate == 'linear':
# return output
pass
else:
print(self.activate + "%s is not supported !")
if self.quantizer_output == True:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
                ################## dump this activation's quantization scale
activation_scale = - self.activation_quantizer.get_scale()
np.savetxt(('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name), activation_scale,
delimiter='\n')
                ################## dump this layer's quantized activations
q_activation_txt = self.activation_quantizer.get_quantize_value(output)
a_para = q_activation_txt
                ############# input feature-map reordering
                if self.reorder == True:
                    # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
temp = a_para[:, 0:remainder_TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
                    ##### sanity-check the reordering result
                    '''if a_para_flatten.size == a_para.shape[0] * a_para.shape[1] * a_para.shape[2] * a_para.shape[3]:
                        print("activation convert correctly!")
                    else:
                        print("activation convert mismatchingly!")'''
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                    ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                ########## end of feature-map reordering
                q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)
                q_activation_max = [np.max(q_activation_txt)]  # record this layer's max (to check for overflow)
                max_activation_count = [np.sum(abs(q_activation_txt) >= (1 << (self.a_bits - 1)) - 1)]  # count overflowed values in this layer
# q_weight_max = np.argmax(q_weight_txt)
np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),
max_activation_count)
np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)
np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
                ################## dump this activation's quantization scale
activation_scale = - self.activation_quantizer.get_scale()
np.savetxt(('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name), activation_scale,
delimiter='\n')
                ################## dump this layer's quantized activations
q_activation_txt = self.activation_quantizer.get_quantize_value(output)
a_para = q_activation_txt
                ############# input feature-map reordering
                if self.reorder == True:
                    # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
temp = a_para[:, 0:remainder_TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
                    ##### sanity-check the reordering result
                    '''if a_para_flatten.size == a_para.shape[0] * a_para.shape[1] * a_para.shape[2] * a_para.shape[3]:
                        print("activation convert correctly!")
                    else:
                        print("activation convert mismatchingly!")'''
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                    ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                ########## end of feature-map reordering
                q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)
                q_activation_max = [np.max(q_activation_txt)]  # record this layer's max (to check for overflow)
                max_activation_count = [np.sum(abs(q_activation_txt) >= 127)]  # count overflowed values in this layer
# q_weight_max = np.argmax(q_weight_txt)
np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),
max_activation_count)
np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)
np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,
delimiter='\n')
output = self.activation_quantizer(output)
return output
    def BN_fuse(self):
        if self.bn:
            # BN fusion
            if self.bias is not None:
                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
                        self.gamma / torch.sqrt(self.running_var + self.eps)))
            else:
                bias = reshape_to_bias(
                    self.beta - self.running_mean * self.gamma / torch.sqrt(
                        self.running_var + self.eps))  # fold bias with running stats
            weight = self.weight * reshape_to_weight(
                self.gamma / torch.sqrt(self.running_var + self.eps))  # fold weight with running stats
        else:
            bias = self.bias
            weight = self.weight
        return weight, bias
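# Minimal sketch of the TM/TN weight reordering performed in forward() above
# (editor's addition; assumes Cout % TM == 0 and Cin % TN == 0, i.e. the
# remainder tiles and the zero-padded first layer are omitted for brevity).
# Each TM x TN channel tile is flattened so the K*K kernel positions vary
# slowest, matching the stream layout the FPGA accelerator expects.
def _reorder_weights_sketch(w, TM=32, TN=32):
    Cout, Cin, K, _ = w.shape
    chunks = []
    for j in range(Cout // TM):
        for k in range(Cin // TN):
            tile = w[j * TM:(j + 1) * TM, k * TN:(k + 1) * TN].reshape(TM, TN, K * K)
            chunks.append(tile.permute(2, 0, 1).contiguous().view(-1))
    return torch.cat(chunks)  # 1-D stream of length Cout * Cin * K * K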
class QuantizedShortcut_max(nn.Module): # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, layers, weight=False, bits=8,
quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):
super(QuantizedShortcut_max, self).__init__()
self.layers = layers # layer indices
self.weight = weight # apply weights boolean
self.n = len(layers) + 1 # number of layers
self.bits = bits
self.range_tracker_x = AveragedRangeTracker(q_level='L', out_channels=-1)
self.range_tracker_a = AveragedRangeTracker(q_level='L', out_channels=-1)
self.range_tracker_sum = AveragedRangeTracker(q_level='L', out_channels=-1)
        self.register_buffer('scale', torch.zeros(1))  # quantization scale
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True) # layer weights
    # quantize
def quantize(self, input):
output = input / self.scale
return output
def round(self, input):
output = Round.apply(input)
return output
    # clamp
def clamp(self, input):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
output = torch.clamp(input, min_val, max_val)
return output
    # dequantize
def dequantize(self, input):
output = (input) * self.scale
return output
def forward(self, x, outputs):
# Weights
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
x = x * w[0]
# Fusion
nx = x.shape[1] # input channels
for i in range(self.n - 1):
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]] # feature to add
na = a.shape[1] # feature channels
            if self.training == True:
                # track the ranges of both inputs (and, below, the sum) to derive one shared scale
                self.range_tracker_x(x)
                self.range_tracker_a(a)
if nx == na: # same shape
self.range_tracker_sum(x + a)
elif nx > na: # slice input
self.range_tracker_sum(x[:, :na] + a) # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
self.range_tracker_sum(x + a[:, :nx])
float_max_val = max(self.range_tracker_sum.max_val, self.range_tracker_x.max_val,
self.range_tracker_a.max_val)
float_min_val = min(self.range_tracker_sum.min_val, self.range_tracker_x.min_val,
self.range_tracker_a.min_val)
quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))
quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)
                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # quantized range
                float_max = torch.max(torch.abs(float_min_val),
                                      torch.abs(float_max_val))  # float (pre-quantization) range
floor_float_range = 2 ** float_max.log2().floor()
ceil_float_range = 2 ** float_max.log2().ceil()
if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):
float_range = ceil_float_range
else:
float_range = floor_float_range
                self.scale = float_range / quantized_range  # quantization scale
            # dump the quantization scale
if self.quantizer_output == True:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
move_scale = math.log2(self.scale)
shortcut_scale = -np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
move_scale = math.log2(self.scale)
shortcut_scale = -np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,
delimiter='\n')
            # quantize x: quantize, round, clamp, then dequantize
            x = self.quantize(x)
            x = self.round(x)
            x = self.clamp(x)
            x = self.dequantize(x)
            # quantize a likewise
            a = self.quantize(a)
            a = self.round(a)
            a = self.clamp(a)
            a = self.dequantize(a)
# Adjust channels
if nx == na: # same shape
x = x + a
elif nx > na: # slice input
x[:, :na] = x[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
x = x + a[:, :nx]
            # quantize the sum
            x = self.quantize(x)  # quantize
            x = self.round(x)
            x = self.clamp(x)  # clamp
            # dump the quantized feature-map data
if self.quantizer_output == True:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
q_x_shortcut = x
if self.reorder == True:
a_para = q_x_shortcut
                        # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                        ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                        ########## end of shortcut reordering
Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
q_x_shortcut = x
if self.reorder == True:
a_para = q_x_shortcut
                        # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                        ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                        ########## end of shortcut reordering
Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,
delimiter='\n')
        x = self.dequantize(x)  # dequantize
return x
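# Editor's note (added): QuantizedShortcut_max quantizes both residual inputs
# and their sum with one shared power-of-two scale (derived from the max range
# of x, a, and x + a), so the shortcut addition can run directly on the int8
# codes without rescaling. QuantizedShortcut_min below instead uses a separate
# input_scale for the addends and a second scale for the sum, trading an extra
# requantization for tighter per-tensor ranges.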
class QuantizedShortcut_min(nn.Module): # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, layers, weight=False, bits=8,
quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):
super(QuantizedShortcut_min, self).__init__()
self.layers = layers # layer indices
self.weight = weight # apply weights boolean
self.n = len(layers) + 1 # number of layers
self.bits = bits
self.range_tracker_x = AveragedRangeTracker(q_level='L', out_channels=-1)
self.range_tracker_a = AveragedRangeTracker(q_level='L', out_channels=-1)
self.range_tracker_sum = AveragedRangeTracker(q_level='L', out_channels=-1)
        self.register_buffer('input_scale', torch.zeros(1))  # input quantization scale
        self.register_buffer('scale', torch.zeros(1))  # output quantization scale
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True) # layer weights
    # quantize
    def quantize(self, input, feature_in=False):
        if feature_in:
            output = input / self.input_scale
        else:
            output = input / self.scale
        return output
def round(self, input):
output = Round.apply(input)
return output
    # clamp
def clamp(self, input):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
output = torch.clamp(input, min_val, max_val)
return output
    # dequantize
    def dequantize(self, input, feature_in=False):
        if feature_in:
            output = input * self.input_scale
        else:
            output = input * self.scale
        return output
def forward(self, x, outputs):
# Weights
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
x = x * w[0]
# Fusion
nx = x.shape[1] # input channels
for i in range(self.n - 1):
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]] # feature to add
na = a.shape[1] # feature channels
            if self.training == True:
                # track the ranges of the two input features to derive a shared input scale
self.range_tracker_a(a)
self.range_tracker_x(x)
float_max_val = min(self.range_tracker_x.max_val, self.range_tracker_a.max_val)
float_min_val = max(self.range_tracker_x.min_val, self.range_tracker_a.min_val)
quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))
quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)
                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # quantized range
                float_max = torch.max(torch.abs(float_min_val),
                                      torch.abs(float_max_val))  # float (pre-quantization) range
floor_float_range = 2 ** float_max.log2().floor()
ceil_float_range = 2 ** float_max.log2().ceil()
if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):
float_range = ceil_float_range
else:
float_range = floor_float_range
                self.input_scale = float_range / quantized_range  # input quantization scale
            # quantize x (round-trip through the shared input scale)
            x = self.quantize(x, feature_in=True)
            x = self.round(x)
            x = self.dequantize(x, feature_in=True)
            # quantize a likewise
            a = self.quantize(a, feature_in=True)
            a = self.round(a)
            a = self.dequantize(a, feature_in=True)
# Adjust channels
if nx == na: # same shape
x = x + a
elif nx > na: # slice input
x[:, :na] = x[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
x = x + a[:, :nx]
            # quantize the sum
            if self.training == True:
                # track the range of the sum to derive the output scale
                self.range_tracker_sum(x)
float_max_val = self.range_tracker_sum.max_val
float_min_val = self.range_tracker_sum.min_val
quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))
quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)
                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # quantized range
                float_max = torch.max(torch.abs(float_min_val),
                                      torch.abs(float_max_val))  # float (pre-quantization) range
floor_float_range = 2 ** float_max.log2().floor()
ceil_float_range = 2 ** float_max.log2().ceil()
if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):
float_range = ceil_float_range
else:
float_range = floor_float_range
                self.scale = float_range / quantized_range  # output quantization scale
            x = self.quantize(x)  # quantize
            x = self.round(x)
            x = self.clamp(x)  # clamp
            # dump the quantization scale
if self.quantizer_output == True:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
move_scale = math.log2(self.scale)
shortcut_scale = - np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
move_scale = math.log2(self.scale)
shortcut_scale = - np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,
delimiter='\n')
            # dump the quantized feature-map data
if self.quantizer_output == True:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
q_x_shortcut = x
if self.reorder == True:
a_para = q_x_shortcut
                        # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                        ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                        ########## end of shortcut reordering
Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
q_x_shortcut = x
if self.reorder == True:
a_para = q_x_shortcut
                        # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                        ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                        ########## end of shortcut reordering
Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,
delimiter='\n')
        x = self.dequantize(x)  # dequantize
return x
class QuantizedFeatureConcat(nn.Module):
def __init__(self, layers, groups, bits=8,
quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):
super(QuantizedFeatureConcat, self).__init__()
self.layers = layers # layer indices
self.groups = groups
self.multiple = len(layers) > 1 # multiple layers flag
        self.register_buffer('scale', torch.zeros(1))  # quantization scale
self.register_buffer('float_max_list', torch.zeros(len(layers)))
self.bits = bits
self.momentum = 0.1
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
    # quantize
def quantize(self, input):
output = input / self.scale
return output
def round(self, input):
output = Round.apply(input)
return output
    # clamp
def clamp(self, input):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
output = torch.clamp(input, min_val, max_val)
return output
    # dequantize
def dequantize(self, input):
output = (input) * self.scale
return output
def forward(self, x, outputs):
if self.multiple:
if self.training == True:
quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))
quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)
                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # quantized range
j = 0
for i in self.layers:
temp = outputs[i].detach()
if self.float_max_list[j] == 0:
self.float_max_list[j].add_(
torch.max(torch.max(temp), torch.abs(torch.min(temp))))
else:
self.float_max_list[j].mul_(1 - self.momentum).add_(
torch.max(torch.max(temp), torch.abs(torch.min(temp))) * self.momentum)
j = j + 1
del temp
torch.cuda.empty_cache()
                float_max = max(self.float_max_list).unsqueeze(0)  # float (pre-quantization) range
floor_float_range = 2 ** float_max.log2().floor()
ceil_float_range = 2 ** float_max.log2().ceil()
if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):
float_range = ceil_float_range
else:
float_range = floor_float_range
self.scale = float_range / quantized_range  # quantization scale factor
if self.quantizer_output == True:
if self.layer_idx == -1:
q_a_concat = copy.deepcopy(outputs)
move_scale = math.log2(self.scale)
concat_scale = -np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/concat_scale_%s.txt' % self.name), concat_scale,
delimiter='\n')
for i in self.layers:
q_a_concat[i] = self.quantize(q_a_concat[i])  # quantize
q_a_concat[i] = self.round(q_a_concat[i])
q_a_concat[i] = self.clamp(q_a_concat[i])  # clamp
Q_concat = torch.cat([q_a_concat[i] for i in self.layers], 1)
if self.reorder == True:
a_para = Q_concat
# Reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_concat_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
### Save the reordered activations as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_concat_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
########## End of concat reordering
Q_concat = np.array(Q_concat.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/a_concat_%s.txt' % self.name), Q_concat,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
q_a_concat = copy.deepcopy(outputs)
move_scale = math.log2(self.scale)
concat_scale = -np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/concat_scale_%s.txt' % self.name), concat_scale,
delimiter='\n')
for i in self.layers:
q_a_concat[i] = self.quantize(q_a_concat[i])  # quantize
q_a_concat[i] = self.round(q_a_concat[i])
q_a_concat[i] = self.clamp(q_a_concat[i])  # clamp
Q_concat = torch.cat([q_a_concat[i] for i in self.layers], 1)
if self.reorder == True:
a_para = Q_concat
# Reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_concat_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
### Save the reordered activations as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_concat_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
########## End of concat reordering
Q_concat = np.array(Q_concat.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/a_concat_%s.txt' % self.name), Q_concat,
delimiter='\n')
# Quantize -> round -> clamp -> dequantize each source layer before concatenation
for i in self.layers:
outputs[i] = self.quantize(outputs[i])  # quantize
outputs[i] = self.round(outputs[i])
outputs[i] = self.clamp(outputs[i])  # clamp
outputs[i] = self.dequantize(outputs[i])  # dequantize
return torch.cat([outputs[i] for i in self.layers], 1)
else:
if self.groups:
return x[:, (x.shape[1] // 2):]
else:
return outputs[self.layers[0]]
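
# --- Editor's sketch (not part of the original source): a standalone illustration
# of the power-of-two range rounding used in forward() above. The helper name
# `nearest_pow2_range` is hypothetical. With bits=8 the quantized range is
# max(|-128|, |127|) = 128, so the resulting scale is itself a power of two and
# dequantization reduces to a bit shift on the FPGA.
def nearest_pow2_range(float_max, bits=8):
    float_max = torch.as_tensor(float(float_max))
    floor_range = 2 ** float_max.log2().floor()
    ceil_range = 2 ** float_max.log2().ceil()
    if abs(ceil_range - float_max) < abs(floor_range - float_max):
        float_range = ceil_range
    else:
        float_range = floor_range
    return float_range, float_range / (1 << (bits - 1))

# e.g. an observed |max| of 5.3 rounds down to 4.0, giving scale = 4 / 128 = 2**-5:
# nearest_pow2_range(5.3) -> (tensor(4.), tensor(0.0312))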
================================================
FILE: utils/quantized/quantized_lowbit.py
================================================
# Author:LiPu
import math
import torch
import torch.nn as nn
from torch.autograd import Function
import torch.nn.functional as F
# Ternarization function with custom forward and backward passes
class Ternarize(Function):
'''
Ternarize the input to {-1, 0, 1} with thresholds at +/-0.5; the straight-through
gradient is clipped outside [-1, 1].
'''
# The ternary activation is defined via static methods
@staticmethod
def forward(self, input):
self.save_for_backward(input)
output = input.new(input.size())
output[input > 0.5] = 1
# Elementwise AND of the two range tests selects the values in [-0.5, 0.5].
# (The original add()-based workaround first mapped 2 -> 1 and then 1 -> 0,
# which zeroed the whole mask, so no element was ever set to 0.)
temp = (input >= -0.5) & (input <= 0.5)
output[temp] = 0
output[input < -0.5] = -1
return output
@staticmethod
def backward(self, grad_output):
input, = self.saved_tensors
grad_input = grad_output.clone()
grad_input[input.ge(1)] = 0
grad_input[input.le(-1)] = 0
return grad_input
# Binarization function with custom forward and backward passes
class Binarize(Function):
@staticmethod
def forward(self, input):
self.save_for_backward(input)
output = input.new(input.size())
output[input >= 0] = 1
output[input < 0] = 0
return output
@staticmethod
def backward(self, grad_output):
input, = self.saved_tensors
grad_input = grad_output.clone()
grad_input[input.ge(1)] = 0
grad_input[input.le(-1)] = 0
return grad_input
binarize = Binarize.apply
ternarize = Ternarize.apply
# Overridden LeakyReLU that binarizes its input
class BinaryLeakyReLU(nn.LeakyReLU):
def __init__(self):
super(BinaryLeakyReLU, self).__init__()
def forward(self, input):
# the original referenced an undefined `EQ`; binarize is the only unary
# activation quantizer defined in this file, so it is assumed here
output = binarize(input)
return output
# Quantize the linear layer's weights (note: despite the class name, ternarize
# is applied); a reset_parameters method is required
class BinaryLinear(nn.Linear):
def forward(self, input):
binary_weight = ternarize(self.weight)
if self.bias is None:
return F.linear(input, binary_weight)
else:
return F.linear(input, binary_weight, self.bias)
def reset_parameters(self):
# Glorot initialization
in_features, out_features = self.weight.size()
stdv = math.sqrt(1.5 / (in_features + out_features))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.zero_()
self.weight.lr_scale = 1. / stdv
# BWN (Binary-Weight-Network) quantization: binary weights scaled by alpha = mean(|W|)
class BWNConv2d(nn.Conv2d):
def forward(self, input):
bw = binarize(self.weight)
alpha = torch.div(self.weight.norm(1), torch.numel(self.weight))
output = alpha * (F.conv2d(input, bw, self.bias, self.stride,
self.padding, self.dilation, self.groups))
return output
def reset_parameters(self):
# Glorot initialization
in_features = self.in_channels
out_features = self.out_channels
for k in self.kernel_size:
in_features *= k
out_features *= k
stdv = math.sqrt(1.5 / (in_features + out_features))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.zero_()
self.weight.lr_scale = 1. / stdv
# BNN (Binarized Neural Network) quantization
class BinaryConv2d(nn.Conv2d):
def forward(self, input):
# bw = (self.weight - torch.mean(self.weight)) / torch.sqrt(torch.std(self.weight))
bw = binarize(self.weight)
return F.conv2d(input, bw, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def reset_parameters(self):
# Glorot initialization
in_features = self.in_channels
out_features = self.out_channels
for k in self.kernel_size:
in_features *= k
out_features *= k
stdv = math.sqrt(1.5 / (in_features + out_features))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.zero_()
self.weight.lr_scale = 1. / stdv
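
# --- Editor's sketch (not part of the original source): a quick forward-only check
# of the two quantization functions defined above.
if __name__ == '__main__':
    x = torch.tensor([-1.2, -0.4, 0.0, 0.4, 0.9])
    print(ternarize(x))  # expected: tensor([-1., 0., 0., 0., 1.])
    print(binarize(x))   # expected: tensor([0., 0., 1., 1., 1.])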
================================================
FILE: utils/quantized/quantized_ptq.py
================================================
# Author:LiPu
import math
import time
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
# ********************* range trackers (record the pre-quantization float range) *********************
class RangeTracker(nn.Module):
def __init__(self, q_level):
super().__init__()
self.q_level = q_level
def update_range(self, min_val, max_val):
raise NotImplementedError
@torch.no_grad()
def forward(self, input):
if self.q_level == 'L':  # A: min_max_shape=(1, 1, 1, 1), layer-level
min_val = torch.min(input)
max_val = torch.max(input)
elif self.q_level == 'C':  # W: min_max_shape=(N, 1, 1, 1), channel-level
min_val = torch.min(torch.min(torch.min(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]
max_val = torch.max(torch.max(torch.max(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]
self.update_range(min_val, max_val)
class GlobalRangeTracker(RangeTracker):  # W: min_max_shape=(N, 1, 1, 1), channel-level; keeps the min/max seen so far -- (N, C, W, H)
def __init__(self, q_level, out_channels):
super().__init__(q_level)
if self.q_level == 'L':
self.register_buffer('min_val', torch.zeros(1))
self.register_buffer('max_val', torch.zeros(1))
elif self.q_level == 'C':
self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('first_w', torch.zeros(1))
def update_range(self, min_val, max_val):
temp_minval = self.min_val
temp_maxval = self.max_val
if self.first_w == 0:
self.first_w.add_(1)
self.min_val.add_(min_val)
self.max_val.add_(max_val)
else:
self.min_val.add_(-temp_minval).add_(torch.min(temp_minval, min_val))
self.max_val.add_(-temp_maxval).add_(torch.max(temp_maxval, max_val))
class AveragedRangeTracker(RangeTracker):  # A: min_max_shape=(1, 1, 1, 1), layer-level; keeps a running-average min/max -- (N, C, W, H)
def __init__(self, q_level, out_channels, momentum=0.1):
super().__init__(q_level)
self.momentum = momentum
if self.q_level == 'L':
self.register_buffer('min_val', torch.zeros(1))
self.register_buffer('max_val', torch.zeros(1))
elif self.q_level == 'C':
self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('first_a', torch.zeros(1))
def update_range(self, min_val, max_val):
if self.first_a == 0:
self.first_a.add_(1)
self.min_val.add_(min_val)
self.max_val.add_(max_val)
else:
self.min_val.mul_(1 - self.momentum).add_(min_val * self.momentum)
self.max_val.mul_(1 - self.momentum).add_(max_val * self.momentum)
# ********************* quantizers *********************
class Round(Function):
@staticmethod
def forward(self, input):
sign = torch.sign(input)
output = sign * torch.floor(torch.abs(input) + 0.5)
return output
@staticmethod
def backward(self, grad_output):
# straight-through estimator: without a backward pass, any backprop through
# the rounding op would raise; the gradient is simply passed through
return grad_output
class Quantizer(nn.Module):
def __init__(self, bits, range_tracker, out_channels, FPGA, sign=True):
super().__init__()
self.bits = bits
self.range_tracker = range_tracker
self.FPGA = FPGA
self.sign = sign
if out_channels == -1:
self.register_buffer('scale', torch.zeros(1))  # quantization scale factor
self.register_buffer('zero_point', torch.zeros(1))  # quantization zero point
else:
self.register_buffer('scale', torch.zeros(out_channels, 1, 1, 1))  # quantization scale factor
self.register_buffer('zero_point', torch.zeros(out_channels, 1, 1, 1))  # quantization zero point
def update_params(self):
raise NotImplementedError
# Quantize
def quantize(self, input):
output = input / self.scale + self.zero_point
return output
def round(self, input):
output = Round.apply(input)
return output
# Clamp to the quantized range
def clamp(self, input):
if self.sign:
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
else:
min_val = torch.tensor(0)
max_val = torch.tensor((1 << self.bits) - 1)
output = torch.clamp(input, min_val, max_val)
return output
# Dequantize
def dequantize(self, input):
output = (input - self.zero_point) * self.scale
return output
def forward(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('Binary quantization is not supported!')
assert self.bits != 1
else:
if self.training == True:
self.range_tracker(input)
self.update_params()
output = self.quantize(input)  # quantize
output = self.round(output)
output = self.clamp(output)  # clamp
output = self.dequantize(output)  # dequantize
return output
def get_quantize_value(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('Binary quantization is not supported!')
assert self.bits != 1
else:
output = self.quantize(input)  # quantize
output = self.round(output)
output = self.clamp(output)  # clamp
return output
################ Get the bit-shift amount corresponding to the scale factor
def get_scale(self):
############# shift correction
move_scale = math.log2(self.scale)
move_scale = np.array(move_scale).reshape(1, -1)
return move_scale
# Symmetric quantization
class SymmetricQuantizer(Quantizer):
def update_params(self):
if self.sign:
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
else:
min_val = torch.tensor(0)
max_val = torch.tensor((1 << self.bits) - 1)
quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # quantized (integer) range
if self.FPGA == False:
float_range = torch.max(torch.abs(self.range_tracker.min_val),
torch.abs(self.range_tracker.max_val))  # float (pre-quantization) range
else:
float_max = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val))  # float (pre-quantization) range
floor_float_range = 2 ** float_max.log2().floor()
ceil_float_range = 2 ** float_max.log2().ceil()
if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):
float_range = ceil_float_range
else:
float_range = floor_float_range
self.scale = float_range / quantized_range  # quantization scale factor
self.zero_point = torch.zeros_like(self.scale)  # quantization zero point
# Asymmetric quantization
class AsymmetricQuantizer(Quantizer):
def update_params(self):
if self.sign:
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
else:
min_val = torch.tensor(0)
max_val = torch.tensor((1 << self.bits) - 1)
quantized_range = max_val - min_val  # quantized (integer) range
if self.FPGA == False:
float_range = self.range_tracker.max_val - self.range_tracker.min_val  # float (pre-quantization) range
else:
float_range = self.range_tracker.max_val - self.range_tracker.min_val  # float (pre-quantization) range
ceil_float_range = 2 ** float_range.log2().ceil()
floor_float_range = 2 ** float_range.log2().floor()
if abs(ceil_float_range - float_range) < abs(floor_float_range - float_range):
float_range = ceil_float_range
else:
float_range = floor_float_range
self.scale = float_range / quantized_range  # quantization scale factor
self.zero_point = torch.round(max_val - self.range_tracker.max_val / self.scale)  # quantization zero point
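
# --- Editor's worked example (not part of the original source): the update rules
# above for an unsigned 8-bit asymmetric quantizer (sign=False, FPGA=False) that
# has observed min_val = -1.0 and max_val = 3.0:
#   quantized_range = 255 - 0 = 255
#   float_range     = 3.0 - (-1.0) = 4.0
#   scale           = 4.0 / 255 ≈ 0.0157
#   zero_point      = round(255 - 3.0 / scale) = round(63.75) = 64
# so a real value r maps to q = clamp(round(r / scale + zero_point), 0, 255).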
# ********************* Quantized convolution (quantize both A and W, then convolve) *********************
class PTQuantizedConv2d(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
a_bits=8,
w_bits=8,
q_type=0):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias
)
# Instantiate the quantizers (A: layer-level, W: channel-level)
if q_type == 0:
self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L',
out_channels=-1),
out_channels=-1, FPGA=False)
self.weight_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C',
out_channels=out_channels),
out_channels=out_channels, FPGA=False)
else:
self.activation_quantizer = AsymmetricQuantizer(bits=a_bits,
range_tracker=AveragedRangeTracker(q_level='L',
out_channels=-1),
out_channels=-1, FPGA=False, sign=False)
self.weight_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C',
out_channels=out_channels),
out_channels=out_channels, FPGA=False, sign=False)
def forward(self, input):
# Quantize A and W (the 3-channel network input is left unquantized)
if input.shape[1] != 3:
input = self.activation_quantizer(input)
q_weight = self.weight_quantizer(self.weight)
# Quantized convolution
output = F.conv2d(
input=input,
weight=q_weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
return output
def reshape_to_activation(input):
return input.reshape(1, -1, 1, 1)
def reshape_to_weight(input):
return input.reshape(-1, 1, 1, 1)
def reshape_to_bias(input):
return input.reshape(-1)
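
# --- Editor's sketch (not part of the original source): a minimal calibration flow
# for the PTQuantizedConv2d defined above. In train() mode the range trackers and
# the derived scales update on every batch; eval() reuses the frozen scales. The
# helper name and the random data are purely illustrative.
def _ptq_calibration_example():
    layer = PTQuantizedConv2d(16, 32, kernel_size=3, padding=1, a_bits=8, w_bits=8)
    layer.train()  # enable range tracking / scale updates
    with torch.no_grad():
        for _ in range(4):  # a few "calibration" batches
            layer(torch.randn(2, 16, 8, 8))
    layer.eval()  # reuse the frozen scales from here on
    with torch.no_grad():
        return layer(torch.randn(2, 16, 8, 8))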
# ********************* BN-folded quantized convolution (fold BN, then quantize A/W and convolve) *********************
class BNFold_PTQuantizedConv2d_For_FPGA(PTQuantizedConv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
eps=1e-5,
momentum=0.01,  # to damp quantization-induced jitter, momentum is lowered (0.1 -> 0.01), shrinking the share of per-batch statistics; experimentally this improves quantized training by roughly 1% accuracy
a_bits=8,
w_bits=8,
q_type=0,
bn=0,
activate='leaky',
quantizer_output=False,
reorder=False, TM=32, TN=32,
name='', layer_idx=-1,
maxabsscaler=False
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias
)
self.bn = bn
self.activate = activate
self.eps = eps
self.momentum = momentum
self.gamma = Parameter(torch.Tensor(out_channels))
self.beta = Parameter(torch.Tensor(out_channels))
self.register_buffer('running_mean', torch.zeros(out_channels))
self.register_buffer('running_var', torch.zeros(out_channels))
self.register_buffer('batch_mean', torch.zeros(out_channels))
self.register_buffer('batch_var', torch.zeros(out_channels))
self.register_buffer('first_bn', torch.zeros(1))
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
self.maxabsscaler = maxabsscaler
self.a_bits = a_bits
self.w_bits = w_bits
# Instantiate the quantizers (A, W and bias are all layer-level here, for the FPGA path)
if q_type == 0:
self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L',
out_channels=-1),
out_channels=-1, FPGA=True)
self.weight_quantizer = SymmetricQuantizer(bits=w_bits,
range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),
out_channels=-1, FPGA=True)
self.bias_quantizer = SymmetricQuantizer(bits=w_bits,
range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),
out_channels=-1, FPGA=True)
else:
self.activation_quantizer = AsymmetricQuantizer(bits=a_bits,
range_tracker=AveragedRangeTracker(q_level='L',
out_channels=-1),
out_channels=-1, FPGA=True, sign=False)
self.weight_quantizer = AsymmetricQuantizer(bits=w_bits,
range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),
out_channels=-1, FPGA=True, sign=False)
self.bias_quantizer = AsymmetricQuantizer(bits=w_bits,
range_tracker=GlobalRangeTracker(q_level='L', out_channels=-1),
out_channels=-1, FPGA=True, sign=False)
def forward(self, input):
if self.bn:
# BN folding
if self.bias is not None:
bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
self.gamma / torch.sqrt(self.running_var + self.eps)))
else:
bias = reshape_to_bias(
self.beta - self.running_mean * self.gamma / torch.sqrt(
self.running_var + self.eps))  # fold the running mean into the bias
weight = self.weight * reshape_to_weight(
self.gamma / torch.sqrt(self.running_var + self.eps))  # fold the running variance into the weight
else:
bias = self.bias
weight = self.weight
# Quantize A and the BN-folded W
q_weight = self.weight_quantizer(weight)
q_bias = self.bias_quantizer(bias)
if self.quantizer_output == True:  # dump the quantization parameters to txt files
# Create the quantizer_output directories
if not os.path.isdir('./quantizer_output'):
os.makedirs('./quantizer_output')
if not os.path.isdir('./quantizer_output/q_weight_out'):
os.makedirs('./quantizer_output/q_weight_out')
if not os.path.isdir('./quantizer_output/w_scale_out'):
os.makedirs('./quantizer_output/w_scale_out')
if not os.path.isdir('./quantizer_output/q_weight_max'):
os.makedirs('./quantizer_output/q_weight_max')
if not os.path.isdir('./quantizer_output/max_weight_count'):
os.makedirs('./quantizer_output/max_weight_count')
if not os.path.isdir('./quantizer_output/q_weight_reorder'):
os.makedirs('./quantizer_output/q_weight_reorder')
if not os.path.isdir('./quantizer_output/q_bias_reorder'):
os.makedirs('./quantizer_output/q_bias_reorder')
if self.layer_idx == -1:
####################### Dump this layer's weight scale factor
weight_scale = - self.weight_quantizer.get_scale()
np.savetxt(('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name), weight_scale, delimiter='\n')
####################### Dump this layer's quantized weights
q_weight_txt = self.weight_quantizer.get_quantize_value(weight)
############# Weight reordering
w_para = q_weight_txt  # parameters to reorder
if self.reorder == True:
# print("use weights reorder!")
shape_output = w_para.shape[0]
shape_input = w_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
num_TM = int(shape_output / self.TM)
remainder_TM = shape_output % self.TM
first = True
reorder_w_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
for k in range(num_TN):
temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for j in range(num_TM):
if shape_input == 3 or shape_input == 1:  # first layer
print('The first layer~~~~~~~~~~~~')
temp = w_para[j * self.TM:(j + 1) * self.TM,
num_TN * self.TN:num_TN * self.TN + remainder_TN, :,
:]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)
fill[:, 0:remainder_TN, :] = temp
temp = fill.permute(2, 0, 1).contiguous().view(-1)
if first:  # allocate the storage array
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]
# merge into the TM*TN*(K*K) tensor layout of Fig. 10(a) in the paper
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
# convert to the reordered layout of Fig. 10(b)
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
w_para_flatten = reorder_w_para
# print(reorder_w_para.size)
##### Verify that the reordering is correct
'''if w_para_flatten.size == w_para.shape[0] * w_para.shape[1] * w_para.shape[2] * w_para.shape[3]:
print("weights convert correctly!")
else:
print("weights convert mismatchingly!")'''
q_weight_reorder = w_para_flatten
q_weight_reorder = np.array(q_weight_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name), q_weight_reorder,
delimiter='\n')
################ End of weight reordering
q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)
q_weight_max = [np.max(q_weight_txt)]
# q_weight_max = np.argmax(q_weight_txt)
max_weight_count = [np.sum(abs(q_weight_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count the overflowed values in this layer
np.savetxt(('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name), max_weight_count)
np.savetxt(('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name), q_weight_max)
np.savetxt(('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name), q_weight_txt,
delimiter='\n')
# io.savemat('save.mat',{'q_weight_txt':q_weight_txt})
####################### Create the folders for the bias txt output
if not os.path.isdir('./quantizer_output/q_bias_out'):
os.makedirs('./quantizer_output/q_bias_out')
if not os.path.isdir('./quantizer_output/b_scale_out'):
os.makedirs('./quantizer_output/b_scale_out')
####################### Dump this layer's bias scale factor
bias_scale = - self.bias_quantizer.get_scale()
np.savetxt(('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name), bias_scale, delimiter='\n')
####################### Dump this layer's quantized bias
q_bias_txt = self.bias_quantizer.get_quantize_value(bias)
q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name), q_bias_txt, delimiter='\n')
############# Bias reordering
if self.reorder == True:
b_para = np.zeros(2048, dtype=int)
b_para[0:q_bias_txt.size] = q_bias_txt
# print(b_para.shape)
# b_para = np.array(b_para.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name), b_para,
delimiter='\n')
###### Save the reordered weight and bias data as a binary file
bias_weight_reorder = np.append(b_para, q_weight_reorder)
wb_flat = bias_weight_reorder.astype(np.int8)
writer = open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, "wb")
writer.write(wb_flat)
writer.close()
################ End of bias reordering
elif int(self.name[1:4]) == self.layer_idx:
####################### Dump this layer's weight scale factor
weight_scale = - self.weight_quantizer.get_scale()
np.savetxt(('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name), weight_scale, delimiter='\n')
####################### Dump this layer's quantized weights
q_weight_txt = self.weight_quantizer.get_quantize_value(weight)
############# Weight reordering
w_para = q_weight_txt  # parameters to reorder
if self.reorder == True:
# print("use weights reorder!")
shape_output = w_para.shape[0]
shape_input = w_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
num_TM = int(shape_output / self.TM)
remainder_TM = shape_output % self.TM
first = True
reorder_w_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
for k in range(num_TN):
temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for j in range(num_TM):
if shape_input == 3 or shape_input == 1:  # first layer
print('The first layer~~~~~~~~~~~~')
temp = w_para[j * self.TM:(j + 1) * self.TM,
num_TN * self.TN:num_TN * self.TN + remainder_TN, :,
:]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)
fill[:, 0:remainder_TN, :] = temp
temp = fill.permute(2, 0, 1).contiguous().view(-1)
if first:  # allocate the storage array
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]
# merge into the TM*TN*(K*K) tensor layout of Fig. 10(a) in the paper
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
# convert to the reordered layout of Fig. 10(b)
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
w_para_flatten = reorder_w_para
# print(reorder_w_para.size)
##### Verify that the reordering is correct
'''if w_para_flatten.size == w_para.shape[0] * w_para.shape[1] * w_para.shape[2] * w_para.shape[3]:
print("weights convert correctly!")
else:
print("weights convert mismatchingly!")'''
q_weight_reorder = w_para_flatten
q_weight_reorder = np.array(q_weight_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name), q_weight_reorder,
delimiter='\n')
################ End of weight reordering
q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)
q_weight_max = [np.max(q_weight_txt)]
# q_weight_max = np.argmax(q_weight_txt)
max_weight_count = [np.sum(abs(q_weight_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count the overflowed values in this layer
np.savetxt(('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name), max_weight_count)
np.savetxt(('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name), q_weight_max)
np.savetxt(('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name), q_weight_txt,
delimiter='\n')
# io.savemat('save.mat',{'q_weight_txt':q_weight_txt})
####################### Create the folders for the bias txt output
if not os.path.isdir('./quantizer_output/q_bias_out'):
os.makedirs('./quantizer_output/q_bias_out')
if not os.path.isdir('./quantizer_output/b_scale_out'):
os.makedirs('./quantizer_output/b_scale_out')
####################### Dump this layer's bias scale factor
bias_scale = - self.bias_quantizer.get_scale()
np.savetxt(('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name), bias_scale, delimiter='\n')
####################### Dump this layer's quantized bias
q_bias_txt = self.bias_quantizer.get_quantize_value(bias)
q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name), q_bias_txt, delimiter='\n')
############# Bias reordering
if self.reorder == True:
b_para = np.zeros(2048, dtype=int)
b_para[0:q_bias_txt.size] = q_bias_txt
# print(b_para.shape)
# b_para = np.array(b_para.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name), b_para,
delimiter='\n')
###### Save the reordered weight and bias data as a binary file
bias_weight_reorder = np.append(b_para, q_weight_reorder)
wb_flat = bias_weight_reorder.astype(np.int8)
writer = open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, "wb")
writer.write(wb_flat)
writer.close()
################ End of bias reordering
# Quantized convolution
output = F.conv2d(
input=input,
weight=q_weight,
bias=q_bias,  # note: the bias is applied here so the conv realizes the full conv+bn
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
if self.activate == 'leaky':
output = F.leaky_relu(output, 0.125 if not self.maxabsscaler else 0.25, inplace=True)
elif self.activate == 'relu6':
output = F.relu6(output, inplace=True)
elif self.activate == 'h_swish':
output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)
elif self.activate == 'relu':
output = F.relu(output, inplace=True)
elif self.activate == 'mish':
output = output * F.softplus(output).tanh()
elif self.activate == 'linear':
# return output
pass
else:
print("%s is not supported!" % self.activate)
if self.quantizer_output == True:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
################## Dump the activation scale factor
activation_scale = - self.activation_quantizer.get_scale()
np.savetxt(('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name), activation_scale,
delimiter='\n')
################## Dump this layer's quantized activations
q_activation_txt = self.activation_quantizer.get_quantize_value(output)
a_para = q_activation_txt
############# Feature-map reordering
if self.reorder == True:
# Reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
temp = a_para[:, 0:remainder_TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
##### Verify that the reordering is correct
'''if a_para_flatten.size == a_para.shape[0] * a_para.shape[1] * a_para.shape[2] * a_para.shape[3]:
print("activation convert correctly!")
else:
print("activation convert mismatchingly!")'''
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
### Save the reordered activations as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
########## End of feature-map reordering
q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)
q_activation_max = [np.max(q_activation_txt)]  # record the layer's max value (to check for overflow)
max_activation_count = [np.sum(abs(q_activation_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count the overflowed values in this layer
# q_weight_max = np.argmax(q_weight_txt)
np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),
max_activation_count)
np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)
np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
################## Dump the activation scale factor
activation_scale = - self.activation_quantizer.get_scale()
np.savetxt(('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name), activation_scale,
delimiter='\n')
################## Dump this layer's quantized activations
q_activation_txt = self.activation_quantizer.get_quantize_value(output)
a_para = q_activation_txt
############# Feature-map reordering
if self.reorder == True:
# Reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
temp = a_para[:, 0:remainder_TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
##### Verify that the reordering is correct
'''if a_para_flatten.size == a_para.shape[0] * a_para.shape[1] * a_para.shape[2] * a_para.shape[3]:
print("activation convert correctly!")
else:
print("activation convert mismatchingly!")'''
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
### Save the reordered activations as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
########## End of feature-map reordering
q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)
q_activation_max = [np.max(q_activation_txt)]  # record the layer's max value (to check for overflow)
max_activation_count = [np.sum(abs(q_activation_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count the overflowed values in this layer
# q_weight_max = np.argmax(q_weight_txt)
np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),
max_activation_count)
np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)
np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,
delimiter='\n')
output = self.activation_quantizer(output)
return output
def BN_fuse(self):
if self.bn:
# BN folding
if self.bias is not None:
bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
self.gamma / torch.sqrt(self.running_var + self.eps)))
else:
bias = reshape_to_bias(
self.beta - self.running_mean * self.gamma / torch.sqrt(
self.running_var + self.eps))  # fold the running mean into the bias
weight = self.weight * reshape_to_weight(
self.gamma / torch.sqrt(self.running_var + self.eps))  # fold the running variance into the weight
else:
bias = self.bias
weight = self.weight
return weight, bias
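
# --- Editor's sketch (not part of the original source): what the BN folding in
# BN_fuse() computes. For conv weights W (no bias) followed by BatchNorm with
# running statistics (mean, var) and affine parameters (gamma, beta):
#   W_fold = W * gamma / sqrt(var + eps)        (per output channel)
#   b_fold = beta - mean * gamma / sqrt(var + eps)
# so conv(x, W_fold, b_fold) == BN(conv(x, W)) in floating point:
def _bn_fold_check():
    torch.manual_seed(0)
    conv = nn.Conv2d(4, 8, 3, bias=False)
    bn = nn.BatchNorm2d(8).eval()
    bn.running_mean.uniform_(-1, 1)
    bn.running_var.uniform_(0.5, 1.5)
    s = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    w_fold = conv.weight * s.reshape(-1, 1, 1, 1)
    b_fold = bn.bias - bn.running_mean * s
    x = torch.randn(1, 4, 8, 8)
    return torch.allclose(bn(conv(x)), F.conv2d(x, w_fold, b_fold), atol=1e-5)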
================================================
FILE: utils/quantized/quantized_ptq_cos.py
================================================
# Author:LiPu
import math
import numpy as np
import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
# ********************* quantizers *********************
class Round(Function):
@staticmethod
def forward(self, input):
sign = torch.sign(input)
output = sign * torch.floor(torch.abs(input) + 0.5)
return output
@staticmethod
def backward(self, grad_output):
# straight-through estimator: without a backward pass, any backprop through
# the rounding op would raise; the gradient is simply passed through
return grad_output
class Quantizer(nn.Module):
def __init__(self, bits, out_channels):
super().__init__()
self.bits = bits
if out_channels == -1:
self.register_buffer('scale', torch.zeros(1))  # quantization scale factor
self.register_buffer('float_range', torch.zeros(1))
else:
self.register_buffer('scale', torch.zeros(out_channels, 1, 1, 1))  # quantization scale factor
self.register_buffer('float_range', torch.zeros(out_channels, 1, 1, 1))
self.scale_list = [0 for i in range(bits + 7)]
def update_params(self, step):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # quantized (integer) range
temp = self.float_range
self.float_range.add_(-temp).add_(2 ** step)
self.scale = self.float_range / quantized_range  # quantization scale factor
# Quantize
def quantize(self, input):
output = input / self.scale
return output
def round(self, input):
output = Round.apply(input)
return output
# Clamp to the quantized range
def clamp(self, input):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
output = torch.clamp(input, min_val, max_val)
return output
# Dequantize
def dequantize(self, input):
output = input * self.scale
return output
def forward(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('Binary quantization is not supported!')
assert self.bits != 1
else:
if self.training == True:
max_metrics = -1
max_step = 0
for i in range(self.bits + 7):
self.update_params(i - 5)
output = self.quantize(input)  # quantize
output = self.round(output)
output = self.clamp(output)  # clamp
output = self.dequantize(output)  # dequantize
cosine_similarity = torch.cosine_similarity(input.view(-1), output.view(-1), dim=0)
if cosine_similarity > max_metrics:
max_metrics = cosine_similarity
max_step = i
torch.cuda.empty_cache()
self.scale_list[max_step] += 1
Global_max_step = self.scale_list.index(max(self.scale_list)) - 5
self.update_params(Global_max_step)
output = self.quantize(input)  # quantize
output = self.round(output)
output = self.clamp(output)  # clamp
output = self.dequantize(output)  # dequantize
return output
def get_quantize_value(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('Binary quantization is not supported!')
assert self.bits != 1
else:
output = self.quantize(input)  # quantize
output = self.round(output)
output = self.clamp(output)  # clamp
return output
################ Get the bit-shift amount corresponding to the scale factor
def get_scale(self):
############# shift correction
move_scale = math.log2(self.scale)
move_scale = np.array(move_scale).reshape(1, -1)
return move_scale
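
# --- Editor's sketch (not part of the original source): the scale search that
# Quantizer.forward performs above, restated standalone. Each candidate step gives
# a power-of-two float range 2**step, i.e. scale = 2**step / 128 for 8 bits; the
# step whose dequantized output is most cosine-similar to the float input wins.
# torch.round is used here for brevity in place of the custom Round above.
def _cos_scale_search(x, bits=8):
    qmin, qmax = -(1 << (bits - 1)), (1 << (bits - 1)) - 1
    best_step, best_sim = None, -1.0
    for i in range(bits + 7):  # mirrors the loop in forward(): step = i - 5
        step = i - 5
        scale = (2.0 ** step) / (1 << (bits - 1))
        q = torch.clamp(torch.round(x / scale), qmin, qmax) * scale
        sim = torch.cosine_similarity(x.view(-1), q.view(-1), dim=0)
        if sim > best_sim:
            best_sim, best_step = sim, step
    return best_step, float(best_sim)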
def reshape_to_activation(input):
return input.reshape(1, -1, 1, 1)
def reshape_to_weight(input):
return input.reshape(-1, 1, 1, 1)
def reshape_to_bias(input):
return input.reshape(-1)
# ********************* BN-folded quantized convolution (fold BN, then quantize A/W and convolve) *********************
class BNFold_COSPTQuantizedConv2d_For_FPGA(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
eps=1e-5,
momentum=0.1,
a_bits=8,
w_bits=8,
bn=0,
activate='leaky',
quantizer_output=False,
reorder=False, TM=32, TN=32,
name='', layer_idx=-1, maxabsscaler=False
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
self.bn = bn
if bias == False:
self.bias = Parameter(torch.zeros(out_channels))
self.activate = activate
self.eps = eps
self.momentum = momentum
self.gamma = Parameter(torch.Tensor(out_channels))
self.beta = Parameter(torch.Tensor(out_channels))
self.register_buffer('running_mean', torch.zeros(out_channels))
self.register_buffer('running_var', torch.zeros(out_channels))
self.register_buffer('q_bias', torch.zeros(out_channels))
self.register_buffer('q_weight', torch.zeros(self.weight.shape))
self.efficency = 0
self.deviation = 0
self.stop = False
self.quantized = False
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
self.maxabsscaler = maxabsscaler
self.a_bits = a_bits
self.w_bits = w_bits
# Instantiate the quantizers (A, W and bias are all layer-level here)
self.activation_quantizer = Quantizer(bits=a_bits, out_channels=-1)
self.weight_quantizer = Quantizer(bits=w_bits, out_channels=-1)
self.bias_quantizer = Quantizer(bits=w_bits, out_channels=-1)
def forward(self, input):
if not self.quantized:
if self.bn:
# BN folding (in place: the folded values are written into self.weight / self.bias)
if self.bias is not None:
self.bias.data = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
self.gamma / torch.sqrt(self.running_var + self.eps)))
else:
self.bias.data = reshape_to_bias(
self.beta - self.running_mean * self.gamma / torch.sqrt(
self.running_var + self.eps))  # fold the running mean into the bias
self.weight.data = self.weight * reshape_to_weight(
self.gamma / torch.sqrt(self.running_var + self.eps))  # fold the running variance into the weight
else:
pass  # nothing to fold; self.weight / self.bias are used as-is
# Quantize A and the BN-folded W
self.q_weight = self.weight_quantizer(self.weight)
self.q_bias = self.bias_quantizer(self.bias)
self.quantized = True
if self.training:
if isinstance(input, list):
quant_input = input[0]
float_input = input[1]
else:
quant_input = input
float_input = input
# Float convolution (reference output)
float_output = F.conv2d(
input=float_input,
weight=self.weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
# Compute the bias correction
if not self.stop:
# Quantized convolution
output = F.conv2d(
input=quant_input,
weight=self.q_weight,
bias=self.q_bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
# Compensation convolution: float weights applied to the quantized input
correct_output = F.conv2d(
input=quant_input,
weight=self.weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
rate = 0.05
error = torch.add(output, correct_output, alpha=-1).data
noise = error.pow(2).mean()
if noise > 0:
eff = 1.25 * correct_output.pow(2).mean().div(noise).log10().detach().cpu().numpy()
dev = math.fabs(eff - self.efficency)
if dev > 0:
self.efficency = (self.efficency * 4 + eff) * 0.2
self.deviation = (self.deviation * 4 + dev) * 0.2
if self.efficency > 4.0:
rate = rate * 0.5
if self.efficency > 4.3 or (self.deviation / self.efficency) < 0.05 or math.fabs(
dev - self.deviation / dev) < 0.05:
self.stop = True
else:
self.stop = True
else:
self.stop = True
if not self.stop:
error = error.mean(dim=[0, 2, 3])
self.bias.data = torch.sub(self.bias.data, error, alpha=rate)
self.q_bias = self.bias_quantizer(self.bias)
torch.cuda.empty_cache()
output = F.conv2d(
input=quant_input,
weight=self.q_weight,
bias=self.q_bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
else:
output = F.conv2d(
input=input,
weight=self.q_weight,
bias=self.q_bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
if self.quantizer_output == True:  # dump the quantization parameters to txt files
# Create the quantizer_output directories
if not os.path.isdir('./quantizer_output'):
os.makedirs('./quantizer_output')
if not os.path.isdir('./quantizer_output/q_weight_out'):
os.makedirs('./quantizer_output/q_weight_out')
if not os.path.isdir('./quantizer_output/w_scale_out'):
os.makedirs('./quantizer_output/w_scale_out')
if not os.path.isdir('./quantizer_output/q_weight_max'):
os.makedirs('./quantizer_output/q_weight_max')
if not os.path.isdir('./quantizer_output/max_weight_count'):
os.makedirs('./quantizer_output/max_weight_count')
if not os.path.isdir('./quantizer_output/q_weight_reorder'):
os.makedirs('./quantizer_output/q_weight_reorder')
if not os.path.isdir('./quantizer_output/q_bias_reorder'):
os.makedirs('./quantizer_output/q_bias_reorder')
if self.layer_idx == -1:
####################### Dump this layer's weight scale factor
weight_scale = - self.weight_quantizer.get_scale()
np.savetxt(('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name), weight_scale, delimiter='\n')
####################### Dump this layer's quantized weights
q_weight_txt = self.weight_quantizer.get_quantize_value(self.weight)  # was `weight` (undefined here); the BN-folded weight lives in self.weight
############# Weight reordering
w_para = q_weight_txt  # parameters to reorder
if self.reorder == True:
# print("use weights reorder!")
shape_output = w_para.shape[0]
shape_input = w_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
num_TM = int(shape_output / self.TM)
remainder_TM = shape_output % self.TM
first = True
reorder_w_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
for k in range(num_TN):
temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for j in range(num_TM):
if shape_input == 3 or shape_input == 1:  # first layer
print('The first layer~~~~~~~~~~~~')
temp = w_para[j * self.TM:(j + 1) * self.TM,
num_TN * self.TN:num_TN * self.TN + remainder_TN, :,
:]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)
fill[:, 0:remainder_TN, :] = temp
temp = fill.permute(2, 0, 1).contiguous().view(-1)
if first:  # allocate the storage array
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]
# merge into the TM*TN*(K*K) tensor layout of Fig. 10(a) in the paper
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
# convert to the reordered layout of Fig. 10(b)
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
w_para_flatten = reorder_w_para
# print(reorder_w_para.size)
##### Verify that the reordering is correct
'''if w_para_flatten.size == w_para.shape[0] * w_para.shape[1] * w_para.shape[2] * w_para.shape[3]:
print("weights convert correctly!")
else:
print("weights convert mismatchingly!")'''
q_weight_reorder = w_para_flatten
q_weight_reorder = np.array(q_weight_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name), q_weight_reorder,
delimiter='\n')
################ End of weight reordering
q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)
q_weight_max = [np.max(q_weight_txt)]
# q_weight_max = np.argmax(q_weight_txt)
max_weight_count = [np.sum(abs(q_weight_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count the overflowed values in this layer
np.savetxt(('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name), max_weight_count)
np.savetxt(('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name), q_weight_max)
np.savetxt(('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name), q_weight_txt,
delimiter='\n')
# io.savemat('save.mat',{'q_weight_txt':q_weight_txt})
####################### Create the folders for the bias txt output
if not os.path.isdir('./quantizer_output/q_bias_out'):
os.makedirs('./quantizer_output/q_bias_out')
if not os.path.isdir('./quantizer_output/b_scale_out'):
os.makedirs('./quantizer_output/b_scale_out')
####################### Dump this layer's bias scale factor
bias_scale = - self.bias_quantizer.get_scale()
np.savetxt(('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name), bias_scale, delimiter='\n')
####################### Dump this layer's quantized bias
q_bias_txt = self.bias_quantizer.get_quantize_value(self.bias)  # was `bias` (undefined here); the BN-folded bias lives in self.bias
q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name), q_bias_txt, delimiter='\n')
############# Bias reordering
if self.reorder == True:
b_para = np.zeros(2048, dtype=int)
b_para[0:q_bias_txt.size] = q_bias_txt
# print(b_para.shape)
# b_para = np.array(b_para.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name), b_para,
delimiter='\n')
###### Save the reordered weight and bias data as a binary file
bias_weight_reorder = np.append(b_para, q_weight_reorder)
wb_flat = bias_weight_reorder.astype(np.int8)
writer = open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, "wb")
writer.write(wb_flat)
writer.close()
################ End of bias reordering
elif int(self.name[1:4]) == self.layer_idx:
####################### Dump this layer's weight scale factor
weight_scale = - self.weight_quantizer.get_scale()
np.savetxt(('./quantizer_output/w_scale_out/w_scale_%s.txt' % self.name), weight_scale, delimiter='\n')
####################### Dump this layer's quantized weights
q_weight_txt = self.weight_quantizer.get_quantize_value(self.weight)  # was `weight` (undefined here); the BN-folded weight lives in self.weight
############# Weight reordering
w_para = q_weight_txt  # parameters to reorder
if self.reorder == True:
# print("use weights reorder!")
shape_output = w_para.shape[0]
shape_input = w_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
num_TM = int(shape_output / self.TM)
remainder_TM = shape_output % self.TM
first = True
reorder_w_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
for k in range(num_TN):
temp = w_para[0:remainder_TM, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for j in range(num_TM):
if shape_input == 3 or shape_input == 1:  # first layer
print('The first layer~~~~~~~~~~~~')
temp = w_para[j * self.TM:(j + 1) * self.TM,
num_TN * self.TN:num_TN * self.TN + remainder_TN, :,
:]
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
fill = torch.zeros(self.TM, self.TN, temp.shape[2]).to(temp.device)
fill[:, 0:remainder_TN, :] = temp
temp = fill.permute(2, 0, 1).contiguous().view(-1)
if first:  # allocate the storage array
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = w_para[j * self.TM:(j + 1) * self.TM, k * self.TN:(k + 1) * self.TN, :, :]
# merge into the TM*TN*(K*K) tensor layout of Fig. 10(a) in the paper
temp = temp.view(temp.shape[0], temp.shape[1], temp.shape[2] * temp.shape[3])
# convert to the reordered layout of Fig. 10(b)
temp = temp.permute(2, 0, 1).contiguous().view(-1)
if first:
reorder_w_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_w_para = np.append(reorder_w_para, temp.cpu().data.numpy())
w_para_flatten = reorder_w_para
# print(reorder_w_para.size)
##### Verify that the reordering is correct
'''if w_para_flatten.size == w_para.shape[0] * w_para.shape[1] * w_para.shape[2] * w_para.shape[3]:
print("weights convert correctly!")
else:
print("weights convert mismatchingly!")'''
q_weight_reorder = w_para_flatten
q_weight_reorder = np.array(q_weight_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_weight_reorder/w_reorder_%s.txt' % self.name), q_weight_reorder,
delimiter='\n')
################ End of weight reordering
q_weight_txt = np.array(q_weight_txt.cpu()).reshape(1, -1)
q_weight_max = [np.max(q_weight_txt)]
# q_weight_max = np.argmax(q_weight_txt)
max_weight_count = [np.sum(abs(q_weight_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count the overflowed values in this layer
np.savetxt(('./quantizer_output/max_weight_count/max_w_count_%s.txt' % self.name), max_weight_count)
np.savetxt(('./quantizer_output/q_weight_max/max_w_%s.txt' % self.name), q_weight_max)
np.savetxt(('./quantizer_output/q_weight_out/q_weight_%s.txt' % self.name), q_weight_txt,
delimiter='\n')
# io.savemat('save.mat',{'q_weight_txt':q_weight_txt})
####################### Create the folders for the bias txt output
if not os.path.isdir('./quantizer_output/q_bias_out'):
os.makedirs('./quantizer_output/q_bias_out')
if not os.path.isdir('./quantizer_output/b_scale_out'):
os.makedirs('./quantizer_output/b_scale_out')
####################### Dump this layer's bias scale factor
bias_scale = - self.bias_quantizer.get_scale()
np.savetxt(('./quantizer_output/b_scale_out/b_scale_%s.txt' % self.name), bias_scale, delimiter='\n')
####################### Dump this layer's quantized bias
q_bias_txt = self.bias_quantizer.get_quantize_value(self.bias)  # was `bias` (undefined here); the BN-folded bias lives in self.bias
q_bias_txt = np.array(q_bias_txt.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_out/q_bias_%s.txt' % self.name), q_bias_txt, delimiter='\n')
############# Bias reordering
if self.reorder == True:
b_para = np.zeros(2048, dtype=int)
b_para[0:q_bias_txt.size] = q_bias_txt
# print(b_para.shape)
# b_para = np.array(b_para.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_bias_reorder/q_b_reorder_%s.txt' % self.name), b_para,
delimiter='\n')
###### Save the reordered weight and bias data as a binary file
bias_weight_reorder = np.append(b_para, q_weight_reorder)
wb_flat = bias_weight_reorder.astype(np.int8)
writer = open('./quantizer_output/q_weight_reorder/%s_bias_weight_q_bin' % self.name, "wb")
writer.write(wb_flat)
writer.close()
################ End of bias reordering
if self.activate == 'leaky':
output = F.leaky_relu(output, 0.1 if not self.maxabsscaler else 0.25, inplace=True)
if self.training:
float_output = F.leaky_relu(float_output, 0.1 if not self.maxabsscaler else 0.25, inplace=True)
elif self.activate == 'relu6':
output = F.relu6(output, inplace=True)
if self.training:
float_output = F.relu6(float_output, inplace=True)
elif self.activate == 'h_swish':
output = output * (F.relu6(output + 3.0, inplace=True) / 6.0)
if self.training:
float_output = float_output * (F.relu6(float_output + 3.0, inplace=True) / 6.0)
elif self.activate == 'relu':
output = F.relu(output, inplace=True)
if self.training:
float_output = F.relu(float_output, inplace=True)
elif self.activate == 'mish':
output = output * F.softplus(output).tanh()
if self.training:
float_output = float_output * F.softplus(float_output).tanh()
elif self.activate == 'linear':
# return output
pass
else:
print("%s is not supported!" % self.activate)
if self.quantizer_output == True:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
################## Dump the activation scale factor
activation_scale = - self.activation_quantizer.get_scale()
np.savetxt(('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name), activation_scale,
delimiter='\n')
################## Dump this layer's quantized activations
q_activation_txt = self.activation_quantizer.get_quantize_value(output)
a_para = q_activation_txt
############# Feature-map reordering
if self.reorder == True:
# Reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
temp = a_para[:, 0:remainder_TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
##### Verify that the reordering is correct
'''if a_para_flatten.size == a_para.shape[0] * a_para.shape[1] * a_para.shape[2] * a_para.shape[3]:
print("activation convert correctly!")
else:
print("activation convert mismatchingly!")'''
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
### Save the reordered activations as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
########## End of feature-map reordering
q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)
q_activation_max = [np.max(q_activation_txt)]  # record the layer's max value (to check for overflow)
max_activation_count = [np.sum(abs(q_activation_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count the overflowed values in this layer
# q_weight_max = np.argmax(q_weight_txt)
np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),
max_activation_count)
np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)
np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
################## Dump the activation scale factor
activation_scale = - self.activation_quantizer.get_scale()
np.savetxt(('./quantizer_output/a_scale_out/a_scale_%s.txt' % self.name), activation_scale,
delimiter='\n')
################## Dump this layer's quantized activations
q_activation_txt = self.activation_quantizer.get_quantize_value(output)
a_para = q_activation_txt
############# Feature-map reordering
if self.reorder == True:
# Reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
if self.activate == 'linear':
print('layer-linear reorder!')
temp = a_para[:, 0:remainder_TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
else:
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
                    ##### verify that the reordered result has the expected size
'''if a_para_flatten.size == a_para.shape[0] * a_para.shape[1] * a_para.shape[2] * a_para.shape[3]:
print("activation convert correctly!")
else:
print("activation convert mismatchingly!")'''
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/a_reorder_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                    ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_activation_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                ########## feature-map reordering ends here
q_activation_txt = np.array(q_activation_txt.cpu()).reshape(1, -1)
                q_activation_max = [np.max(q_activation_txt)]  # record this layer's max value (to check for overflow)
                max_activation_count = [np.sum(abs(q_activation_txt) >= (1 << (self.w_bits - 1)) - 1)]  # count this layer's overflowed values
# q_weight_max = np.argmax(q_weight_txt)
np.savetxt(('./quantizer_output/max_activation_count/max_a_count_%s.txt' % self.name),
max_activation_count)
np.savetxt(('./quantizer_output/q_activation_max/q_a_max_%s.txt' % self.name), q_activation_max)
np.savetxt(('./quantizer_output/q_activation_out/q_activation_%s.txt' % self.name), q_activation_txt,
delimiter='\n')
output = self.activation_quantizer(output)
if self.training and self.activate != 'linear':
return [output, float_output]
else:
return output
def BN_fuse(self):
if self.bn:
            # fuse BN into the convolution
if self.bias is not None:
bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
self.gamma / torch.sqrt(self.running_var + self.eps)))
else:
                bias = reshape_to_bias(
                    self.beta - self.running_mean * self.gamma / torch.sqrt(
                        self.running_var + self.eps))  # fold running stats into bias
            weight = self.weight * reshape_to_weight(
                self.gamma / torch.sqrt(self.running_var + self.eps))  # fold running stats into weight
else:
bias = self.bias
weight = self.weight
return weight, bias
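# A minimal standalone sketch (hypothetical helper, assuming torch is available)
# of the BN-folding arithmetic used by BN_fuse above:
#     w' = w * gamma / sqrt(running_var + eps)
#     b' = beta + (b - running_mean) * gamma / sqrt(running_var + eps)
def _bn_fold_sketch():
    import torch
    import torch.nn.functional as F
    from torch import nn
    conv = nn.Conv2d(3, 8, 3, padding=1, bias=True).eval()
    bn = nn.BatchNorm2d(8).eval()  # eval mode: uses running statistics
    x = torch.randn(1, 3, 16, 16)
    g = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    w = conv.weight * g.view(-1, 1, 1, 1)            # fold running stats into weight
    b = bn.bias + (conv.bias - bn.running_mean) * g  # fold running stats into bias
    assert torch.allclose(bn(conv(x)), F.conv2d(x, w, b, padding=1), atol=1e-5)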
class COSPTQuantizedShortcut_min(nn.Module): # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, layers, weight=False, bits=8,
quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):
super(COSPTQuantizedShortcut_min, self).__init__()
self.layers = layers # layer indices
self.weight = weight # apply weights boolean
self.n = len(layers) + 1 # number of layers
self.bits = bits
        self.register_buffer('scale_x', torch.zeros(1))  # quantization scale factor
        self.register_buffer('float_range_x', torch.zeros(1))
        self.scale_list_x = [0 for i in range(bits)]
        self.register_buffer('scale_a', torch.zeros(1))  # quantization scale factor
        self.register_buffer('float_range_a', torch.zeros(1))
        self.scale_list_a = [0 for i in range(bits)]
        self.register_buffer('scale_sum', torch.zeros(1))  # quantization scale factor
        self.register_buffer('float_range_sum', torch.zeros(1))
        self.scale_list_sum = [0 for i in range(bits)]
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True) # layer weights
    # quantize
def quantize(self, input, type):
if type == "a":
output = input / self.scale_a
elif type == "x":
output = input / self.scale_x
elif type == "sum":
output = input / self.scale_sum
return output
def round(self, input):
output = Round.apply(input)
return output
    # clamp to the quantized range
def clamp(self, input):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
output = torch.clamp(input, min_val, max_val)
return output
    # dequantize
def dequantize(self, input, type):
if type == "a":
output = (input) * self.scale_a
elif type == "x":
output = (input) * self.scale_x
elif type == "sum":
output = (input) * self.scale_sum
return output
    # update scale parameters
    def update_params(self, step, type):
        min_val = torch.tensor(-(1 << (self.bits - 1)))
        max_val = torch.tensor((1 << (self.bits - 1)) - 1)
        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # range after quantization
        if type == "a":
            temp = self.float_range_a
            self.float_range_a.add_(-temp).add_(2 ** step)
            self.scale_a = self.float_range_a / quantized_range  # quantization scale factor
        elif type == "x":
            temp = self.float_range_x
            self.float_range_x.add_(-temp).add_(2 ** step)
            self.scale_x = self.float_range_x / quantized_range  # quantization scale factor
        elif type == "sum":
            temp = self.float_range_sum
            self.float_range_sum.add_(-temp).add_(2 ** step)
            self.scale_sum = self.float_range_sum / quantized_range  # quantization scale factor
def forward(self, x, outputs):
if self.training:
float = x[1]
x = x[0]
# Weights
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
x = x * w[0]
# Fusion
nx = x.shape[1] # input channels
for i in range(self.n - 1):
if self.training:
a = outputs[self.layers[i]][0] * w[i + 1] if self.weight else outputs[self.layers[i]][
0] # feature to add
else:
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]] # feature to add
na = a.shape[1] # feature channels
            if self.training:
                # search for the scales of the two input features and the output
max_metrics = -1
max_step = 0
for i in range(self.bits):
self.update_params(i, type="a")
                    output = self.quantize(a, type="a")  # quantize
                    output = self.round(output)
                    output = self.clamp(output)  # clamp
                    output = self.dequantize(output, type="a")  # dequantize
cosine_similarity = torch.cosine_similarity(a.view(-1), output.view(-1), dim=0)
if cosine_similarity > max_metrics:
max_metrics = cosine_similarity
max_step = i
self.scale_list_a[max_step] += 1
Global_max_step = self.scale_list_a.index(max(self.scale_list_a))
self.update_params(Global_max_step, type="a")
max_metrics = -1
max_step = 0
for i in range(self.bits):
self.update_params(i, type="x")
                    output = self.quantize(x, type="x")  # quantize
                    output = self.round(output)
                    output = self.clamp(output)  # clamp
                    output = self.dequantize(output, type="x")  # dequantize
cosine_similarity = torch.cosine_similarity(x.view(-1), output.view(-1), dim=0)
if cosine_similarity > max_metrics:
max_metrics = cosine_similarity
max_step = i
self.scale_list_x[max_step] += 1
Global_max_step = self.scale_list_x.index(max(self.scale_list_x))
self.update_params(Global_max_step, type="x")
float_max_val = min(self.float_range_a, self.float_range_x)
self.update_params(float_max_val.log2(), type="a")
self.update_params(float_max_val.log2(), type="x")
            # quantize x
            x = self.quantize(x, type="x")  # quantize
            x = self.round(x)
            x = self.dequantize(x, type="x")  # dequantize
            # quantize a
            a = self.quantize(a, type="a")  # quantize
            a = self.round(a)
            a = self.dequantize(a, type="a")  # dequantize
# Adjust channels
if nx == na: # same shape
x = x + a
elif nx > na: # slice input
x[:, :na] = x[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
x = x + a[:, :nx]
        # quantize the sum
        if self.training:
max_metrics = -1
max_step = 0
for i in range(self.bits):
self.update_params(i, type="sum")
                output = self.quantize(x, type="sum")  # quantize
                output = self.round(output)
                output = self.clamp(output)  # clamp
                output = self.dequantize(output, type="sum")  # dequantize
cosine_similarity = torch.cosine_similarity(x.view(-1), output.view(-1), dim=0)
if cosine_similarity > max_metrics:
max_metrics = cosine_similarity
max_step = i
self.scale_list_sum[max_step] += 1
Global_max_step = self.scale_list_sum.index(max(self.scale_list_sum))
self.update_params(Global_max_step, type="sum")
        x = self.quantize(x, type="sum")  # quantize
        x = self.round(x)
        x = self.clamp(x)  # clamp
        # dump the quantization scale factors
        if self.quantizer_output:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
move_scale = math.log2(self.scale_sum)
shortcut_scale = - np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
move_scale = math.log2(self.scale_sum)
shortcut_scale = - np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,
delimiter='\n')
        # dump the quantized feature map
        if self.quantizer_output:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
q_x_shortcut = x
                if self.reorder:
                    a_para = q_x_shortcut
                    # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                    ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                ########## shortcut reordering ends here
Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
q_x_shortcut = x
                if self.reorder:
                    a_para = q_x_shortcut
                    # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                    ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                ########## shortcut reordering ends here
Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,
delimiter='\n')
        x = self.dequantize(x, type="sum")  # dequantize
if self.training:
# float compute
# Weights
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
float = float * w[0]
# Fusion
nx = float.shape[1] # input channels
for i in range(self.n - 1):
a = outputs[self.layers[i]][1] * w[i + 1] if self.weight else outputs[self.layers[i]][
1] # feature to add
na = a.shape[1] # feature channels
# Adjust channels
if nx == na: # same shape
float = float + a
elif nx > na: # slice input
float[:, :na] = float[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
float = float + a[:, :nx]
return [x, float]
else:
return x
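# A distilled sketch (hypothetical, assuming torch) of the scale search used by
# the COSPT modules above: try every power-of-two float range 2**step, apply
# symmetric round/clamp quantization, and keep the step whose reconstruction
# has the highest cosine similarity to the float tensor.
def _cos_ptq_scale_search_sketch(tensor, bits=8):
    import torch
    quantized_range = 1 << (bits - 1)  # e.g. 128 for 8 bits
    best_step, best_sim = 0, -1.0
    for step in range(bits):
        scale = (2.0 ** step) / quantized_range  # candidate scale factor
        q = torch.clamp(torch.round(tensor / scale), -quantized_range, quantized_range - 1)
        rec = q * scale  # dequantize
        sim = torch.cosine_similarity(tensor.view(-1), rec.view(-1), dim=0).item()
        if sim > best_sim:
            best_sim, best_step = sim, step
    return best_step, best_sim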
class COSPTQuantizedShortcut_max(nn.Module): # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, layers, weight=False, bits=8,
quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):
super(COSPTQuantizedShortcut_max, self).__init__()
self.layers = layers # layer indices
self.weight = weight # apply weights boolean
self.n = len(layers) + 1 # number of layers
self.bits = bits
        self.register_buffer('scale_x', torch.zeros(1))  # quantization scale factor
        self.register_buffer('float_range_x', torch.zeros(1))
        self.register_buffer('scale_a', torch.zeros(1))  # quantization scale factor
        self.register_buffer('float_range_a', torch.zeros(1))
        self.register_buffer('scale_sum', torch.zeros(1))  # quantization scale factor
        self.register_buffer('float_range_sum', torch.zeros(1))
self.scale_list = [0 for i in range(bits)]
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True) # layer weights
    # quantize
def quantize(self, input, type):
if type == "a":
output = input / self.scale_a
elif type == "x":
output = input / self.scale_x
elif type == "sum":
output = input / self.scale_sum
return output
def round(self, input):
output = Round.apply(input)
return output
    # clamp to the quantized range
def clamp(self, input):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
output = torch.clamp(input, min_val, max_val)
return output
    # dequantize
def dequantize(self, input, type):
if type == "a":
output = (input) * self.scale_a
elif type == "x":
output = (input) * self.scale_x
elif type == "sum":
output = (input) * self.scale_sum
return output
    # update scale parameters
    def update_params(self, step, type):
        min_val = torch.tensor(-(1 << (self.bits - 1)))
        max_val = torch.tensor((1 << (self.bits - 1)) - 1)
        quantized_range = torch.max(torch.abs(min_val), torch.abs(max_val))  # range after quantization
        if type == "a":
            temp = self.float_range_a
            self.float_range_a.add_(-temp).add_(2 ** step)
            self.scale_a = self.float_range_a / quantized_range  # quantization scale factor
        elif type == "x":
            temp = self.float_range_x
            self.float_range_x.add_(-temp).add_(2 ** step)
            self.scale_x = self.float_range_x / quantized_range  # quantization scale factor
        elif type == "sum":
            temp = self.float_range_sum
            self.float_range_sum.add_(-temp).add_(2 ** step)
            self.scale_sum = self.float_range_sum / quantized_range  # quantization scale factor
def forward(self, x, outputs):
if self.training:
float = x[1]
x = x[0]
# Weights
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
x = x * w[0]
# Fusion
nx = x.shape[1] # input channels
for i in range(self.n - 1):
if self.training:
a = outputs[self.layers[i]][0] * w[i + 1] if self.weight else outputs[self.layers[i]][
0] # feature to add
else:
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]] # feature to add
na = a.shape[1] # feature channels
            if self.training:
                # search for a single shared scale over the two input features and the output
max_metrics = -1
max_step = 0
for i in range(self.bits):
cosine_similarity = 0
self.update_params(i, type="a")
                    output = self.quantize(a, type="a")  # quantize
                    output = self.round(output)
                    output = self.clamp(output)  # clamp
                    output = self.dequantize(output, type="a")  # dequantize
                    cosine_similarity = cosine_similarity + torch.cosine_similarity(a.view(-1), output.view(-1), dim=0)
                    self.update_params(i, type="x")
                    output = self.quantize(x, type="x")  # quantize
                    output = self.round(output)
                    output = self.clamp(output)  # clamp
                    output = self.dequantize(output, type="x")  # dequantize
                    cosine_similarity = cosine_similarity + torch.cosine_similarity(x.view(-1), output.view(-1), dim=0)
# Adjust channels
                    if nx == na:  # same shape
                        temp_x = x + a
                    elif nx > na:  # slice input
                        temp_x = x.clone()  # copy x before adding the narrower feature in place
                        temp_x[:, :na] = x[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
                    else:  # slice feature
                        temp_x = x + a[:, :nx]
                    self.update_params(i, type="sum")
                    output = self.quantize(temp_x, type="sum")  # quantize
                    output = self.round(output)
                    output = self.clamp(output)  # clamp
                    output = self.dequantize(output, type="sum")  # dequantize
cosine_similarity = cosine_similarity + torch.cosine_similarity(temp_x.view(-1), output.view(-1),
dim=0)
del temp_x
if cosine_similarity > max_metrics:
max_metrics = cosine_similarity
max_step = i
self.scale_list[max_step] += 1
Global_max_step = self.scale_list.index(max(self.scale_list))
self.update_params(Global_max_step, type="x")
self.update_params(Global_max_step, type="a")
self.update_params(Global_max_step, type="sum")
            # quantize x
            x = self.quantize(x, type="x")  # quantize
            x = self.round(x)
            x = self.dequantize(x, type="x")  # dequantize
            # quantize a
            a = self.quantize(a, type="a")  # quantize
            a = self.round(a)
            a = self.dequantize(a, type="a")  # dequantize
# Adjust channels
if nx == na: # same shape
x = x + a
elif nx > na: # slice input
x[:, :na] = x[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
x = x + a[:, :nx]
        # quantize the sum
        x = self.quantize(x, type="sum")  # quantize
        x = self.round(x)
        x = self.clamp(x)  # clamp
        # dump the quantization scale factors
        if self.quantizer_output:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
move_scale = math.log2(self.scale_sum)
shortcut_scale = - np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
move_scale = math.log2(self.scale_sum)
shortcut_scale = - np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/shortcut_scale_%s.txt' % self.name), shortcut_scale,
delimiter='\n')
        # dump the quantized feature map
        if self.quantizer_output:
if not os.path.isdir('./quantizer_output/q_activation_out'):
os.makedirs('./quantizer_output/q_activation_out')
if not os.path.isdir('./quantizer_output/a_scale_out'):
os.makedirs('./quantizer_output/a_scale_out')
if not os.path.isdir('./quantizer_output/q_activation_max'):
os.makedirs('./quantizer_output/q_activation_max')
if not os.path.isdir('./quantizer_output/max_activation_count'):
os.makedirs('./quantizer_output/max_activation_count')
if not os.path.isdir('./quantizer_output/q_activation_reorder'):
os.makedirs('./quantizer_output/q_activation_reorder')
if self.layer_idx == -1:
q_x_shortcut = x
                if self.reorder:
                    a_para = q_x_shortcut
                    # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                    ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                ########## shortcut reordering ends here
Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
q_x_shortcut = x
                if self.reorder:
                    a_para = q_x_shortcut
                    # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
remainder_TN = shape_input % self.TN
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_shortcut_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                    ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_shortcut_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                ########## shortcut reordering ends here
Q_shortcut = np.array(q_x_shortcut.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/Q_shortcut_%s.txt' % self.name), Q_shortcut,
delimiter='\n')
        x = self.dequantize(x, type="sum")  # dequantize
if self.training:
# float compute
# Weights
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
float = float * w[0]
# Fusion
nx = float.shape[1] # input channels
for i in range(self.n - 1):
a = outputs[self.layers[i]][1] * w[i + 1] if self.weight else outputs[self.layers[i]][
1] # feature to add
na = a.shape[1] # feature channels
# Adjust channels
if nx == na: # same shape
float = float + a
elif nx > na: # slice input
float[:, :na] = float[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
float = float + a[:, :nx]
return [x, float]
else:
return x
class COSPTQuantizedFeatureConcat(nn.Module):
def __init__(self, layers, groups, bits=8,
quantizer_output=False, reorder=False, TM=32, TN=32, name='', layer_idx=-1, ):
super(COSPTQuantizedFeatureConcat, self).__init__()
self.layers = layers # layer indices
self.groups = groups
self.multiple = len(layers) > 1 # multiple layers flag
        self.register_buffer('scale', torch.zeros(1))  # quantization scale factor
self.register_buffer('float_max_list', torch.zeros(len(layers)))
self.bits = bits
self.momentum = 0.1
self.quantizer_output = quantizer_output
self.reorder = reorder
self.TM = TM
self.TN = TN
self.name = name
self.layer_idx = layer_idx
    # quantize
def quantize(self, input):
output = input / self.scale
return output
def round(self, input):
output = Round.apply(input)
return output
    # clamp to the quantized range
def clamp(self, input):
min_val = torch.tensor(-(1 << (self.bits - 1)))
max_val = torch.tensor((1 << (self.bits - 1)) - 1)
output = torch.clamp(input, min_val, max_val)
return output
    # dequantize
def dequantize(self, input):
output = (input) * self.scale
return output
def forward(self, x, outputs):
if self.training:
float = x[1]
x = x[0]
if self.multiple:
            if self.training:
                quantized_min_val = torch.tensor(-(1 << (self.bits - 1)))
                quantized_max_val = torch.tensor((1 << (self.bits - 1)) - 1)
                quantized_range = torch.max(torch.abs(quantized_min_val), torch.abs(quantized_max_val))  # range after quantization
j = 0
for i in self.layers:
temp = outputs[i][0].detach()
if self.float_max_list[j] == 0:
self.float_max_list[j].add_(
torch.max(torch.max(temp), torch.abs(torch.min(temp))))
else:
self.float_max_list[j].mul_(1 - self.momentum).add_(
torch.max(torch.max(temp), torch.abs(torch.min(temp))) * self.momentum)
j = j + 1
del temp
torch.cuda.empty_cache()
                float_max = max(self.float_max_list).unsqueeze(0)  # float range before quantization
floor_float_range = 2 ** float_max.log2().floor()
ceil_float_range = 2 ** float_max.log2().ceil()
if abs(ceil_float_range - float_max) < abs(floor_float_range - float_max):
float_range = ceil_float_range
else:
float_range = floor_float_range
                self.scale = float_range / quantized_range  # quantization scale factor
            if self.quantizer_output:
if self.layer_idx == -1:
                    q_a_concat = copy.deepcopy(outputs)  # copy all cached layer outputs before indexing by layer
move_scale = math.log2(self.scale)
concat_scale = -np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/concat_scale_%s.txt' % self.name), concat_scale,
delimiter='\n')
for i in self.layers:
                        q_a_concat[i] = self.quantize(q_a_concat[i])  # quantize
                        q_a_concat[i] = self.round(q_a_concat[i])
                        q_a_concat[i] = self.clamp(q_a_concat[i])  # clamp
Q_concat = torch.cat([q_a_concat[i] for i in self.layers], 1)
                    if self.reorder:
                        a_para = Q_concat
                        # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_concat_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                        ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_concat_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                    ########## concat reordering ends here
Q_concat = np.array(Q_concat.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/a_concat_%s.txt' % self.name), Q_concat,
delimiter='\n')
elif int(self.name[1:4]) == self.layer_idx:
                    q_a_concat = copy.deepcopy(outputs)  # copy all cached layer outputs before indexing by layer
move_scale = math.log2(self.scale)
concat_scale = -np.array(move_scale).reshape(1, -1)
np.savetxt(('./quantizer_output/a_scale_out/concat_scale_%s.txt' % self.name), concat_scale,
delimiter='\n')
for i in self.layers:
                        q_a_concat[i] = self.quantize(q_a_concat[i])  # quantize
                        q_a_concat[i] = self.round(q_a_concat[i])
                        q_a_concat[i] = self.clamp(q_a_concat[i])  # clamp
Q_concat = torch.cat([q_a_concat[i] for i in self.layers], 1)
                    if self.reorder:
                        a_para = Q_concat
                        # reordering parameters
# print("use activation reorder!")
shape_input = a_para.shape[1]
num_TN = int(shape_input / self.TN)
first = True
reorder_a_para = None
for k in range(num_TN):
temp = a_para[:, k * self.TN:(k + 1) * self.TN, :, :]
temp = temp.view(temp.shape[1], temp.shape[2], temp.shape[3])
temp = temp.permute(1, 2, 0).contiguous().view(-1)
if first:
reorder_a_para = temp.clone().cpu().data.numpy()
first = False
else:
reorder_a_para = np.append(reorder_a_para, temp.cpu().data.numpy())
a_para_flatten = reorder_a_para
q_activation_reorder = a_para_flatten
q_activation_reorder = np.array(q_activation_reorder).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_reorder/r_concat_%s.txt' % self.name),
q_activation_reorder, delimiter='\n')
                        ### save the reordered data as a binary file
activation_flat = q_activation_reorder.astype(np.int8)
writer = open('./quantizer_output/q_activation_reorder/%s_concat_q_bin' % self.name, "wb")
writer.write(activation_flat)
writer.close()
                    ########## concat reordering ends here
Q_concat = np.array(Q_concat.cpu()).reshape(1, -1)
np.savetxt(('./quantizer_output/q_activation_out/a_concat_%s.txt' % self.name), Q_concat,
delimiter='\n')
            # fake-quantize each source feature map
            if self.training:
                for i in self.layers:
                    outputs[i][0] = self.quantize(outputs[i][0])  # quantize
                    outputs[i][0] = self.round(outputs[i][0])
                    outputs[i][0] = self.clamp(outputs[i][0])  # clamp
                    outputs[i][0] = self.dequantize(outputs[i][0])  # dequantize
return [torch.cat([outputs[i][0] for i in self.layers], 1),
torch.cat([outputs[i][1] for i in self.layers], 1)]
else:
for i in self.layers:
                    outputs[i] = self.quantize(outputs[i])  # quantize
                    outputs[i] = self.round(outputs[i])
                    outputs[i] = self.clamp(outputs[i])  # clamp
                    outputs[i] = self.dequantize(outputs[i])  # dequantize
return torch.cat([outputs[i] for i in self.layers], 1)
else:
if self.groups:
if self.training:
return [x[:, (x.shape[1] // 2):], float[:, (x.shape[1] // 2):]]
else:
return x[:, (x.shape[1] // 2):]
else:
return outputs[self.layers[0]]
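# A distilled sketch (hypothetical, assuming torch; float_max is a positive
# 1-element tensor) of the range selection above: snap the running absolute
# maximum to the nearest power of two, so the derived scale is a pure bit shift.
def _nearest_pow2_range_sketch(float_max):
    import torch
    floor_range = 2 ** torch.log2(float_max).floor()
    ceil_range = 2 ** torch.log2(float_max).ceil()
    if abs(ceil_range - float_max) < abs(floor_range - float_max):
        return ceil_range
    return floor_range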
================================================
FILE: utils/torch_utils.py
================================================
from copy import deepcopy
import torch.backends.cudnn as cudnn
from utils.quantized.quantized_google import *
def init_seeds(seed=0):
torch.manual_seed(seed)
# Remove randomness (may be slower on Tesla GPUs) # https://pytorch.org/docs/stable/notes/randomness.html
if seed == 0:
cudnn.deterministic = True
cudnn.benchmark = False
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
cpu_request = device.lower() == 'cpu'
if device and not cpu_request: # if device requested other than 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
        assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device  # check availability
cuda = False if cpu_request else torch.cuda.is_available()
if cuda:
c = 1024 ** 2 # bytes to MB
ng = torch.cuda.device_count()
if ng > 1 and batch_size: # check that batch_size is compatible with device_count
assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
x = [torch.cuda.get_device_properties(i) for i in range(ng)]
s = 'Using CUDA '
for i in range(0, ng):
if i == 1:
s = ' ' * len(s)
print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
(s, i, x[i].name, x[i].total_memory / c))
else:
print('Using CPU')
print('') # skip a line
return torch.device('cuda:0' if cuda else 'cpu')
def time_synchronized():
torch.cuda.synchronize() if torch.cuda.is_available() else None
return time.time()
def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-4
m.momentum = 0.03
elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def find_modules(model, mclass=nn.Conv2d):
# finds layer indices matching module class 'mclass'
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
def fuse_conv_and_bn(conv, bn):
# https://tehnokv.com/posts/fusing-batchnorm-and-conv/
with torch.no_grad():
# init
fusedconv = torch.nn.Conv2d(conv.in_channels,
conv.out_channels,
groups=conv.groups,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
bias=True)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
# prepare spatial bias
if conv.bias is not None:
b_conv = conv.bias
else:
b_conv = torch.zeros(conv.weight.size(0))
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
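# A minimal usage sketch (hypothetical) for fuse_conv_and_bn above: in eval
# mode the fused convolution should reproduce conv followed by BN.
def _fuse_conv_bn_sketch():
    import torch
    from torch import nn
    conv = nn.Conv2d(3, 16, 3, padding=1, bias=False).eval()
    bn = nn.BatchNorm2d(16).eval()
    fused = fuse_conv_and_bn(conv, bn)
    x = torch.rand(1, 3, 32, 32)
    assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)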
def model_info(model, verbose=False):
# Plots a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
try: # FLOPS
from thop import profile
macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640),), verbose=False)
fs = ', %.1f GFLOPS' % (macs / 1024 ** 3)
    except Exception:
fs = ''
print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
def load_classifier(name='resnet101', n=2):
# Loads a pretrained model reshaped to n-class output
import pretrainedmodels # https://github.com/Cadene/pretrained-models.pytorch#torchvision
model = pretrainedmodels.__dict__[name](num_classes=1000, pretrained='imagenet')
# Display model properties
for x in ['model.input_size', 'model.input_space', 'model.input_range', 'model.mean', 'model.std']:
print(x + ' =', eval(x))
# Reshape output to n classes
filters = model.last_linear.weight.shape[1]
model.last_linear.bias = torch.nn.Parameter(torch.zeros(n))
model.last_linear.weight = torch.nn.Parameter(torch.zeros(n, filters))
model.last_linear.out_features = n
return model
def scale_img(img, ratio=1.0, same_shape=True): # img(16,3,256,416), r=ratio
# scales img(bs,3,y,x) by ratio
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
gs = 64 # (pixels) grid size
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
class ModelEMA:
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
"""
def __init__(self, model, decay=0.9999, device=''):
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.updates = 0 # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
for p in self.ema.parameters():
p.requires_grad_(False)
def update(self, model):
self.updates += 1
d = self.decay(self.updates)
with torch.no_grad():
if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel):
msd, esd = model.module.state_dict(), self.ema.module.state_dict()
else:
msd, esd = model.state_dict(), self.ema.state_dict()
for k, v in esd.items():
if v.dtype.is_floating_point:
v *= d
v += (1. - d) * msd[k].detach()
def update_attr(self, model):
# Assign attributes (which may change during training)
for k in model.__dict__.keys():
if not k.startswith('_'):
setattr(self.ema, k, getattr(model, k))
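# A minimal usage sketch (hypothetical training-loop fragment) for ModelEMA:
# update the EMA copy after every optimizer step, then validate or checkpoint
# ema.ema instead of the live model.
def _model_ema_sketch(model, dataloader, optimizer, loss_fn):
    ema = ModelEMA(model)
    for imgs, targets in dataloader:
        loss = loss_fn(model(imgs), targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.update(model)  # EMA tracks the weights after every step
    return ema.ema  # smoothed model for validation/checkpointing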
================================================
FILE: utils/utils.py
================================================
import glob
import math
import os
import random
import shutil
import subprocess
from pathlib import Path
from sys import platform
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
from tqdm import tqdm
import torch.nn.functional as F
from . import torch_utils # , google_utils
# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})
# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
torch_utils.init_seeds(seed=seed)
def load_classes(path):
# Loads *.names file at 'path'
with open(path, 'r') as f:
names = f.read().split('\n')
return list(filter(None, names)) # filter removes empty strings (such as last line)
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class
    # Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class mAPs
n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(int), minlength=nc) for i in range(n)])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco_class_weights(): # frequency of each class in coco train2014
n = [187437, 4955, 30920, 6033, 3838, 4332, 3160, 7051, 7677, 9167, 1316, 1372, 833, 6757, 7355, 3302, 3776, 4671,
6769, 5706, 3908, 903, 3686, 3596, 6200, 7920, 8779, 4505, 4272, 1862, 4698, 1962, 4403, 6659, 2402, 2689,
4012, 4175, 3411, 17048, 5637, 14553, 3923, 5539, 4289, 10084, 7018, 4314, 3099, 4638, 4939, 5543, 2038, 4004,
5053, 4578, 27292, 4113, 5931, 2905, 11174, 2873, 4036, 3415, 1517, 4122, 1980, 4464, 1190, 2302, 156, 3933,
1877, 17630, 4337, 4624, 1075, 3468, 135, 1380]
weights = 1 / torch.Tensor(n)
weights /= weights.sum()
# with open('data/coco.names', 'r') as f:
# for k, v in zip(f.read().splitlines(), n):
# print('%20s: %g' % (k, v))
return weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Transform box coordinates from [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right) to [x, y, w, h]
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Transform box coordinates from [x, y, w, h] to [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right)
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
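# A quick sanity sketch (hypothetical): the two converters above are exact
# inverses, so a round trip returns the original boxes.
def _box_convert_sketch():
    boxes = torch.tensor([[10., 20., 50., 80.], [0., 0., 4., 4.]])  # xyxy
    assert torch.allclose(xywh2xyxy(xyxy2xywh(boxes)), boxes)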
# def xywh2xyxy(box):
# # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2]
# if isinstance(box, torch.Tensor):
# x, y, w, h = box.t()
# return torch.stack((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).t()
# else: # numpy
# x, y, w, h = box.T
# return np.stack((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).T
#
#
# def xyxy2xywh(box):
# # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h]
# if isinstance(box, torch.Tensor):
# x1, y1, x2, y2 = box.t()
# return torch.stack(((x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1)).t()
# else: # numpy
# x1, y1, x2, y2 = box.T
# return np.stack(((x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1)).T
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = max(img1_shape) / max(img0_shape) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, img_shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
boxes[:, 0].clamp_(0, img_shape[1]) # x1
boxes[:, 1].clamp_(0, img_shape[0]) # y1
boxes[:, 2].clamp_(0, img_shape[1]) # x2
boxes[:, 3].clamp_(0, img_shape[0]) # y2
def ap_per_class(tp, conf, pred_cls, target_cls):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (nparray, nx1 or nx10).
conf: Objectness value from 0-1 (nparray).
pred_cls: Predicted object classes (nparray).
target_cls: True object classes (nparray).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(target_cls)
# Create Precision-Recall curve and compute AP for each class
pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
s = [len(unique_classes), tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_gt = (target_cls == c).sum() # Number of ground truth objects
n_p = i.sum() # Number of predicted objects
if n_p == 0 or n_gt == 0:
continue
else:
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum(0)
tpc = tp[i].cumsum(0)
# Recall
recall = tpc / (n_gt + 1e-16) # recall curve
r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j] = compute_ap(recall[:, j], precision[:, j])
# Plot
# fig, ax = plt.subplots(1, 1, figsize=(5, 5))
# ax.plot(recall, precision)
# ax.set_xlabel('Recall')
# ax.set_ylabel('Precision')
# ax.set_xlim(0, 1.01)
# ax.set_ylim(0, 1.01)
# fig.tight_layout()
# fig.savefig('PR_curve.png', dpi=300)
# Compute F1 score (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
return p, r, ap, f1, unique_classes.astype('int32')
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Append sentinel values to beginning and end
mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
mpre = np.concatenate(([0.], precision, [0.]))
# Compute the precision envelope
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
# Integrate area under curve
method = 'interp' # methods: 'continuous', 'interp'
if method == 'interp':
x = np.linspace(0, 1, 101) # 101-point interp (COCO)
ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
else: # 'continuous'
i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
return ap
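# A worked numeric example (hypothetical) for compute_ap: precision 1.0 up to
# recall 0.5, then falling linearly to 0.5 at recall 1.0, scores roughly
# AP = 0.87 under the 101-point interpolation above.
def _compute_ap_sketch():
    recall = np.array([0.1, 0.5, 1.0])
    precision = np.array([1.0, 1.0, 0.5])
    print('AP = %.3f' % compute_ap(recall, precision))  # approximately 0.87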
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.t()
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
union = (w1 * h1 + 1e-16) + w2 * h2 - inter
iou = inter / union # iou
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + 1e-16 # convex area
return iou - (c_area - union) / c_area # GIoU
if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
# convex diagonal squared
c2 = cw ** 2 + ch ** 2 + 1e-16
# centerpoint distance squared
rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (1 - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou
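# A worked example (hypothetical) for bbox_iou: two offset 2x2 squares overlap
# in a 1x1 cell, so IoU = 1/7; the 3x3 enclosing box gives
# GIoU = 1/7 - 2/9 (negative, penalising the loose enclosure).
def _bbox_iou_sketch():
    b1 = torch.tensor([0., 0., 2., 2.])  # box1 is a flat xyxy 4-vector
    b2 = torch.tensor([[1., 1., 3., 3.]])  # box2 is nx4
    print(bbox_iou(b1, b2).item())  # ~0.1429
    print(bbox_iou(b1, b2, GIoU=True).item())  # ~-0.0794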
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.t())
area2 = box_area(box2.t())
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
def wh_iou(wh1, wh2):
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(FocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
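# Worked example (hypothetical): with eps=0.1 the positive target softens from
# 1.0 to 0.95 and the negative target rises from 0.0 to 0.05.
def _smooth_bce_sketch():
    cp, cn = smooth_BCE(eps=0.1)
    assert abs(cp - 0.95) < 1e-9 and abs(cn - 0.05) < 1e-9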
def compute_loss(p, targets, model): # predictions, targets, model
ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
tcls, tbox, indices, anchor_vec = build_targets(p, targets, model)
h = model.hyp # hyperparameters
red = 'mean' # Loss reduction (sum or mean)
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)
# class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
# focal loss
g = h['fl_gamma'] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
# Compute losses
np, ng = 0, 0 # number grid points, targets
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0]) # target obj
np += tobj.numel()
# Compute losses
nb = len(b)
if nb: # number of targets
ng += nb
ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
# ps[:, 2:4] = torch.sigmoid(ps[:, 2:4]) # wh power loss (uncomment)
# GIoU
pxy = torch.sigmoid(ps[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
pwh = torch.exp(ps[:, 2:4]).clamp(max=1E3) * anchor_vec[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box
giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True) # giou computation
lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean() # giou loss
tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype) # giou ratio
if model.nc > 1: # cls loss (only if multiple classes)
t = torch.full_like(ps[:, 5:], cn) # targets
t[range(nb), tcls[i]] = cp
lcls += BCEcls(ps[:, 5:], t) # BCE
# lcls += CE(ps[:, 5:], tcls[i]) # CE
# Append targets to text file
# with open('targets.txt', 'a') as file:
# [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
lobj += BCEobj(pi[..., 4], tobj) # obj loss
lbox *= h['giou']
lobj *= h['obj']
lcls *= h['cls']
if red == 'sum':
bs = tobj.shape[0] # batch size
lobj *= 3 / (6300 * bs) * 2 # 3 / np * 2
if ng:
lcls *= 3 / ng / model.nc
lbox *= 3 / ng
loss = lbox + lobj + lcls
return loss, torch.cat((lbox, lobj, lcls, loss)).detach()
def compute_lost_KD(output_s, output_t, num_classes, batch_size):
T = 3.0
Lambda_ST = 0.001
criterion_st = torch.nn.KLDivLoss(reduction='sum')
output_s = torch.cat([i.view(-1, num_classes + 5) for i in output_s])
output_t = torch.cat([i.view(-1, num_classes + 5) for i in output_t])
loss_st = criterion_st(nn.functional.log_softmax(output_s / T, dim=1),
nn.functional.softmax(output_t / T, dim=1)) * (T * T) / batch_size
return loss_st * Lambda_ST
def compute_lost_KD2(model, targets, output_s, output_t):
reg_m = 0.0
T = 3.0
Lambda_cls, Lambda_box = 0.0001, 0.001
criterion_st = torch.nn.KLDivLoss(reduction='sum')
ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
lcls, lbox = ft([0]), ft([0])
tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
reg_ratio, reg_num, reg_nb = 0, 0, 0
for i, (ps, pt) in enumerate(zip(output_s, output_t)): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
nb = len(b)
if nb: # number of targets
pss = ps[b, a, gj, gi] # prediction subset corresponding to targets
pts = pt[b, a, gj, gi]
psxy = torch.sigmoid(pss[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4) # predicted box
ptxy = torch.sigmoid(pts[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4) # predicted box
l2_dis_s = (psbox - tbox[i]).pow(2).sum(1)
l2_dis_s_m = l2_dis_s + reg_m
l2_dis_t = (ptbox - tbox[i]).pow(2).sum(1)
l2_num = l2_dis_s_m > l2_dis_t
lbox += l2_dis_s[l2_num].sum()
reg_num += l2_num.sum().item()
reg_nb += nb
output_s_i = ps[..., 4:].view(-1, model.nc + 1)
output_t_i = pt[..., 4:].view(-1, model.nc + 1)
lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
if reg_nb:
reg_ratio = reg_num / reg_nb
return lcls * Lambda_cls + lbox * Lambda_box, reg_ratio
def compute_lost_KD3(model, targets, output_s, output_t):
T = 3.0
Lambda_cls, Lambda_box = 0.0001, 0.001
criterion_st = torch.nn.KLDivLoss(reduction='sum')
ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
lcls, lbox = ft([0]), ft([0])
tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
for i, (ps, pt) in enumerate(zip(output_s, output_t)): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
nb = len(b)
if nb: # number of targets
pss = ps[b, a, gj, gi] # prediction subset corresponding to targets
pts = pt[b, a, gj, gi]
psxy = torch.sigmoid(pss[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4) # predicted box
ptxy = torch.sigmoid(pts[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4) # predicted box
l2_dis = (psbox - ptbox).pow(2).sum(1)
lbox += l2_dis.sum()
output_s_i = ps[..., 4:].view(-1, model.nc + 1)
output_t_i = pt[..., 4:].view(-1, model.nc + 1)
lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
return lcls * Lambda_cls + lbox * Lambda_box
def compute_lost_KD4(model, targets, output_s, output_t, feature_s, feature_t, batch_size):
T = 3.0
Lambda_cls, Lambda_box, Lambda_feature = 0.001, 0.001, 0.001
criterion_st = torch.nn.KLDivLoss(reduction='sum')
criterion_stf = torch.nn.KLDivLoss(reduction='sum')
ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
lcls, lbox, lfeature = ft([0]), ft([0]), ft([0])
tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
for i, (ps, pt) in enumerate(zip(output_s, output_t)): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
nb = len(b)
if nb: # number of targets
pss = ps[b, a, gj, gi] # prediction subset corresponding to targets
pts = pt[b, a, gj, gi]
psxy = torch.sigmoid(pss[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4) # predicted box
ptxy = torch.sigmoid(pts[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4) # predicted box
l2_dis = (psbox - ptbox).pow(2).sum(1)
lbox += l2_dis.sum()
# cls loss
output_s_i = ps[..., 4:].view(-1, model.nc + 1)
output_t_i = pt[..., 4:].view(-1, model.nc + 1)
lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
# feature loss
if len(feature_t) != len(feature_s):
print("feature mismatch!")
exit()
for i in range(len(feature_t)):
# feature_t[i] = feature_t[i].pow(2).sum(1)
feature_t[i] = feature_t[i].abs().sum(1).view(feature_t[i].size(0), -1)
# feature_s[i] = feature_s[i].pow(2).sum(1)
feature_s[i] = feature_s[i].abs().sum(1).view(feature_s[i].size(0), -1)
lfeature += criterion_stf(nn.functional.log_softmax(feature_s[i] / T, dim=1),
nn.functional.softmax(feature_t[i] / T, dim=1)) * (T * T) / batch_size
return lcls * Lambda_cls + lbox * Lambda_box + lfeature * Lambda_feature
def indices_merge(indices):
    merged = []  # local name kept distinct from the function name
    for i in range(len(indices)):
        temp = list(indices[i])
        temp[2] = temp[2] * (2 ** (5 - i))  # scale gridy by the head stride
        temp[3] = temp[3] * (2 ** (5 - i))  # scale gridx by the head stride
        merged.append(temp)
    return merged
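# Hedged note on indices_merge: 2 ** (5 - i) is apparently the stride of the
# i-th YOLO head under this ordering (32, 16, 8 for i = 0, 1, 2), so grid
# indices from every scale land in one pixel-level frame, e.g. gj = 7 on the
# stride-32 head becomes 7 * 32 = 224.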
def fine_grained_imitation_feature_mask(feature_s, feature_t, indices, img_size):
if feature_t.size() != feature_s.size():
print("feature mismatch!")
exit()
B, Gj, Gi = torch.Tensor(0).long().cuda(), torch.Tensor(0).long().cuda(), torch.Tensor(0).long().cuda()
feature_size = feature_s.size()[1]
scale = img_size / feature_size
for j in range(len(indices)):
if 2 ** (5 - j) < scale:
break
b, _, gj, gi = indices[j] # image, gridy, gridx
gj, gi = (gj / scale).long(), (gi / scale).long()
for i in range(gj.size()[0]):
if 2 ** (5 - j) == scale:
break
b_temp = (torch.ones(int(2 ** (5 - j) / scale - 1)).long().cuda() * b[i])
gj_temp = torch.arange(int(gj[i].item()) + 1, int(gj[i].item() + 2 ** (5 - j) / scale)).cuda()
gi_temp = torch.arange(int(gi[i].item()) + 1, int(gi[i].item() + 2 ** (5 - j) / scale)).cuda()
b = torch.cat((b, b_temp))
gj = torch.cat((gj, gj_temp))
gi = torch.cat((gi, gi_temp))
B = torch.cat((B, b))
Gj = torch.cat((Gj, gj))
Gi = torch.cat((Gi, gi))
    mask = torch.zeros(feature_s.size(), device=B.device)  # same device as the index tensors
    mask[B, Gj, Gi] = 1
return mask
def compute_lost_KD5(model, targets, output_s, output_t, feature_s, feature_t, batch_size, img_size):
T = 3.0
Lambda_cls, Lambda_box, Lambda_feature = 0.001, 0.001, 0.001
criterion_st = torch.nn.KLDivLoss(reduction='sum')
ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
lcls, lbox, lfeature = ft([0]), ft([0]), ft([0])
tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
for i, (ps, pt) in enumerate(zip(output_s, output_t)): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
nb = len(b)
if nb: # number of targets
pss = ps[b, a, gj, gi] # prediction subset corresponding to targets
pts = pt[b, a, gj, gi]
psxy = torch.sigmoid(pss[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4) # predicted box
ptxy = torch.sigmoid(pts[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4) # predicted box
l2_dis = (psbox - ptbox).pow(2).sum(1)
lbox += l2_dis.sum()
# cls loss
output_s_i = ps[..., 4:].view(-1, model.nc + 1)
output_t_i = pt[..., 4:].view(-1, model.nc + 1)
lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
# feature loss
if len(feature_t) != len(feature_s):
print("feature mismatch!")
exit()
merge = indices_merge(indices)
for i in range(len(feature_t)):
# feature_t[i] = feature_t[i].pow(2).sum(1)
feature_t[i] = feature_t[i].abs().sum(1)
# feature_s[i] = feature_s[i].pow(2).sum(1)
feature_s[i] = feature_s[i].abs().sum(1)
mask = fine_grained_imitation_feature_mask(feature_s[i], feature_t[i], merge, img_size)
mask = mask.to(targets.device)
feature_t[i] = (feature_t[i] * mask).view(batch_size, -1)
feature_s[i] = (feature_s[i] * mask).view(batch_size, -1)
lfeature += criterion_st(nn.functional.log_softmax(feature_s[i] / T, dim=1),
nn.functional.softmax(feature_t[i] / T, dim=1)) * (T * T) / batch_size
# print(lcls.data)
# print(lbox.data)
# print(lfeature.data)
return lcls * Lambda_cls + lbox * Lambda_box + lfeature * Lambda_feature
def fine_grained_imitation_mask(feature_s, feature_t, indices):
if len(feature_t) != len(feature_s):
print("feature mismatch!")
exit()
mask = []
for i in range(len(feature_t)):
        temp = torch.zeros(feature_s[i].size(), device=feature_s[i].device)  # keep mask on the feature device
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
temp[b, a, gj, gi] = 1
mask.append(temp)
return mask
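def demo_imitation_mask():  # from utils.utils import *; demo_imitation_mask()
    # Hedged sketch, not part of the original file: the imitation mask is just
    # a zeros tensor with ones at the (image, anchor, gridy, gridx) cells that
    # own a target, as built by fine_grained_imitation_mask above.
    feat = torch.zeros(2, 3, 13, 13, 85)  # dummy head output
    b, a = torch.tensor([0, 1]), torch.tensor([1, 2])
    gj, gi = torch.tensor([4, 7]), torch.tensor([5, 9])
    mask = torch.zeros(feat.size())
    mask[b, a, gj, gi] = 1
    print(int(mask.sum()))  # 2 cells * 85 channels = 170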
# FineGrainedmask
def compute_lost_KD6(model, targets, output_s, output_t, batch_size):
T = 3.0
Lambda_feature = 0.001
criterion_st = torch.nn.KLDivLoss(reduction='sum')
feature_s = list(output_s)
feature_t = list(output_t)
tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
mask = fine_grained_imitation_mask(feature_s, feature_t, indices)
# feature loss
for i in range(len(mask)):
mask[i] = mask[i].to(targets.device)
feature_t[i] = feature_t[i] * mask[i]
feature_s[i] = feature_s[i] * mask[i]
feature_s = torch.cat([i.view(-1, 3 * (model.nc + 5)) for i in feature_s])
feature_t = torch.cat([i.view(-1, 3 * (model.nc + 5)) for i in feature_t])
lfeature = criterion_st(nn.functional.log_softmax(feature_s / T, dim=1),
nn.functional.softmax(feature_t / T, dim=1)) * (T * T) / batch_size
return lfeature * Lambda_feature
def Failure_Case_Loss_FM(masks, imgs, targets):
criterion = torch.nn.KLDivLoss(reduction='sum')
    if masks is None:
return torch.zeros([1]).to(imgs.device)
PBI = 0
PBO = 0
# masks_target = []
for i in range(masks.size(0)):
mask = masks[i]
pbi = torch.sum(mask[0]) / (mask.shape[1] * mask.shape[2])
PBI = PBI + pbi
target = targets[targets[:, 0] == i]
# mask_target = torch.zeros((3, mask.shape[1], mask.shape[2])).to(mask.device)
        for obj in target:  # one row per ground-truth box: [img, cls, x, y, w, h]
            x, y = mask.shape[2] * float(obj[2]), mask.shape[1] * float(obj[3])  # x scales with width, y with height
            w, h = mask.shape[2] * float(obj[4]), mask.shape[1] * float(obj[5])
            mask_object = torch.zeros((3, mask.shape[1], mask.shape[2])).to(mask.device)
            mask_object[:, round(y - h / 2):round(y + h / 2), round(x - w / 2):round(x + w / 2)] = 1
            pbo = torch.sum((mask * mask_object)) / torch.sum(mask_object)
            PBO = PBO + pbo
# mask_target = mask_target + mask_object
# masks_target.append(mask_target.unsqueeze(0))
# masks_target = torch.cat(masks_target, dim=0)
F_loss = abs(PBI - PBO) / imgs.shape[0]
# return criterion(masks*imgs, masks_target*imgs)#, PBI, PBO
fence_imgs = F.log_softmax((masks * imgs).view(imgs.size(0), -1), dim=-1)
original_imgs = F.softmax(imgs.view(imgs.size(0), -1), dim=-1)
D_loss = criterion(fence_imgs, original_imgs)
return F_loss + D_loss
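# Hedged note on Failure_Case_Loss_FM: PBI accumulates, per image, the fraction
# of the image the mask keeps, and PBO, per ground-truth box, the fraction of
# that box it keeps; F_loss = |PBI - PBO| / batch appears to push the mask to
# cover objects no more or less than it covers the image overall, while the KL
# term D_loss keeps the masked image close to the original in distribution.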
def build_targets(p, targets, model):
# targets = [image, class, x, y, w, h]
nt = targets.shape[0]
tcls, tbox, indices, av = [], [], [], []
reject, use_all_anchors = True, True
gain = torch.ones(6, device=targets.device) # normalized to gridspace gain
# m = list(model.modules())[-1]
# for i in range(m.nl):
# anchors = m.anchors[i]
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
for i, j in enumerate(model.yolo_layers):
# get number of grid points and anchor vec for this yolo layer
anchors = model.module.module_list[j].anchor_vec if multi_gpu else model.module_list[j].anchor_vec
# iou of targets-anchors
gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
t, a = targets * gain, []
gwh = t[:, 4:6]
if nt:
iou = wh_iou(anchors, gwh) # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
if use_all_anchors:
na = anchors.shape[0] # number of anchors
a = torch.arange(na).view(-1, 1).repeat(1, nt).view(-1)
t = t.repeat(na, 1)
else: # use best anchor only
iou, a = iou.max(0) # best iou and anchor
# reject anchors below iou_thres (OPTIONAL, increases P, lowers R)
if reject:
j = iou.view(-1) > model.hyp['iou_t'] # iou threshold hyperparameter
t, a = t[j], a[j]
# Indices
b, c = t[:, :2].long().t() # target image, class
gxy = t[:, 2:4] # grid x, y
gwh = t[:, 4:6] # grid w, h
gi, gj = gxy.long().t() # grid x, y indices
indices.append((b, a, gj, gi))
# Box
gxy -= gxy.floor() # xy
tbox.append(torch.cat((gxy, gwh), 1)) # xywh (grids)
av.append(anchors[a]) # anchor vec
# Class
tcls.append(c)
if c.shape[0]: # if any targets
            assert c.max() < model.nc, 'Model accepts %g classes labeled from 0-%g, but you labeled a class %g. ' \
                                       'See https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data' % (
                                           model.nc, model.nc - 1, c.max())
return tcls, tbox, indices, av
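def demo_anchor_matching():  # from utils.utils import *; demo_anchor_matching()
    # Hedged sketch, not part of the original file: with use_all_anchors and
    # reject both True, build_targets keeps every (anchor, target) pair whose
    # wh IoU clears hyp['iou_t']. Values below are dummies.
    anchors = torch.tensor([[1.25, 1.625], [2.0, 3.75], [4.125, 2.875]])
    gwh = torch.tensor([[2.2, 3.1], [0.9, 1.2]])  # target wh, grid units
    iou = wh_iou(anchors, gwh)                    # iou(3, 2), as in the loop above
    keep = iou.view(-1) > 0.2                     # example iou_t threshold
    print(iou.shape, int(keep.sum()))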
def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, multi_label=True, classes=None, agnostic=False):
"""
Performs Non-Maximum Suppression on inference results
Returns detections with shape:
nx6 (x1, y1, x2, y2, conf, cls)
"""
# Box constraints
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
method = 'merge'
nc = prediction[0].shape[1] - 5 # number of classes
multi_label &= nc > 1 # multiple labels per box
output = [None] * len(prediction)
for xi, x in enumerate(prediction): # image index, image inference
# Apply conf constraint
x = x[x[:, 4] > conf_thres]
# Apply width-height constraint
x = x[((x[:, 2:4] > min_wh) & (x[:, 2:4] < max_wh)).all(1)]
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[..., 5:] *= x[..., 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).t()
x = torch.cat((box[i], x[i, j + 5].unsqueeze(1), j.float().unsqueeze(1)), 1)
else: # best class only
conf, j = x[:, 5:].max(1)
x = torch.cat((box, conf.unsqueeze(1), j.float().unsqueeze(1)), 1)
# Filter by class
if classes:
x = x[(j.view(-1, 1) == torch.tensor(classes, device=j.device)).any(1)]
# Apply finite constraint
if not torch.isfinite(x).all():
x = x[torch.isfinite(x).all(1)]
# If none remain process next image
n = x.shape[0] # number of boxes
if not n:
continue
# Sort by confidence
# if method == 'fast_batch':
# x = x[x[:, 4].argsort(descending=True)]
# Batched NMS
c = x[:, 5] * 0 if agnostic else x[:, 5] # classes
boxes, scores = x[:, :4].clone() + c.view(-1, 1) * max_wh, x[:, 4] # boxes (offset by class), scores
if method == 'merge': # Merge NMS (boxes merged using weighted mean)
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
if 1 < n < 3E3: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
try:
# weights = (box_iou(boxes, boxes).tril_() > iou_thres) * scores.view(-1, 1) # box weights
# weights /= weights.sum(0) # normalize
# x[:, :4] = torch.mm(weights.T, x[:, :4])
weights = (box_iou(boxes[i], boxes) > iou_thres) * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
except: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
pass
elif method == 'vision':
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
elif method == 'fast': # FastNMS from https://github.com/dbolya/yolact
iou = box_iou(boxes, boxes).triu_(diagonal=1) # upper triangular iou matrix
i = iou.max(0)[0] < iou_thres
output[xi] = x[i]
return output
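# Usage sketch, not from the original file (tensor shapes are assumptions):
# pred = model(imgs)[0]                                   # (bs, n, 5 + nc)
# det = non_max_suppression(pred, conf_thres=0.3, iou_thres=0.6)[0]
# if det is not None:
#     for *xyxy, conf, cls in det:                        # det is an nx6 tensor
#         print(xyxy, float(conf), int(cls))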
def get_yolo_layers(model):
bool_vec = [x['type'] == 'yolo' for x in model.module_defs]
return [i for i, x in enumerate(bool_vec) if x] # [82, 94, 106] for yolov3
def print_model_biases(model):
# prints the bias neurons preceding each yolo layer
print('\nModel Bias Summary: %8s%18s%18s%18s' % ('layer', 'regression', 'objectness', 'classification'))
try:
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
for l in model.yolo_layers: # print pretrained biases
if multi_gpu:
na = model.module.module_list[l].na # number of anchors
b = model.module.module_list[l - 1][0].bias.view(na, -1) # bias 3x85
else:
na = model.module_list[l].na
b = model.module_list[l - 1][0].bias.view(na, -1) # bias 3x85
print(' ' * 20 + '%8g %18s%18s%18s' % (l, '%5.2f+/-%-5.2f' % (b[:, :4].mean(), b[:, :4].std()),
'%5.2f+/-%-5.2f' % (b[:, 4].mean(), b[:, 4].std()),
'%5.2f+/-%-5.2f' % (b[:, 5:].mean(), b[:, 5:].std())))
except:
pass
def strip_optimizer(f='weights/last.pt'):  # from utils.utils import *; strip_optimizer()
    # Strip the optimizer state from a *.pt checkpoint (cuts file size by ~2/3)
x = torch.load(f, map_location=torch.device('cpu'))
x['optimizer'] = None
torch.save(x, f)
def create_backbone(f='weights/last.pt'): # from utils.utils import *; create_backbone()
# create a backbone from a *.pt file
x = torch.load(f, map_location=torch.device('cpu'))
x['optimizer'] = None
x['training_results'] = None
x['epoch'] = -1
for p in x['model'].values():
try:
p.requires_grad = True
except:
pass
torch.save(x, 'weights/backbone.pt')
def coco_class_count(path='../coco/labels/train2014/'):
# Histogram of occurrences per class
nc = 80 # number classes
x = np.zeros(nc, dtype='int32')
files = sorted(glob.glob('%s/*.*' % path))
for i, file in enumerate(files):
labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
print(i, len(files))
def coco_only_people(path='../coco/labels/train2017/'): # from utils.utils import *; coco_only_people()
# Find images with only people
files = sorted(glob.glob('%s/*.*' % path))
for i, file in enumerate(files):
labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
if all(labels[:, 0] == 0):
print(labels.shape[0], file)
def select_best_evolve(path='evolve*.txt'): # from utils.utils import *; select_best_evolve()
# Find best evolved mutation
for file in sorted(glob.glob(path)):
x = np.loadtxt(file, dtype=np.float32, ndmin=2)
print(file, x[fitness(x).argmax()])
def crop_images_random(path='../images/', scale=0.50): # from utils.utils import *; crop_images_random()
# crops images into random squares up to scale fraction
# WARNING: overwrites images!
for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
img = cv2.imread(file) # BGR
if img is not None:
h, w = img.shape[:2]
# create random mask
a = 30 # minimum size (pixels)
mask_h = random.randint(a, int(max(a, h * scale))) # mask height
mask_w = mask_h # mask width
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
            # crop to the square and overwrite the original image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
# Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
if os.path.exists('new/'):
shutil.rmtree('new/') # delete output folder
os.makedirs('new/') # make new output folder
os.makedirs('new/labels/')
os.makedirs('new/images/')
for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
with open(file, 'r') as f:
labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
i = labels[:, 0] == label_class
if any(i):
img_file = file.replace('labels', 'images').replace('txt', 'jpg')
labels[:, 0] = 0 # reset class to 0
with open('new/images.txt', 'a') as f: # add image to dataset list
f.write(img_file + '\n')
with open('new/labels/' + Path(file).name, 'a') as f: # write label
for l in labels[i]:
f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
def kmean_anchors(path='./data/coco64.txt', n=9, img_size=(320, 1024), thr=0.20, gen=1000):
# Creates kmeans anchors for use in *.cfg files: from utils.utils import *; _ = kmean_anchors()
# n: number of anchors
# img_size: (min, max) image size used for multi-scale training (can be same values)
# thr: IoU threshold hyperparameter used for training (0.0 - 1.0)
# gen: generations to evolve anchors using genetic algorithm
from utils.datasets import LoadImagesAndLabels
def print_results(k):
k = k[np.argsort(k.prod(1))] # sort small to large
iou = wh_iou(wh, torch.Tensor(k))
max_iou = iou.max(1)[0]
bpr, aat = (max_iou > thr).float().mean(), (iou > thr).float().mean() * n # best possible recall, anch > thr
print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))
print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %
(n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')
for i, x in enumerate(k):
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
return k
def fitness(k): # mutation fitness
iou = wh_iou(wh, torch.Tensor(k)) # iou
max_iou = iou.max(1)[0]
return (max_iou * (max_iou > thr).float()).mean() # product
# Get label wh
wh = []
dataset = LoadImagesAndLabels(path, augment=True, rect=True)
nr = 1 if img_size[0] == img_size[1] else 10 # number augmentation repetitions
for s, l in zip(dataset.shapes, dataset.labels):
wh.append(l[:, 3:5] * (s / s.max())) # image normalized to letterbox normalized wh
    wh = np.concatenate(wh, 0).repeat(nr, axis=0)  # repeat nr times for multi-scale augmentation
wh *= np.random.uniform(img_size[0], img_size[1], size=(wh.shape[0], 1)) # normalized to pixels (multi-scale)
wh = wh[(wh > 2.0).all(1)] # remove below threshold boxes (< 2 pixels wh)
# Darknet yolov3.cfg anchors
use_darknet = False
if use_darknet and n == 9:
k = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]])
else:
# Kmeans calculation
from scipy.cluster.vq import kmeans
print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
s = wh.std(0) # sigmas for whitening
k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
k *= s
wh = torch.Tensor(wh)
k = print_results(k)
# # Plot
# k, d = [None] * 20, [None] * 20
# for i in tqdm(range(1, 21)):
# k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
# fig, ax = plt.subplots(1, 2, figsize=(14, 7))
# ax = ax.ravel()
# ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
# fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
# ax[0].hist(wh[wh[:, 0]<100, 0],400)
# ax[1].hist(wh[wh[:, 1]<100, 1],400)
# fig.tight_layout()
# fig.savefig('wh.png', dpi=200)
# Evolve
npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, anchor shape, mutation prob, sigma
for _ in tqdm(range(gen), desc='Evolving anchors'):
v = np.ones(sh)
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) # 98.6, 61.6
kg = (k.copy() * v).clip(min=2.0)
fg = fitness(kg)
if fg > f:
f, k = fg, kg.copy()
print_results(k)
k = print_results(k)
return k
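def demo_anchor_mutation():  # from utils.utils import *; demo_anchor_mutation()
    # Hedged sketch, not part of the original file: one generation of the
    # genetic step inside kmean_anchors - anchors are scaled by clipped random
    # factors and the mutant is kept only if its fitness improves.
    npr = np.random
    k = np.array([[10., 13.], [33., 23.], [116., 90.]])
    v = ((npr.random(k.shape) < 0.9) * npr.random() * npr.randn(*k.shape) * 0.1 + 1).clip(0.3, 3.0)
    kg = (k * v).clip(min=2.0)  # candidate anchors; kmean_anchors scores fitness(kg)
    print(kg.round(1))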
def print_mutation(hyp, results, bucket=''):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
c = '%10.4g' * len(results) % results # results (P, R, mAP, F1, test_loss)
print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
if bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % bucket) # download evolve.txt
with open('evolve.txt', 'a') as f: # append result
f.write(c + b + '\n')
x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g') # save sort by fitness
if bucket:
os.system('gsutil cp evolve.txt gs://%s' % bucket) # upload evolve.txt
def apply_classifier(x, model, img, im0):
# applies a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('test%i.jpg' % j, cutout)
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def fitness(x):
# Returns fitness (for use with results.txt or evolve.txt)
w = [0.0, 0.00, 1, 0.00] # weights for [P, R, mAP, F1]@0.5 or [P, R, mAP@0.5, mAP@0.5:0.95]
return (x[:, :4] * w).sum(1)
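# Worked example, not from the original file: with w = [0, 0, 1, 0] the fitness
# of a results row [P, R, mAP, F1] reduces to its mAP column, e.g.
# fitness(np.array([[0.6, 0.5, 0.42, 0.55]])) -> array([0.42]).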
def output_to_target(output, width, height):
"""
Convert a YOLO model output to target format
[batch_id, class_id, x, y, w, h, conf]
"""
for i in range(len(output)):
if isinstance(output[i], torch.Tensor):
output[i] = output[i].cpu().numpy()
targets = []
for i, o in enumerate(output):
if o is not None:
for pred in o:
box = pred[:4]
w = (box[2] - box[0]) / width
h = (box[3] - box[1]) / height
x = box[0] / width + w / 2
y = box[1] / height + h / 2
conf = pred[4]
cls = int(pred[5])
targets.append([i, cls, x, y, w, h, conf])
return np.array(targets)
# Plotting functions ---------------------------------------------------------------------------------------------------
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
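def demo_plot_one_box():  # from utils.utils import *; demo_plot_one_box()
    # Hedged sketch, not part of the original file: draw one labelled box on a
    # blank canvas and save it ('demo_box.jpg' is an arbitrary output name).
    img = np.zeros((416, 416, 3), dtype=np.uint8)
    plot_one_box([50, 60, 200, 220], img, label='person 0.90')
    cv2.imwrite('demo_box.jpg', img)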
def plot_wh_methods(): # from utils.utils import *; plot_wh_methods()
# Compares the two methods for width-height anchor multiplication
# https://github.com/ultralytics/yolov3/issues/168
x = np.arange(-4.0, 4.0, .1)
ya = np.exp(x)
yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
fig = plt.figure(figsize=(6, 3), dpi=150)
plt.plot(x, ya, '.-', label='yolo method')
plt.plot(x, yb ** 2, '.-', label='^2 power method')
plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
plt.xlim(left=-4, right=4)
plt.ylim(bottom=0, top=6)
plt.xlabel('input')
plt.ylabel('output')
plt.legend()
fig.tight_layout()
fig.savefig('comparison.png', dpi=200)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16,
is_gray_scale=False):
tl = 3 # line thickness
tf = max(tl - 1, 1) # font thickness
if isinstance(images, torch.Tensor):
images = images.cpu().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
# un-normalise
if np.max(images[0]) <= 1:
images *= 255
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs ** 0.5) # number of subplots (square)
# Check if we should resize
scale_factor = max_size / max(h, w)
if scale_factor < 1:
h = math.ceil(scale_factor * h)
w = math.ceil(scale_factor * w)
# Empty array for output
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)
# Fix class - colour map
prop_cycle = plt.rcParams['axes.prop_cycle']
# https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]
for i, img in enumerate(images):
        if i == max_subplots:  # stop once the subplot grid is full
break
block_x = int(w * (i // ns))
block_y = int(h * (i % ns))
img = img.transpose(1, 2, 0)
if scale_factor < 1:
img = cv2.resize(img, (w, h))
img = np.expand_dims(img, axis=-1)
mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
if len(targets) > 0:
image_targets = targets[targets[:, 0] == i]
boxes = xywh2xyxy(image_targets[:, 2:6]).T
classes = image_targets[:, 1].astype('int')
gt = image_targets.shape[1] == 6 # ground truth if no conf column
conf = None if gt else image_targets[:, 6] # check for confidence presence (gt vs pred)
boxes[[0, 2]] *= w
boxes[[0, 2]] += block_x
boxes[[1, 3]] *= h
boxes[[1, 3]] += block_y
for j, box in enumerate(boxes.T):
cls = int(classes[j])
color = color_lut[cls % len(color_lut)]
cls = names[cls] if names else cls
                    if gt or conf[j] > 0.1:  # 0.1 conf thresh
label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
# Draw image filename labels
if paths is not None:
label = os.path.basename(paths[i])[:40] # trim to 40 char
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
lineType=cv2.LINE_AA)
# Image border
cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
if fname is not None:
cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))
return mosaic
def plot_test_txt(): # from utils.utils import *; plot_test()
# Plot test.txt histograms
x = np.loadtxt('test.txt', dtype=np.float32)
box = xyxy2xywh(x[:, :4])
cx, cy = box[:, 0], box[:, 1]
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
ax.set_aspect('equal')
fig.tight_layout()
plt.savefig('hist2d.png', dpi=300)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].hist(cx, bins=600)
ax[1].hist(cy, bins=600)
fig.tight_layout()
plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt(): # from utils.utils import *; plot_targets_txt()
# Plot targets.txt histograms
x = np.loadtxt('targets.txt', dtype=np.float32).T
s = ['x targets', 'y targets', 'width targets', 'height targets']
fig, ax = plt.subplots(2, 2, figsize=(8, 8))
ax = ax.ravel()
for i in range(4):
ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
ax[i].legend()
ax[i].set_title(s[i])
fig.tight_layout()
plt.savefig('targets.jpg', dpi=200)
def plot_evolution_results(hyp): # from utils.utils import *; plot_evolution_results(hyp)
# Plot hyperparameter evolution results in evolve.txt
x = np.loadtxt('evolve.txt', ndmin=2)
f = fitness(x)
weights = (f - f.min()) ** 2 # for weighted results
fig = plt.figure(figsize=(12, 10))
matplotlib.rc('font', **{'size': 8})
for i, (k, v) in enumerate(hyp.items()):
y = x[:, i + 7]
# mu = (y * weights).sum() / weights.sum() # best weighted result
mu = y[f.argmax()] # best single result
plt.subplot(4, 5, i + 1)
plt.plot(mu, f.max(), 'o', markersize=10)
plt.plot(y, f, '.')
plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
print('%15s: %.3g' % (k, mu))
fig.tight_layout()
plt.savefig('evolve.png', dpi=200)
def plot_results_overlay(start=0, stop=0): # from utils.utils import *; plot_results_overlay()
# Plot training results files 'results*.txt', overlaying train and val losses
s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'F1'] # legends
t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
fig, ax = plt.subplots(1, 5, figsize=(14, 3.5))
ax = ax.ravel()
for i in range(5):
for j in [i, i + 5]:
y = results[j, x]
if i in [0, 1, 2]:
                    y[y == 0] = np.nan  # don't show zero loss values
ax[i].plot(x, y, marker='.', label=s[j])
ax[i].set_title(t[i])
ax[i].legend()
ax[i].set_ylabel(f) if i == 0 else None # add filename
fig.tight_layout()
fig.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_results(start=0, stop=0, bucket='', id=()): # from utils.utils import *; plot_results()
# Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov3#training
fig, ax = plt.subplots(2, 5, figsize=(12, 6))
ax = ax.ravel()
s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'F1']
if bucket:
os.system('rm -rf storage.googleapis.com')
files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
else:
files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
for f in sorted(files):
try:
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
for i in range(10):
y = results[i, x]
if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
# y /= y[0] # normalize
ax[i].plot(x, y, marker='.', label=Path(f).stem, linewidth=2, markersize=8)
ax[i].set_title(s[i])
if i in [5, 6, 7]: # share train and val loss y axes
ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
except:
print('Warning: Plotting error for %s, skipping file' % f)
fig.tight_layout()
ax[1].legend()
fig.savefig('results.png', dpi=200)
================================================
FILE: weights/pretrain_weights/download_yolov3_weights.sh
================================================
#!/bin/bash
# make 'weights' directory if it does not exist and cd into it
# mkdir -p weights && cd weights
# copy darknet weight files, continue '-c' if partially downloaded
# wget -c https://pjreddie.com/media/files/yolov3.weights
# wget -c https://pjreddie.com/media/files/yolov3-tiny.weights
# wget -c https://pjreddie.com/media/files/yolov3-spp.weights
# yolov3 pytorch weights
# download from Google Drive: https://drive.google.com/drive/folders/1uxgUBemJVw9wZsdpboYbzUN4bcRhsuAI
# darknet53 weights (first 75 layers only)
# wget -c https://pjreddie.com/media/files/darknet53.conv.74
# yolov3-tiny weights from darknet (first 16 layers only)
# ./darknet partial cfg/yolov3-tiny.cfg yolov3-tiny.weights yolov3-tiny.conv.15 15
# mv yolov3-tiny.conv.15 ../
# new method
python3 -c "from models import *;
attempt_download('weights/yolov3.pt');
attempt_download('weights/yolov3-spp.pt')"