From c4c4763957226cfc6ee64924fe06f667f5313d6f Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Mon, 16 Nov 2020 14:46:46 +0800
Subject: [PATCH 01/37] Update README.md
---
README.md | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 60 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index cf18bbd..e8ad71f 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,60 @@
-# scaledYOLOv4
\ No newline at end of file
+# YOLOv4-CSP
+
+This is the implementation of "Scaled-YOLOv4: Scaling Cross Stage Partial Network" using the PyTorch framework.
+
+## Installation
+
+```
+# create the docker container; you can increase the shared memory size if you have more memory available.
+nvidia-docker run --name yolov4_csp -it -v your_coco_path/:/coco/ -v your_code_path/:/yolo --shm-size=64g nvcr.io/nvidia/pytorch:20.06-py3
+
+# install mish-cuda; if you use a different pytorch version, you can try https://github.com/JunnYu/mish-cuda
+cd /
+git clone https://github.com/thomasbrandon/mish-cuda
+cd mish-cuda
+python setup.py build install
+
+# go to code folder
+cd /yolo
+```
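+
+(Optional) sanity-check the build; a minimal sketch, assuming a CUDA device is visible inside the container:
+
+```
+# should print torch.Size([2, 3, 8, 8]) if the mish-cuda extension loaded correctly
+python -c "import torch; from mish_cuda import MishCuda; print(MishCuda()(torch.randn(2, 3, 8, 8).cuda()).shape)"
+```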
+
+## Testing
+
+```
+# download yolov4-csp.weights and put it in the /yolo/weights/ folder.
+python test.py --img 640 --conf 0.001 --batch 8 --device 0 --data coco.yaml --cfg models/yolov4-csp.cfg --weights weights/yolov4-csp.weights
+```
+
+You will get the following results:
+```
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.47827
+ Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.66448
+ Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.51928
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.30647
+ Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.53106
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.61056
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.36823
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.60434
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.65795
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.48486
+ Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.70892
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.79914
+```
+
+## Training
+
+```
+# you can change the batch size to fit your GPU RAM.
+python train.py --device 0 --batch-size 16 --data coco.yaml --cfg yolov4-csp.cfg --weights '' --name yolov4-csp
+```
+
+To resume training:
+```
+# assuming the checkpoint is stored in runs/exp0_yolov4-csp/weights/.
+python train.py --device 0 --batch-size 16 --data coco.yaml --cfg yolov4-csp.cfg --weights 'runs/exp0_yolov4-csp/weights/last.pt' --name yolov4-csp --resume
+```
+
+If you want to use multiple GPUs for training:
+```
+python -m torch.distributed.launch --nproc_per_node 4 train.py --device 0,1,2,3 --batch-size 64 --data coco.yaml --cfg yolov4-csp.cfg --weights '' --name yolov4-csp --sync-bn
+```
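+
+## Inference
+
+For inference on images you can use detect.py (added later in this patch series). Note that detect.py loads a PyTorch checkpoint via load_state_dict rather than darknet *.weights, so point --weights at a *.pt file such as one saved by training; a minimal sketch:
+
+```
+python detect.py --source inference/images --cfg models/yolov4-csp.cfg --weights runs/exp0_yolov4-csp/weights/last.pt --names data/coco.names --img-size 640
+```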
From 053b7167a1464a33506600ce6fdefcca0eade21b Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Mon, 16 Nov 2020 16:17:30 +0800
Subject: [PATCH 02/37] Add files via upload
---
data/coco.names | 80 +++
data/coco.yaml | 18 +
data/hyp.scratch.yaml | 27 +
detect.py | 186 ++++++
models/common.py | 188 ++++++
models/experimental.py | 145 +++++
models/export.py | 68 +++
models/models.py | 504 ++++++++++++++++
models/yolo.py | 259 +++++++++
models/yolov3-spp.cfg | 821 ++++++++++++++++++++++++++
models/yolov4-csp.cfg | 1259 ++++++++++++++++++++++++++++++++++++++++
models/yolov4.cfg | 1154 ++++++++++++++++++++++++++++++++++++
test.py | 310 ++++++++++
train.py | 514 ++++++++++++++++
14 files changed, 5533 insertions(+)
create mode 100644 data/coco.names
create mode 100644 data/coco.yaml
create mode 100644 data/hyp.scratch.yaml
create mode 100644 detect.py
create mode 100644 models/common.py
create mode 100644 models/experimental.py
create mode 100644 models/export.py
create mode 100644 models/models.py
create mode 100644 models/yolo.py
create mode 100644 models/yolov3-spp.cfg
create mode 100644 models/yolov4-csp.cfg
create mode 100644 models/yolov4.cfg
create mode 100644 test.py
create mode 100644 train.py
diff --git a/data/coco.names b/data/coco.names
new file mode 100644
index 0000000..941cb4e
--- /dev/null
+++ b/data/coco.names
@@ -0,0 +1,80 @@
+person
+bicycle
+car
+motorcycle
+airplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+couch
+potted plant
+bed
+dining table
+toilet
+tv
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
diff --git a/data/coco.yaml b/data/coco.yaml
new file mode 100644
index 0000000..a31e20f
--- /dev/null
+++ b/data/coco.yaml
@@ -0,0 +1,18 @@
+# train and val datasets (image directory or *.txt file with image paths)
+train: ../coco/train2017.txt # 118k images
+val: ../coco/val2017.txt # 5k images
+test: ../coco/testdev2017.txt # 20k images for submission to https://competitions.codalab.org/competitions/20794
+
+# number of classes
+nc: 80
+
+# class names
+names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
+ 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+ 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+ 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+ 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
+ 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+ 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
+ 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+ 'hair drier', 'toothbrush']
diff --git a/data/hyp.scratch.yaml b/data/hyp.scratch.yaml
new file mode 100644
index 0000000..fa8c9fd
--- /dev/null
+++ b/data/hyp.scratch.yaml
@@ -0,0 +1,27 @@
+# Hyperparameters for COCO training from scratch
+# python train.py --batch 40 --cfg yolov4-csp.cfg --weights '' --data coco.yaml --img 640 --epochs 300
+# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
+
+
+lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
+momentum: 0.937 # SGD momentum/Adam beta1
+weight_decay: 0.0005 # optimizer weight decay 5e-4
+giou: 0.05 # GIoU loss gain
+cls: 0.5 # cls loss gain
+cls_pw: 1.0 # cls BCELoss positive_weight
+obj: 1.0 # obj loss gain (scale with pixels)
+obj_pw: 1.0 # obj BCELoss positive_weight
+iou_t: 0.20 # IoU training threshold
+anchor_t: 4.0 # anchor-multiple threshold
+fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
+hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
+hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
+hsv_v: 0.4 # image HSV-Value augmentation (fraction)
+degrees: 0.0 # image rotation (+/- deg)
+translate: 0.0 # image translation (+/- fraction)
+scale: 0.5 # image scale (+/- gain)
+shear: 0.0 # image shear (+/- deg)
+perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
+flipud: 0.0 # image flip up-down (probability)
+fliplr: 0.5 # image flip left-right (probability)
+mixup: 0.0 # image mixup (probability)
diff --git a/detect.py b/detect.py
new file mode 100644
index 0000000..76d4bc3
--- /dev/null
+++ b/detect.py
@@ -0,0 +1,186 @@
+import argparse
+import os
+import platform
+import shutil
+import time
+from pathlib import Path
+
+import cv2
+import torch
+import torch.backends.cudnn as cudnn
+from numpy import random
+
+from models.experimental import attempt_load
+from utils.datasets import LoadStreams, LoadImages
+from utils.general import (
+ check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
+from utils.torch_utils import select_device, load_classifier, time_synchronized
+
+from models.models import *
+from models.experimental import *
+from utils.datasets import *
+from utils.general import *
+
+def load_classes(path):
+ # Loads *.names file at 'path'
+ with open(path, 'r') as f:
+ names = f.read().split('\n')
+ return list(filter(None, names)) # filter removes empty strings (such as last line)
+
+def detect(save_img=False):
+ out, source, weights, view_img, save_txt, imgsz, cfg, names = \
+ opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, opt.cfg, opt.names
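+    # a numeric camera index ('0'), rtsp/http streams, or a *.txt list of stream URLs are webcam-style sources
+    # handled by LoadStreams; everything else goes through LoadImages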
+ webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
+
+ # Initialize
+ device = select_device(opt.device)
+ if os.path.exists(out):
+ shutil.rmtree(out) # delete output folder
+ os.makedirs(out) # make new output folder
+ half = device.type != 'cpu' # half precision only supported on CUDA
+
+ # Load model
+ model = Darknet(cfg, imgsz).cuda()
+ model.load_state_dict(torch.load(weights[0], map_location=device)['model'])
+ #model = attempt_load(weights, map_location=device) # load FP32 model
+ #imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
+ model.to(device).eval()
+ if half:
+ model.half() # to FP16
+
+ # Second-stage classifier
+ classify = False
+ if classify:
+ modelc = load_classifier(name='resnet101', n=2) # initialize
+ modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']) # load weights
+ modelc.to(device).eval()
+
+ # Set Dataloader
+ vid_path, vid_writer = None, None
+ if webcam:
+ view_img = True
+ cudnn.benchmark = True # set True to speed up constant image size inference
+ dataset = LoadStreams(source, img_size=imgsz)
+ else:
+ save_img = True
+ dataset = LoadImages(source, img_size=imgsz)
+
+ # Get names and colors
+ names = load_classes(names)
+ colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
+
+ # Run inference
+ t0 = time.time()
+ img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
+ _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
+ for path, img, im0s, vid_cap in dataset:
+ img = torch.from_numpy(img).to(device)
+ img = img.half() if half else img.float() # uint8 to fp16/32
+ img /= 255.0 # 0 - 255 to 0.0 - 1.0
+ if img.ndimension() == 3:
+ img = img.unsqueeze(0)
+
+ # Inference
+ t1 = time_synchronized()
+ pred = model(img, augment=opt.augment)[0]
+
+ # Apply NMS
+ pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
+ t2 = time_synchronized()
+
+ # Apply Classifier
+ if classify:
+ pred = apply_classifier(pred, modelc, img, im0s)
+
+ # Process detections
+ for i, det in enumerate(pred): # detections per image
+ if webcam: # batch_size >= 1
+ p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
+ else:
+ p, s, im0 = path, '', im0s
+
+ save_path = str(Path(out) / Path(p).name)
+ txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
+ s += '%gx%g ' % img.shape[2:] # print string
+ gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
+ if det is not None and len(det):
+ # Rescale boxes from img_size to im0 size
+ det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
+
+ # Print results
+ for c in det[:, -1].unique():
+ n = (det[:, -1] == c).sum() # detections per class
+ s += '%g %ss, ' % (n, names[int(c)]) # add to string
+
+ # Write results
+ for *xyxy, conf, cls in det:
+ if save_txt: # Write to file
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
+ with open(txt_path + '.txt', 'a') as f:
+ f.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format
+
+ if save_img or view_img: # Add bbox to image
+ label = '%s %.2f' % (names[int(cls)], conf)
+ plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
+
+ # Print time (inference + NMS)
+ print('%sDone. (%.3fs)' % (s, t2 - t1))
+
+ # Stream results
+ if view_img:
+ cv2.imshow(p, im0)
+ if cv2.waitKey(1) == ord('q'): # q to quit
+ raise StopIteration
+
+ # Save results (image with detections)
+ if save_img:
+ if dataset.mode == 'images':
+ cv2.imwrite(save_path, im0)
+ else:
+ if vid_path != save_path: # new video
+ vid_path = save_path
+ if isinstance(vid_writer, cv2.VideoWriter):
+ vid_writer.release() # release previous video writer
+
+ fourcc = 'mp4v' # output video codec
+ fps = vid_cap.get(cv2.CAP_PROP_FPS)
+ w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
+ vid_writer.write(im0)
+
+ if save_txt or save_img:
+ print('Results saved to %s' % Path(out))
+        if platform.system() == 'Darwin' and not opt.update: # MacOS
+ os.system('open ' + save_path)
+
+ print('Done. (%.3fs)' % (time.time() - t0))
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov4.pt', help='model.pt path(s)')
+ parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam
+ parser.add_argument('--output', type=str, default='inference/output', help='output folder') # output folder
+ parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
+ parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
+ parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--view-img', action='store_true', help='display results')
+ parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+ parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
+ parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+ parser.add_argument('--augment', action='store_true', help='augmented inference')
+ parser.add_argument('--update', action='store_true', help='update all models')
+ parser.add_argument('--cfg', type=str, default='cfg/yolov4.cfg', help='*.cfg path')
+    parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
+ opt = parser.parse_args()
+ print(opt)
+
+ with torch.no_grad():
+ if opt.update: # update all models (to fix SourceChangeWarning)
+ for opt.weights in ['']:
+ detect()
+ strip_optimizer(opt.weights)
+ else:
+ detect()
diff --git a/models/common.py b/models/common.py
new file mode 100644
index 0000000..a11240b
--- /dev/null
+++ b/models/common.py
@@ -0,0 +1,188 @@
+# This file contains modules common to various models
+import math
+
+import torch
+import torch.nn as nn
+
+from mish_cuda import MishCuda as Mish
+
+
+def autopad(k, p=None): # kernel, padding
+ # Pad to 'same'
+ if p is None:
+ p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
+ return p
+
+
+def DWConv(c1, c2, k=1, s=1, act=True):
+ # Depthwise convolution
+ return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
+
+
+class Conv(nn.Module):
+ # Standard convolution
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
+ super(Conv, self).__init__()
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
+ self.bn = nn.BatchNorm2d(c2)
+ self.act = Mish() if act else nn.Identity()
+
+ def forward(self, x):
+ return self.act(self.bn(self.conv(x)))
+
+ def fuseforward(self, x):
+ return self.act(self.conv(x))
+
+
+class Bottleneck(nn.Module):
+ # Standard bottleneck
+ def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
+ super(Bottleneck, self).__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = Conv(c_, c2, 3, 1, g=g)
+ self.add = shortcut and c1 == c2
+
+ def forward(self, x):
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+class BottleneckCSP(nn.Module):
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
+ super(BottleneckCSP, self).__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
+ self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
+ self.cv4 = Conv(2 * c_, c2, 1, 1)
+ self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
+ self.act = Mish()
+ self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+
+ def forward(self, x):
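+        # CSP forward: cv1 -> m (bottleneck stack) -> cv3 forms the dense path, cv2 is a plain 1x1 shortcut path;
+        # the two halves are concatenated, passed through BN + Mish, then fused by cv4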
+ y1 = self.cv3(self.m(self.cv1(x)))
+ y2 = self.cv2(x)
+ return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+
+
+class BottleneckCSP2(nn.Module):
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
+ def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
+ super(BottleneckCSP2, self).__init__()
+ c_ = int(c2) # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)
+ self.cv3 = Conv(2 * c_, c2, 1, 1)
+ self.bn = nn.BatchNorm2d(2 * c_)
+ self.act = Mish()
+ self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+
+ def forward(self, x):
+ x1 = self.cv1(x)
+ y1 = self.m(x1)
+ y2 = self.cv2(x1)
+ return self.cv3(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+
+
+class VoVCSP(nn.Module):
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
+ super(VoVCSP, self).__init__()
+ c_ = int(c2) # hidden channels
+ self.cv1 = Conv(c1//2, c_//2, 3, 1)
+ self.cv2 = Conv(c_//2, c_//2, 3, 1)
+ self.cv3 = Conv(c_, c2, 1, 1)
+
+ def forward(self, x):
+ _, x1 = x.chunk(2, dim=1)
+ x1 = self.cv1(x1)
+ x2 = self.cv2(x1)
+ return self.cv3(torch.cat((x1,x2), dim=1))
+
+
+class SPP(nn.Module):
+ # Spatial pyramid pooling layer used in YOLOv3-SPP
+ def __init__(self, c1, c2, k=(5, 9, 13)):
+ super(SPP, self).__init__()
+ c_ = c1 // 2 # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
+ self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
+
+ def forward(self, x):
+ x = self.cv1(x)
+ return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
+
+
+class SPPCSP(nn.Module):
+ # CSP SPP https://github.com/WongKinYiu/CrossStagePartialNetworks
+ def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
+ super(SPPCSP, self).__init__()
+ c_ = int(2 * c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
+ self.cv3 = Conv(c_, c_, 3, 1)
+ self.cv4 = Conv(c_, c_, 1, 1)
+ self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
+ self.cv5 = Conv(4 * c_, c_, 1, 1)
+ self.cv6 = Conv(c_, c_, 3, 1)
+ self.bn = nn.BatchNorm2d(2 * c_)
+ self.act = Mish()
+ self.cv7 = Conv(2 * c_, c2, 1, 1)
+
+ def forward(self, x):
+ x1 = self.cv4(self.cv3(self.cv1(x)))
+ y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))
+ y2 = self.cv2(x)
+ return self.cv7(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+
+
+class MP(nn.Module):
+    # Max pooling layer (downsamples by stride k)
+ def __init__(self, k=2):
+ super(MP, self).__init__()
+ self.m = nn.MaxPool2d(kernel_size=k, stride=k)
+
+ def forward(self, x):
+ return self.m(x)
+
+
+class Focus(nn.Module):
+ # Focus wh information into c-space
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
+ super(Focus, self).__init__()
+ self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
+
+ def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
+ return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
+
+
+class Concat(nn.Module):
+ # Concatenate a list of tensors along dimension
+ def __init__(self, dimension=1):
+ super(Concat, self).__init__()
+ self.d = dimension
+
+ def forward(self, x):
+ return torch.cat(x, self.d)
+
+
+class Flatten(nn.Module):
+ # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
+ @staticmethod
+ def forward(x):
+ return x.view(x.size(0), -1)
+
+
+class Classify(nn.Module):
+ # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
+ super(Classify, self).__init__()
+ self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) # to x(b,c2,1,1)
+ self.flat = Flatten()
+
+ def forward(self, x):
+ z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
+ return self.flat(self.conv(z)) # flatten to x(b,c2)
\ No newline at end of file
diff --git a/models/experimental.py b/models/experimental.py
new file mode 100644
index 0000000..1b99ce4
--- /dev/null
+++ b/models/experimental.py
@@ -0,0 +1,145 @@
+# This file contains experimental modules
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from models.common import Conv, DWConv
+from utils.google_utils import attempt_download
+
+
+class CrossConv(nn.Module):
+ # Cross Convolution Downsample
+ def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
+ # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
+ super(CrossConv, self).__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, (1, k), (1, s))
+ self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
+ self.add = shortcut and c1 == c2
+
+ def forward(self, x):
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+class C3(nn.Module):
+ # Cross Convolution CSP
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
+ super(C3, self).__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
+ self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
+ self.cv4 = Conv(2 * c_, c2, 1, 1)
+ self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
+ self.act = nn.LeakyReLU(0.1, inplace=True)
+ self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
+
+ def forward(self, x):
+ y1 = self.cv3(self.m(self.cv1(x)))
+ y2 = self.cv2(x)
+ return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+
+
+class Sum(nn.Module):
+ # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
+ def __init__(self, n, weight=False): # n: number of inputs
+ super(Sum, self).__init__()
+ self.weight = weight # apply weights boolean
+ self.iter = range(n - 1) # iter object
+ if weight:
+ self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights
+
+ def forward(self, x):
+ y = x[0] # no weight
+ if self.weight:
+ w = torch.sigmoid(self.w) * 2
+ for i in self.iter:
+ y = y + x[i + 1] * w[i]
+ else:
+ for i in self.iter:
+ y = y + x[i + 1]
+ return y
+
+
+class GhostConv(nn.Module):
+ # Ghost Convolution https://github.com/huawei-noah/ghostnet
+ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
+ super(GhostConv, self).__init__()
+ c_ = c2 // 2 # hidden channels
+ self.cv1 = Conv(c1, c_, k, s, g, act)
+ self.cv2 = Conv(c_, c_, 5, 1, c_, act)
+
+ def forward(self, x):
+ y = self.cv1(x)
+ return torch.cat([y, self.cv2(y)], 1)
+
+
+class GhostBottleneck(nn.Module):
+ # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
+ def __init__(self, c1, c2, k, s):
+ super(GhostBottleneck, self).__init__()
+ c_ = c2 // 2
+ self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
+ DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
+ GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
+ self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
+ Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
+
+ def forward(self, x):
+ return self.conv(x) + self.shortcut(x)
+
+
+class MixConv2d(nn.Module):
+ # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
+ def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
+ super(MixConv2d, self).__init__()
+ groups = len(k)
+ if equal_ch: # equal c_ per group
+ i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
+ c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
+ else: # equal weight.numel() per group
+ b = [c2] + [0] * groups
+ a = np.eye(groups + 1, groups, k=-1)
+ a -= np.roll(a, 1, axis=1)
+ a *= np.array(k) ** 2
+ a[0] = 1
+ c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
+
+ self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
+ self.bn = nn.BatchNorm2d(c2)
+ self.act = nn.LeakyReLU(0.1, inplace=True)
+
+ def forward(self, x):
+ return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
+
+
+class Ensemble(nn.ModuleList):
+ # Ensemble of models
+ def __init__(self):
+ super(Ensemble, self).__init__()
+
+ def forward(self, x, augment=False):
+ y = []
+ for module in self:
+ y.append(module(x, augment)[0])
+ # y = torch.stack(y).max(0)[0] # max ensemble
+ # y = torch.cat(y, 1) # nms ensemble
+ y = torch.stack(y).mean(0) # mean ensemble
+ return y, None # inference, train output
+
+
+def attempt_load(weights, map_location=None):
+ # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+ model = Ensemble()
+ for w in weights if isinstance(weights, list) else [weights]:
+ attempt_download(w)
+ model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model
+
+ if len(model) == 1:
+ return model[-1] # return model
+ else:
+ print('Ensemble created with %s\n' % weights)
+ for k in ['names', 'stride']:
+ setattr(model, k, getattr(model[-1], k))
+ return model # return ensemble
diff --git a/models/export.py b/models/export.py
new file mode 100644
index 0000000..d91813a
--- /dev/null
+++ b/models/export.py
@@ -0,0 +1,68 @@
+import argparse
+
+import torch
+
+from utils.google_utils import attempt_download
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', type=str, default='./yolov4.pt', help='weights path')
+ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
+ parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+ opt = parser.parse_args()
+ opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
+ print(opt)
+
+ # Input
+ img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection
+
+ # Load PyTorch model
+ attempt_download(opt.weights)
+ model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
+ model.eval()
+ model.model[-1].export = True # set Detect() layer export=True
+ y = model(img) # dry run
+
+ # TorchScript export
+ try:
+ print('\nStarting TorchScript export with torch %s...' % torch.__version__)
+ f = opt.weights.replace('.pt', '.torchscript.pt') # filename
+ ts = torch.jit.trace(model, img)
+ ts.save(f)
+ print('TorchScript export success, saved as %s' % f)
+ except Exception as e:
+ print('TorchScript export failure: %s' % e)
+
+ # ONNX export
+ try:
+ import onnx
+
+ print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
+ f = opt.weights.replace('.pt', '.onnx') # filename
+ model.fuse() # only for ONNX
+ torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
+ output_names=['classes', 'boxes'] if y is None else ['output'])
+
+ # Checks
+ onnx_model = onnx.load(f) # load onnx model
+ onnx.checker.check_model(onnx_model) # check onnx model
+ print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
+ print('ONNX export success, saved as %s' % f)
+ except Exception as e:
+ print('ONNX export failure: %s' % e)
+
+ # CoreML export
+ try:
+ import coremltools as ct
+
+ print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
+ # convert model from torchscript and apply pixel scaling as per detect.py
+ model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
+ f = opt.weights.replace('.pt', '.mlmodel') # filename
+ model.save(f)
+ print('CoreML export success, saved as %s' % f)
+ except Exception as e:
+ print('CoreML export failure: %s' % e)
+
+ # Finish
+ print('\nExport complete. Visualize with https://github.com/lutzroeder/netron.')
diff --git a/models/models.py b/models/models.py
new file mode 100644
index 0000000..5dcb248
--- /dev/null
+++ b/models/models.py
@@ -0,0 +1,504 @@
+from utils.google_utils import *
+from utils.layers import *
+from utils.parse_config import *
+from utils import torch_utils
+
+ONNX_EXPORT = False
+
+
+def create_modules(module_defs, img_size, cfg):
+ # Constructs module list of layer blocks from module configuration in module_defs
+
+ img_size = [img_size] * 2 if isinstance(img_size, int) else img_size # expand if necessary
+ _ = module_defs.pop(0) # cfg training hyperparams (unused)
+ output_filters = [3] # input channels
+ module_list = nn.ModuleList()
+    routs = [] # list of layers which route to deeper layers
+ yolo_index = -1
+
+ for i, mdef in enumerate(module_defs):
+ modules = nn.Sequential()
+
+ if mdef['type'] == 'convolutional':
+ bn = mdef['batch_normalize']
+ filters = mdef['filters']
+ k = mdef['size'] # kernel size
+ stride = mdef['stride'] if 'stride' in mdef else (mdef['stride_y'], mdef['stride_x'])
+ if isinstance(k, int): # single-size conv
+ modules.add_module('Conv2d', nn.Conv2d(in_channels=output_filters[-1],
+ out_channels=filters,
+ kernel_size=k,
+ stride=stride,
+ padding=k // 2 if mdef['pad'] else 0,
+ groups=mdef['groups'] if 'groups' in mdef else 1,
+ bias=not bn))
+ else: # multiple-size conv
+ modules.add_module('MixConv2d', MixConv2d(in_ch=output_filters[-1],
+ out_ch=filters,
+ k=k,
+ stride=stride,
+ bias=not bn))
+
+ if bn:
+ modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4))
+ else:
+ routs.append(i) # detection output (goes into yolo layer)
+
+ if mdef['activation'] == 'leaky': # activation study https://github.com/ultralytics/yolov3/issues/441
+ modules.add_module('activation', nn.LeakyReLU(0.1, inplace=True))
+ elif mdef['activation'] == 'swish':
+ modules.add_module('activation', Swish())
+ elif mdef['activation'] == 'mish':
+ modules.add_module('activation', Mish())
+
+ elif mdef['type'] == 'deformableconvolutional':
+ bn = mdef['batch_normalize']
+ filters = mdef['filters']
+ k = mdef['size'] # kernel size
+ stride = mdef['stride'] if 'stride' in mdef else (mdef['stride_y'], mdef['stride_x'])
+ if isinstance(k, int): # single-size conv
+ modules.add_module('DeformConv2d', DeformConv2d(output_filters[-1],
+ filters,
+ kernel_size=k,
+ padding=k // 2 if mdef['pad'] else 0,
+ stride=stride,
+ bias=not bn,
+ modulation=True))
+ else: # multiple-size conv
+ modules.add_module('MixConv2d', MixConv2d(in_ch=output_filters[-1],
+ out_ch=filters,
+ k=k,
+ stride=stride,
+ bias=not bn))
+
+ if bn:
+ modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4))
+ else:
+ routs.append(i) # detection output (goes into yolo layer)
+
+ if mdef['activation'] == 'leaky': # activation study https://github.com/ultralytics/yolov3/issues/441
+ modules.add_module('activation', nn.LeakyReLU(0.1, inplace=True))
+ elif mdef['activation'] == 'swish':
+ modules.add_module('activation', Swish())
+ elif mdef['activation'] == 'mish':
+ modules.add_module('activation', Mish())
+
+ elif mdef['type'] == 'BatchNorm2d':
+ filters = output_filters[-1]
+ modules = nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4)
+ if i == 0 and filters == 3: # normalize RGB image
+ # imagenet mean and var https://pytorch.org/docs/stable/torchvision/models.html#classification
+ modules.running_mean = torch.tensor([0.485, 0.456, 0.406])
+ modules.running_var = torch.tensor([0.0524, 0.0502, 0.0506])
+
+ elif mdef['type'] == 'maxpool':
+ k = mdef['size'] # kernel size
+ stride = mdef['stride']
+ maxpool = nn.MaxPool2d(kernel_size=k, stride=stride, padding=(k - 1) // 2)
+ if k == 2 and stride == 1: # yolov3-tiny
+ modules.add_module('ZeroPad2d', nn.ZeroPad2d((0, 1, 0, 1)))
+ modules.add_module('MaxPool2d', maxpool)
+ else:
+ modules = maxpool
+
+ elif mdef['type'] == 'upsample':
+ if ONNX_EXPORT: # explicitly state size, avoid scale_factor
+ g = (yolo_index + 1) * 2 / 32 # gain
+ modules = nn.Upsample(size=tuple(int(x * g) for x in img_size)) # img_size = (320, 192)
+ else:
+ modules = nn.Upsample(scale_factor=mdef['stride'])
+
+ elif mdef['type'] == 'route': # nn.Sequential() placeholder for 'route' layer
+ layers = mdef['layers']
+ filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers])
+ routs.extend([i + l if l < 0 else l for l in layers])
+ modules = FeatureConcat(layers=layers)
+
+ elif mdef['type'] == 'route2': # nn.Sequential() placeholder for 'route' layer
+ layers = mdef['layers']
+ filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers])
+ routs.extend([i + l if l < 0 else l for l in layers])
+ modules = FeatureConcat2(layers=layers)
+
+ elif mdef['type'] == 'route3': # nn.Sequential() placeholder for 'route' layer
+ layers = mdef['layers']
+ filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers])
+ routs.extend([i + l if l < 0 else l for l in layers])
+ modules = FeatureConcat3(layers=layers)
+
+ elif mdef['type'] == 'route_lhalf': # nn.Sequential() placeholder for 'route' layer
+ layers = mdef['layers']
+ filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers])//2
+ routs.extend([i + l if l < 0 else l for l in layers])
+ modules = FeatureConcat_l(layers=layers)
+
+ elif mdef['type'] == 'shortcut': # nn.Sequential() placeholder for 'shortcut' layer
+ layers = mdef['from']
+ filters = output_filters[-1]
+ routs.extend([i + l if l < 0 else l for l in layers])
+ modules = WeightedFeatureFusion(layers=layers, weight='weights_type' in mdef)
+
+ elif mdef['type'] == 'reorg3d': # yolov3-spp-pan-scale
+ pass
+
+ elif mdef['type'] == 'yolo':
+ yolo_index += 1
+ stride = [8, 16, 32, 64, 128] # P3, P4, P5, P6, P7 strides
+ if any(x in cfg for x in ['yolov4-tiny', 'fpn', 'yolov3']): # P5, P4, P3 strides
+ stride = [32, 16, 8]
+ layers = mdef['from'] if 'from' in mdef else []
+ modules = YOLOLayer(anchors=mdef['anchors'][mdef['mask']], # anchor list
+ nc=mdef['classes'], # number of classes
+ img_size=img_size, # (416, 416)
+ yolo_index=yolo_index, # 0, 1, 2...
+ layers=layers, # output layers
+ stride=stride[yolo_index])
+
+ # Initialize preceding Conv2d() bias (https://arxiv.org/pdf/1708.02002.pdf section 3.3)
+ try:
+ j = layers[yolo_index] if 'from' in mdef else -1
+ bias_ = module_list[j][0].bias # shape(255,)
+ bias = bias_[:modules.no * modules.na].view(modules.na, -1) # shape(3,85)
+ #bias[:, 4] += -4.5 # obj
+ bias[:, 4] += math.log(8 / (640 / stride[yolo_index]) ** 2) # obj (8 objects per 640 image)
+ bias[:, 5:] += math.log(0.6 / (modules.nc - 0.99)) # cls (sigmoid(p) = 1/nc)
+ module_list[j][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad)
+            except Exception:
+ print('WARNING: smart bias initialization failure.')
+
+ else:
+ print('Warning: Unrecognized Layer Type: ' + mdef['type'])
+
+ # Register module list and number of output filters
+ module_list.append(modules)
+ output_filters.append(filters)
+
+ routs_binary = [False] * (i + 1)
+ for i in routs:
+ routs_binary[i] = True
+ return module_list, routs_binary
+
+
+class YOLOLayer(nn.Module):
+ def __init__(self, anchors, nc, img_size, yolo_index, layers, stride):
+ super(YOLOLayer, self).__init__()
+ self.anchors = torch.Tensor(anchors)
+ self.index = yolo_index # index of this layer in layers
+ self.layers = layers # model output layer indices
+ self.stride = stride # layer stride
+ self.nl = len(layers) # number of output layers (3)
+ self.na = len(anchors) # number of anchors (3)
+ self.nc = nc # number of classes (80)
+ self.no = nc + 5 # number of outputs (85)
+ self.nx, self.ny, self.ng = 0, 0, 0 # initialize number of x, y gridpoints
+ self.anchor_vec = self.anchors / self.stride
+ self.anchor_wh = self.anchor_vec.view(1, self.na, 1, 1, 2)
+
+ if ONNX_EXPORT:
+ self.training = False
+ self.create_grids((img_size[1] // stride, img_size[0] // stride)) # number x, y grid points
+
+ def create_grids(self, ng=(13, 13), device='cpu'):
+ self.nx, self.ny = ng # x and y grid size
+ self.ng = torch.tensor(ng, dtype=torch.float)
+
+ # build xy offsets
+ if not self.training:
+ yv, xv = torch.meshgrid([torch.arange(self.ny, device=device), torch.arange(self.nx, device=device)])
+ self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()
+
+ if self.anchor_vec.device != device:
+ self.anchor_vec = self.anchor_vec.to(device)
+ self.anchor_wh = self.anchor_wh.to(device)
+
+ def forward(self, p, out):
+ ASFF = False # https://arxiv.org/abs/1911.09516
+ if ASFF:
+ i, n = self.index, self.nl # index in layers, number of layers
+ p = out[self.layers[i]]
+ bs, _, ny, nx = p.shape # bs, 255, 13, 13
+ if (self.nx, self.ny) != (nx, ny):
+ self.create_grids((nx, ny), p.device)
+
+ # outputs and weights
+ # w = F.softmax(p[:, -n:], 1) # normalized weights
+ w = torch.sigmoid(p[:, -n:]) * (2 / n) # sigmoid weights (faster)
+ # w = w / w.sum(1).unsqueeze(1) # normalize across layer dimension
+
+ # weighted ASFF sum
+ p = out[self.layers[i]][:, :-n] * w[:, i:i + 1]
+ for j in range(n):
+ if j != i:
+ p += w[:, j:j + 1] * \
+ F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False)
+
+ elif ONNX_EXPORT:
+ bs = 1 # batch size
+ else:
+ bs, _, ny, nx = p.shape # bs, 255, 13, 13
+ if (self.nx, self.ny) != (nx, ny):
+ self.create_grids((nx, ny), p.device)
+
+        # p.view(bs, 255, 13, 13) --> (bs, 3, 13, 13, 85) # (bs, anchors, grid, grid, classes + xywh)
+ p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous() # prediction
+
+ if self.training:
+ return p
+
+ elif ONNX_EXPORT:
+ # Avoid broadcasting for ANE operations
+ m = self.na * self.nx * self.ny
+ ng = 1. / self.ng.repeat(m, 1)
+ grid = self.grid.repeat(1, self.na, 1, 1, 1).view(m, 2)
+ anchor_wh = self.anchor_wh.repeat(1, 1, self.nx, self.ny, 1).view(m, 2) * ng
+
+ p = p.view(m, self.no)
+ xy = torch.sigmoid(p[:, 0:2]) + grid # x, y
+ wh = torch.exp(p[:, 2:4]) * anchor_wh # width, height
+ p_cls = torch.sigmoid(p[:, 4:5]) if self.nc == 1 else \
+ torch.sigmoid(p[:, 5:self.no]) * torch.sigmoid(p[:, 4:5]) # conf
+ return p_cls, xy * ng, wh
+
+ else: # inference
+ io = p.sigmoid()
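+            # scaled-YOLOv4 decode: xy = 2*sigmoid - 0.5 + grid (cell-relative, feature-map units),
+            # wh = (2*sigmoid)^2 * anchor_wh; the stride multiply below converts boxes to pixels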
+ io[..., :2] = (io[..., :2] * 2. - 0.5 + self.grid)
+ io[..., 2:4] = (io[..., 2:4] * 2) ** 2 * self.anchor_wh
+ io[..., :4] *= self.stride
+ #io = p.clone() # inference output
+ #io[..., :2] = torch.sigmoid(io[..., :2]) + self.grid # xy
+ #io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh # wh yolo method
+ #io[..., :4] *= self.stride
+ #torch.sigmoid_(io[..., 4:])
+ return io.view(bs, -1, self.no), p # view [1, 3, 13, 13, 85] as [1, 507, 85]
+
+class Darknet(nn.Module):
+ # YOLOv3 object detection model
+
+ def __init__(self, cfg, img_size=(416, 416), verbose=False):
+ super(Darknet, self).__init__()
+
+ self.module_defs = parse_model_cfg(cfg)
+ self.module_list, self.routs = create_modules(self.module_defs, img_size, cfg)
+ self.yolo_layers = get_yolo_layers(self)
+ # torch_utils.initialize_weights(self)
+
+ # Darknet Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
+ self.version = np.array([0, 2, 5], dtype=np.int32) # (int32) version info: major, minor, revision
+ self.seen = np.array([0], dtype=np.int64) # (int64) number of images seen during training
+ self.info(verbose) if not ONNX_EXPORT else None # print model description
+
+ def forward(self, x, augment=False, verbose=False):
+
+ if not augment:
+ return self.forward_once(x)
+ else: # Augment images (inference and test only) https://github.com/ultralytics/yolov3/issues/931
+ img_size = x.shape[-2:] # height, width
+ s = [0.83, 0.67] # scales
+ y = []
+ for i, xi in enumerate((x,
+ torch_utils.scale_img(x.flip(3), s[0], same_shape=False), # flip-lr and scale
+ torch_utils.scale_img(x, s[1], same_shape=False), # scale
+ )):
+ # cv2.imwrite('img%g.jpg' % i, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])
+ y.append(self.forward_once(xi)[0])
+
+ y[1][..., :4] /= s[0] # scale
+ y[1][..., 0] = img_size[1] - y[1][..., 0] # flip lr
+ y[2][..., :4] /= s[1] # scale
+
+ # for i, yi in enumerate(y): # coco small, medium, large = < 32**2 < 96**2 <
+ # area = yi[..., 2:4].prod(2)[:, :, None]
+ # if i == 1:
+ # yi *= (area < 96. ** 2).float()
+ # elif i == 2:
+ # yi *= (area > 32. ** 2).float()
+ # y[i] = yi
+
+ y = torch.cat(y, 1)
+ return y, None
+
+ def forward_once(self, x, augment=False, verbose=False):
+ img_size = x.shape[-2:] # height, width
+ yolo_out, out = [], []
+ if verbose:
+ print('0', x.shape)
+            msg = ''
+
+ # Augment images (inference and test only)
+ if augment: # https://github.com/ultralytics/yolov3/issues/931
+ nb = x.shape[0] # batch size
+ s = [0.83, 0.67] # scales
+ x = torch.cat((x,
+ torch_utils.scale_img(x.flip(3), s[0]), # flip-lr and scale
+ torch_utils.scale_img(x, s[1]), # scale
+ ), 0)
+
+ for i, module in enumerate(self.module_list):
+ name = module.__class__.__name__
+ if name in ['WeightedFeatureFusion', 'FeatureConcat', 'FeatureConcat2', 'FeatureConcat3', 'FeatureConcat_l']: # sum, concat
+ if verbose:
+ l = [i - 1] + module.layers # layers
+ sh = [list(x.shape)] + [list(out[i].shape) for i in module.layers] # shapes
+                    msg = ' >> ' + ' + '.join(['layer %g %s' % x for x in zip(l, sh)])
+ x = module(x, out) # WeightedFeatureFusion(), FeatureConcat()
+ elif name == 'YOLOLayer':
+ yolo_out.append(module(x, out))
+ else: # run module directly, i.e. mtype = 'convolutional', 'upsample', 'maxpool', 'batchnorm2d' etc.
+ x = module(x)
+
+ out.append(x if self.routs[i] else [])
+ if verbose:
+                print('%g/%g %s -' % (i, len(self.module_list), name), list(x.shape), msg)
+                msg = ''
+
+ if self.training: # train
+ return yolo_out
+ elif ONNX_EXPORT: # export
+ x = [torch.cat(x, 0) for x in zip(*yolo_out)]
+ return x[0], torch.cat(x[1:3], 1) # scores, boxes: 3780x80, 3780x4
+ else: # inference or test
+ x, p = zip(*yolo_out) # inference output, training output
+ x = torch.cat(x, 1) # cat yolo outputs
+ if augment: # de-augment results
+ x = torch.split(x, nb, dim=0)
+ x[1][..., :4] /= s[0] # scale
+ x[1][..., 0] = img_size[1] - x[1][..., 0] # flip lr
+ x[2][..., :4] /= s[1] # scale
+ x = torch.cat(x, 1)
+ return x, p
+
+ def fuse(self):
+ # Fuse Conv2d + BatchNorm2d layers throughout model
+ print('Fusing layers...')
+ fused_list = nn.ModuleList()
+ for a in list(self.children())[0]:
+ if isinstance(a, nn.Sequential):
+ for i, b in enumerate(a):
+ if isinstance(b, nn.modules.batchnorm.BatchNorm2d):
+ # fuse this bn layer with the previous conv2d layer
+ conv = a[i - 1]
+ fused = torch_utils.fuse_conv_and_bn(conv, b)
+ a = nn.Sequential(fused, *list(a.children())[i + 1:])
+ break
+ fused_list.append(a)
+ self.module_list = fused_list
+ self.info() if not ONNX_EXPORT else None # yolov3-spp reduced from 225 to 152 layers
+
+ def info(self, verbose=False):
+ torch_utils.model_info(self, verbose)
+
+
+def get_yolo_layers(model):
+ return [i for i, m in enumerate(model.module_list) if m.__class__.__name__ == 'YOLOLayer'] # [89, 101, 113]
+
+
+def load_darknet_weights(self, weights, cutoff=-1):
+ # Parses and loads the weights stored in 'weights'
+
+ # Establish cutoffs (load layers between 0 and cutoff. if cutoff = -1 all are loaded)
+ file = Path(weights).name
+ if file == 'darknet53.conv.74':
+ cutoff = 75
+ elif file == 'yolov3-tiny.conv.15':
+ cutoff = 15
+
+ # Read weights file
+ with open(weights, 'rb') as f:
+ # Read Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
+ self.version = np.fromfile(f, dtype=np.int32, count=3) # (int32) version info: major, minor, revision
+ self.seen = np.fromfile(f, dtype=np.int64, count=1) # (int64) number of images seen during training
+
+ weights = np.fromfile(f, dtype=np.float32) # the rest are weights
+
+ ptr = 0
+ for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
+ if mdef['type'] == 'convolutional':
+ conv = module[0]
+ if mdef['batch_normalize']:
+ # Load BN bias, weights, running mean and running variance
+ bn = module[1]
+ nb = bn.bias.numel() # number of biases
+ # Bias
+ bn.bias.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.bias))
+ ptr += nb
+ # Weight
+ bn.weight.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.weight))
+ ptr += nb
+ # Running Mean
+ bn.running_mean.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.running_mean))
+ ptr += nb
+ # Running Var
+ bn.running_var.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.running_var))
+ ptr += nb
+ else:
+ # Load conv. bias
+ nb = conv.bias.numel()
+ conv_b = torch.from_numpy(weights[ptr:ptr + nb]).view_as(conv.bias)
+ conv.bias.data.copy_(conv_b)
+ ptr += nb
+ # Load conv. weights
+ nw = conv.weight.numel() # number of weights
+ conv.weight.data.copy_(torch.from_numpy(weights[ptr:ptr + nw]).view_as(conv.weight))
+ ptr += nw
+
+
+def save_weights(self, path='model.weights', cutoff=-1):
+    # Converts a PyTorch model to Darknet format (*.pt to *.weights)
+ # Note: Does not work if model.fuse() is applied
+ with open(path, 'wb') as f:
+ # Write Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
+ self.version.tofile(f) # (int32) version info: major, minor, revision
+ self.seen.tofile(f) # (int64) number of images seen during training
+
+ # Iterate through layers
+ for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
+ if mdef['type'] == 'convolutional':
+ conv_layer = module[0]
+ # If batch norm, load bn first
+ if mdef['batch_normalize']:
+ bn_layer = module[1]
+ bn_layer.bias.data.cpu().numpy().tofile(f)
+ bn_layer.weight.data.cpu().numpy().tofile(f)
+ bn_layer.running_mean.data.cpu().numpy().tofile(f)
+ bn_layer.running_var.data.cpu().numpy().tofile(f)
+ # Load conv bias
+ else:
+ conv_layer.bias.data.cpu().numpy().tofile(f)
+ # Load conv weights
+ conv_layer.weight.data.cpu().numpy().tofile(f)
+
+
+def convert(cfg='cfg/yolov3-spp.cfg', weights='weights/yolov3-spp.weights', saveto='converted.weights'):
+ # Converts between PyTorch and Darknet format per extension (i.e. *.weights convert to *.pt and vice versa)
+ # from models import *; convert('cfg/yolov3-spp.cfg', 'weights/yolov3-spp.weights')
+
+ # Initialize model
+ model = Darknet(cfg)
+ ckpt = torch.load(weights) # load checkpoint
+ try:
+ ckpt['model'] = {k: v for k, v in ckpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
+ model.load_state_dict(ckpt['model'], strict=False)
+ save_weights(model, path=saveto, cutoff=-1)
+ except KeyError as e:
+ print(e)
+
+def attempt_download(weights):
+ # Attempt to download pretrained weights if not found locally
+ weights = weights.strip()
+ msg = weights + ' missing, try downloading from https://drive.google.com/open?id=1LezFG5g3BCW6iYaV89B2i64cqEUZD7e0'
+
+ if len(weights) > 0 and not os.path.isfile(weights):
+ d = {''}
+
+ file = Path(weights).name
+ if file in d:
+ r = gdrive_download(id=d[file], name=weights)
+ else: # download from pjreddie.com
+ url = 'https://pjreddie.com/media/files/' + file
+ print('Downloading ' + url)
+ r = os.system('curl -f ' + url + ' -o ' + weights)
+
+ # Error check
+ if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
+ os.system('rm ' + weights) # remove partial downloads
+ raise Exception(msg)
diff --git a/models/yolo.py b/models/yolo.py
new file mode 100644
index 0000000..4bde2c0
--- /dev/null
+++ b/models/yolo.py
@@ -0,0 +1,259 @@
+import argparse
+import math
+from copy import deepcopy
+from pathlib import Path
+
+import torch
+import torch.nn as nn
+
+from models.common import *
+from models.experimental import MixConv2d, CrossConv, C3
+from utils.general import check_anchor_order, make_divisible, check_file
+from utils.torch_utils import (
+ time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, select_device)
+
+
+class Detect(nn.Module):
+ def __init__(self, nc=80, anchors=(), ch=()): # detection layer
+ super(Detect, self).__init__()
+ self.stride = None # strides computed during build
+ self.nc = nc # number of classes
+ self.no = nc + 5 # number of outputs per anchor
+ self.nl = len(anchors) # number of detection layers
+ self.na = len(anchors[0]) // 2 # number of anchors
+ self.grid = [torch.zeros(1)] * self.nl # init grid
+ a = torch.tensor(anchors).float().view(self.nl, -1, 2)
+ self.register_buffer('anchors', a) # shape(nl,na,2)
+ self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
+ self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
+ self.export = False # onnx export
+
+ def forward(self, x):
+ # x = x.copy() # for profiling
+ z = [] # inference output
+ self.training |= self.export
+ for i in range(self.nl):
+ x[i] = self.m[i](x[i]) # conv
+ bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
+ x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
+
+ if not self.training: # inference
+ if self.grid[i].shape[2:4] != x[i].shape[2:4]:
+ self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
+
+ y = x[i].sigmoid()
+ y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
+ y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
+ z.append(y.view(bs, -1, self.no))
+
+ return x if self.training else (torch.cat(z, 1), x)
+
+ @staticmethod
+ def _make_grid(nx=20, ny=20):
+ yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
+ return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
+
+
+class Model(nn.Module):
+ def __init__(self, cfg='yolov4.yaml', ch=3, nc=None): # model, input channels, number of classes
+ super(Model, self).__init__()
+ if isinstance(cfg, dict):
+ self.yaml = cfg # model dict
+ else: # is *.yaml
+ import yaml # for torch hub
+ self.yaml_file = Path(cfg).name
+ with open(cfg) as f:
+ self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
+
+ # Define model
+ if nc and nc != self.yaml['nc']:
+ print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc))
+ self.yaml['nc'] = nc # override yaml value
+ self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist, ch_out
+ # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
+
+ # Build strides, anchors
+ m = self.model[-1] # Detect()
+ if isinstance(m, Detect):
+ s = 128 # 2x min stride
+ m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
+ m.anchors /= m.stride.view(-1, 1, 1)
+ check_anchor_order(m)
+ self.stride = m.stride
+ self._initialize_biases() # only run once
+ # print('Strides: %s' % m.stride.tolist())
+
+ # Init weights, biases
+ initialize_weights(self)
+ self.info()
+ print('')
+
+ def forward(self, x, augment=False, profile=False):
+ if augment:
+ img_size = x.shape[-2:] # height, width
+ s = [1, 0.83, 0.67] # scales
+ f = [None, 3, None] # flips (2-ud, 3-lr)
+ y = [] # outputs
+ for si, fi in zip(s, f):
+ xi = scale_img(x.flip(fi) if fi else x, si)
+ yi = self.forward_once(xi)[0] # forward
+ # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
+ yi[..., :4] /= si # de-scale
+ if fi == 2:
+ yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
+ elif fi == 3:
+ yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
+ y.append(yi)
+ return torch.cat(y, 1), None # augmented inference, train
+ else:
+ return self.forward_once(x, profile) # single-scale inference, train
+
+ def forward_once(self, x, profile=False):
+ y, dt = [], [] # outputs
+ for m in self.model:
+ if m.f != -1: # if not from previous layer
+ x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
+
+ if profile:
+ try:
+ import thop
+ o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # FLOPS
+            except Exception:
+ o = 0
+ t = time_synchronized()
+ for _ in range(10):
+ _ = m(x)
+ dt.append((time_synchronized() - t) * 100)
+ print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
+
+ x = m(x) # run
+ y.append(x if m.i in self.save else None) # save output
+
+ if profile:
+ print('%.1fms total' % sum(dt))
+ return x
+
+ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
+ # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+ m = self.model[-1] # Detect() module
+ for mi, s in zip(m.m, m.stride): # from
+ b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
+ b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
+ b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
+ mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+ def _print_biases(self):
+ m = self.model[-1] # Detect() module
+ for mi in m.m: # from
+ b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
+ print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
+
+ # def _print_weights(self):
+ # for m in self.model.modules():
+ # if type(m) is Bottleneck:
+ # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
+
+ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
+ print('Fusing layers... ', end='')
+ for m in self.model.modules():
+ if type(m) is Conv:
+                m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
+ m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
+ m.bn = None # remove batchnorm
+ m.forward = m.fuseforward # update forward
+ self.info()
+ return self
+
+ def info(self): # print model information
+ model_info(self)
+
+
+def parse_model(d, ch): # model_dict, input_channels(3)
+ print('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+ anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
+ na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
+ no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
+
+ layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
+ for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
+ m = eval(m) if isinstance(m, str) else m # eval strings
+ for j, a in enumerate(args):
+ try:
+ args[j] = eval(a) if isinstance(a, str) else a # eval strings
+            except Exception:
+ pass
+
+ n = max(round(n * gd), 1) if n > 1 else n # depth gain
+ if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, BottleneckCSP2, SPPCSP, VoVCSP, C3]:
+ c1, c2 = ch[f], args[0]
+
+ # Normal
+ # if i > 0 and args[0] != no: # channel expansion factor
+ # ex = 1.75 # exponential (default 2.0)
+ # e = math.log(c2 / ch[1]) / math.log(2)
+ # c2 = int(ch[1] * ex ** e)
+ # if m != Focus:
+
+ c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
+
+ # Experimental
+ # if i > 0 and args[0] != no: # channel expansion factor
+ # ex = 1 + gw # exponential (default 2.0)
+ # ch1 = 32 # ch[1]
+ # e = math.log(c2 / ch1) / math.log(2) # level 1-n
+ # c2 = int(ch1 * ex ** e)
+ # if m != Focus:
+ # c2 = make_divisible(c2, 8) if c2 != no else c2
+
+ args = [c1, c2, *args[1:]]
+ if m in [BottleneckCSP, BottleneckCSP2, SPPCSP, VoVCSP, C3]:
+ args.insert(2, n)
+ n = 1
+ elif m is nn.BatchNorm2d:
+ args = [ch[f]]
+ elif m is Concat:
+ c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
+ elif m is Detect:
+ args.append([ch[x + 1] for x in f])
+ if isinstance(args[1], int): # number of anchors
+ args[1] = [list(range(args[1] * 2))] * len(f)
+ else:
+ c2 = ch[f]
+
+ m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
+ t = str(m)[8:-2].replace('__main__.', '') # module type
+ np = sum([x.numel() for x in m_.parameters()]) # number params
+ m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
+ print('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
+ save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
+ layers.append(m_)
+ ch.append(c2)
+ return nn.Sequential(*layers), sorted(save)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--cfg', type=str, default='yolov4.yaml', help='model.yaml')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ opt = parser.parse_args()
+ opt.cfg = check_file(opt.cfg) # check file
+ device = select_device(opt.device)
+
+ # Create model
+ model = Model(opt.cfg).to(device)
+ model.train()
+
+ # Profile
+ # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
+ # y = model(img, profile=True)
+
+ # ONNX export
+ # model.model[-1].export = True
+ # torch.onnx.export(model, img, opt.cfg.replace('.yaml', '.onnx'), verbose=True, opset_version=11)
+
+ # Tensorboard
+ # from torch.utils.tensorboard import SummaryWriter
+ # tb_writer = SummaryWriter()
+ # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
+ # tb_writer.add_graph(model.model, img) # add model to tensorboard
+ # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
diff --git a/models/yolov3-spp.cfg b/models/yolov3-spp.cfg
new file mode 100644
index 0000000..0c856eb
--- /dev/null
+++ b/models/yolov3-spp.cfg
@@ -0,0 +1,821 @@
+[net]
+# Testing
+batch=1
+subdivisions=1
+# Training
+# batch=64
+# subdivisions=16
+width=608
+height=608
+channels=3
+momentum=0.9
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.001
+burn_in=1000
+max_batches = 500200
+policy=steps
+steps=400000,450000
+scales=.1,.1
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=leaky
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+######################
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+### SPP ###
+[maxpool]
+stride=1
+size=5
+
+[route]
+layers=-2
+
+[maxpool]
+stride=1
+size=9
+
+[route]
+layers=-4
+
+[maxpool]
+stride=1
+size=13
+
+[route]
+layers=-1,-3,-5,-6
+
+### End SPP ###
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 6,7,8
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = -1, 61
+
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 3,4,5
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = -1, 36
+
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 0,1,2
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
\ No newline at end of file
diff --git a/models/yolov4-csp.cfg b/models/yolov4-csp.cfg
new file mode 100644
index 0000000..7c211bf
--- /dev/null
+++ b/models/yolov4-csp.cfg
@@ -0,0 +1,1259 @@
+[net]
+# Testing
+#batch=1
+#subdivisions=1
+# Training
+batch=64
+subdivisions=8
+width=512
+height=512
+channels=3
+momentum=0.949
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.00261
+burn_in=1000
+max_batches = 500500
+policy=steps
+steps=400000,450000
+scales=.1,.1
+
+#cutmix=1
+mosaic=1
+
+#23:104x104 54:52x52 85:26x26 104:13x13 for 416
+
+
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=2
+pad=1
+activation=mish
+
+#[convolutional]
+#batch_normalize=1
+#filters=64
+#size=1
+#stride=1
+#pad=1
+#activation=mish
+
+#[route]
+#layers = -2
+
+#[convolutional]
+#batch_normalize=1
+#filters=64
+#size=1
+#stride=1
+#pad=1
+#activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+#[convolutional]
+#batch_normalize=1
+#filters=64
+#size=1
+#stride=1
+#pad=1
+#activation=mish
+
+#[route]
+#layers = -1,-7
+
+#[convolutional]
+#batch_normalize=1
+#filters=64
+#size=1
+#stride=1
+#pad=1
+#activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-10
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-16
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=1
+stride=1
+pad=1
+activation=mish
+
+##########################
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+### SPP ###
+[maxpool]
+stride=1
+size=5
+
+[route]
+layers=-2
+
+[maxpool]
+stride=1
+size=9
+
+[route]
+layers=-4
+
+[maxpool]
+stride=1
+size=13
+
+[route]
+layers=-1,-3,-5,-6
+### End SPP ###
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[route]
+layers = -1, -13
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[upsample]
+stride=2
+
+[route]
+layers = 79
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[route]
+layers = -1, -6
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[upsample]
+stride=2
+
+[route]
+layers = 48
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=128
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=128
+activation=mish
+
+[route]
+layers = -1, -6
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+##########################
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 0,1,2
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=256
+activation=mish
+
+[route]
+layers = -1, -20
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[route]
+layers = -1,-6
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 3,4,5
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=512
+activation=mish
+
+[route]
+layers = -1, -49
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[route]
+layers = -1,-6
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=mish
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 6,7,8
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
diff --git a/models/yolov4.cfg b/models/yolov4.cfg
new file mode 100644
index 0000000..faa55a5
--- /dev/null
+++ b/models/yolov4.cfg
@@ -0,0 +1,1154 @@
+[net]
+batch=64
+subdivisions=8
+# Training
+#width=512
+#height=512
+width=608
+height=608
+channels=3
+momentum=0.949
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.0013
+burn_in=1000
+max_batches = 500500
+policy=steps
+steps=400000,450000
+scales=.1,.1
+
+#cutmix=1
+mosaic=1
+
+#:104x104 54:52x52 85:26x26 104:13x13 for 416
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-7
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-10
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-16
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=1
+stride=1
+pad=1
+activation=mish
+
+##########################
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+### SPP ###
+[maxpool]
+stride=1
+size=5
+
+[route]
+layers=-2
+
+[maxpool]
+stride=1
+size=9
+
+[route]
+layers=-4
+
+[maxpool]
+stride=1
+size=13
+
+[route]
+layers=-1,-3,-5,-6
+### End SPP ###
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = 85
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = 54
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+##########################
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 0,1,2
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+scale_x_y = 1.2
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=256
+activation=leaky
+
+[route]
+layers = -1, -16
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 3,4,5
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+scale_x_y = 1.1
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=512
+activation=leaky
+
+[route]
+layers = -1, -37
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 6,7,8
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
\ No newline at end of file
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..f5d2433
--- /dev/null
+++ b/test.py
@@ -0,0 +1,310 @@
+import argparse
+import glob
+import json
+import os
+import shutil
+from pathlib import Path
+
+import numpy as np
+import torch
+import yaml
+from tqdm import tqdm
+
+from models.experimental import attempt_load
+from utils.datasets import create_dataloader
+from utils.general import (
+ coco80_to_coco91_class, check_file, check_img_size, compute_loss, non_max_suppression,
+ scale_coords, xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class)
+from utils.torch_utils import select_device, time_synchronized
+
+from models.models import *
+#from utils.datasets import *
+
+def load_classes(path):
+ # Loads *.names file at 'path'
+ with open(path, 'r') as f:
+ names = f.read().split('\n')
+ return list(filter(None, names)) # filter removes empty strings (such as last line)
+
+
+
+def test(data,
+ weights=None,
+ batch_size=16,
+ imgsz=640,
+ conf_thres=0.001,
+ iou_thres=0.6, # for NMS
+ save_json=False,
+ single_cls=False,
+ augment=False,
+ verbose=False,
+ model=None,
+ dataloader=None,
+ save_dir='',
+ merge=False,
+ save_txt=False):
+ # Initialize/load model and set device
+ training = model is not None
+ if training: # called by train.py
+ device = next(model.parameters()).device # get model device
+
+ else: # called directly
+ device = select_device(opt.device, batch_size=batch_size)
+ merge, save_txt = opt.merge, opt.save_txt # use Merge NMS, save *.txt labels
+ if save_txt:
+ out = Path('inference/output')
+ if os.path.exists(out):
+ shutil.rmtree(out) # delete output folder
+ os.makedirs(out) # make new output folder
+
+ # Remove previous
+ for f in glob.glob(str(Path(save_dir) / 'test_batch*.jpg')):
+ os.remove(f)
+
+ # Load model
+ model = Darknet(opt.cfg).to(device)
+
+ # load model
+ try:
+ ckpt = torch.load(weights[0], map_location=device) # load checkpoint
+ ckpt['model'] = {k: v for k, v in ckpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
+ model.load_state_dict(ckpt['model'], strict=False)
+ except:
+ load_darknet_weights(model, weights[0])
+ imgsz = check_img_size(imgsz, s=32) # check img_size
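+        # s=32: sizes are rounded up to a multiple of the model's largest stride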
+
+ # Half
+ half = device.type != 'cpu' # half precision only supported on CUDA
+ if half:
+ model.half()
+
+ # Configure
+ model.eval()
+ with open(data) as f:
+ data = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ nc = 1 if single_cls else int(data['nc']) # number of classes
+ iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
+ niou = iouv.numel()
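+    # iouv = [0.50, 0.55, ..., 0.95]; COCO-style mAP averages AP over these 10 thresholds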
+
+ # Dataloader
+ if not training:
+ img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
+ _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
+ path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
+ dataloader = create_dataloader(path, imgsz, batch_size, 32, opt,
+ hyp=None, augment=False, cache=False, pad=0.5, rect=True)[0]
+
+ seen = 0
+ try:
+ names = model.names if hasattr(model, 'names') else model.module.names
+ except:
+ names = load_classes(opt.names)
+ coco91class = coco80_to_coco91_class()
+ s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
+ p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
+ loss = torch.zeros(3, device=device)
+ jdict, stats, ap, ap_class = [], [], [], []
+ for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
+ img = img.to(device, non_blocking=True)
+ img = img.half() if half else img.float() # uint8 to fp16/32
+ img /= 255.0 # 0 - 255 to 0.0 - 1.0
+ targets = targets.to(device)
+ nb, _, height, width = img.shape # batch size, channels, height, width
+ whwh = torch.Tensor([width, height, width, height]).to(device)
+
+ # Disable gradients
+ with torch.no_grad():
+ # Run model
+ t = time_synchronized()
+ inf_out, train_out = model(img, augment=augment) # inference and training outputs
+ t0 += time_synchronized() - t
+
+ # Compute loss
+            if training: # called from train.py, where the model carries loss hyperparameters
+ loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # GIoU, obj, cls
+
+ # Run NMS
+ t = time_synchronized()
+ output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, merge=merge)
+ t1 += time_synchronized() - t
+
+ # Statistics per image
+ for si, pred in enumerate(output):
+ labels = targets[targets[:, 0] == si, 1:]
+ nl = len(labels)
+ tcls = labels[:, 0].tolist() if nl else [] # target class
+ seen += 1
+
+ if pred is None:
+ if nl:
+ stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
+ continue
+
+ # Append to text file
+ if save_txt:
+ gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
+ txt_path = str(out / Path(paths[si]).stem)
+ pred[:, :4] = scale_coords(img[si].shape[1:], pred[:, :4], shapes[si][0], shapes[si][1]) # to original
+ for *xyxy, conf, cls in pred:
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
+ with open(txt_path + '.txt', 'a') as f:
+ f.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format
+
+ # Clip boxes to image bounds
+ clip_coords(pred, (height, width))
+
+ # Append to pycocotools JSON dictionary
+ if save_json:
+ # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
+ image_id = Path(paths[si]).stem
+ box = pred[:, :4].clone() # xyxy
+ scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape
+ box = xyxy2xywh(box) # xywh
+ box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
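+                # COCO JSON expects boxes as [x_min, y_min, width, height] in pixels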
+ for p, b in zip(pred.tolist(), box.tolist()):
+ jdict.append({'image_id': int(image_id) if image_id.isnumeric() else image_id,
+ 'category_id': coco91class[int(p[5])],
+ 'bbox': [round(x, 3) for x in b],
+ 'score': round(p[4], 5)})
+
+ # Assign all predictions as incorrect
+ correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
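+            # correct has shape (num_pred, niou): column k marks whether the
+            # prediction matches some ground-truth box at IoU threshold iouv[k]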
+ if nl:
+ detected = [] # target indices
+ tcls_tensor = labels[:, 0]
+
+ # target boxes
+ tbox = xywh2xyxy(labels[:, 1:5]) * whwh
+
+ # Per target class
+ for cls in torch.unique(tcls_tensor):
+                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices
+                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices
+
+ # Search for detections
+ if pi.shape[0]:
+ # Prediction to target ious
+ ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
+
+ # Append detections
+ for j in (ious > iouv[0]).nonzero(as_tuple=False):
+ d = ti[i[j]] # detected target
+ if d not in detected:
+ detected.append(d)
+ correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
+ if len(detected) == nl: # all targets already located in image
+ break
+
+ # Append statistics (correct, conf, pcls, tcls)
+ stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
+
+ # Plot images
+ if batch_i < 1:
+ f = Path(save_dir) / ('test_batch%g_gt.jpg' % batch_i) # filename
+ plot_images(img, targets, paths, str(f), names) # ground truth
+ f = Path(save_dir) / ('test_batch%g_pred.jpg' % batch_i)
+ plot_images(img, output_to_target(output, width, height), paths, str(f), names) # predictions
+
+ # Compute statistics
+ stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
+ if len(stats) and stats[0].any():
+ p, r, ap, f1, ap_class = ap_per_class(*stats)
+ p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
+ mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
+ nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
+ else:
+ nt = torch.zeros(1)
+
+ # Print results
+ pf = '%20s' + '%12.3g' * 6 # print format
+ print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
+
+ # Print results per class
+ if verbose and nc > 1 and len(stats):
+ for i, c in enumerate(ap_class):
+ print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
+
+ # Print speeds
+ t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
+ if not training:
+ print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
+
+ # Save JSON
+ if save_json and len(jdict):
+ f = 'detections_val2017_%s_results.json' % \
+ (weights.split(os.sep)[-1].replace('.pt', '') if isinstance(weights, str) else '') # filename
+ print('\nCOCO mAP with pycocotools... saving %s...' % f)
+ with open(f, 'w') as file:
+ json.dump(jdict, file)
+
+ try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+ from pycocotools.coco import COCO
+ from pycocotools.cocoeval import COCOeval
+
+ imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]
+ cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0]) # initialize COCO ground truth api
+ cocoDt = cocoGt.loadRes(f) # initialize COCO pred api
+ cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
+ cocoEval.params.imgIds = imgIds # image IDs to evaluate
+ cocoEval.evaluate()
+ cocoEval.accumulate()
+ cocoEval.summarize()
+ map, map50 = cocoEval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
+ except Exception as e:
+ print('ERROR: pycocotools unable to run: %s' % e)
+
+ # Return results
+ model.float() # for training
+ maps = np.zeros(nc) + map
+ for i, c in enumerate(ap_class):
+ maps[c] = ap[i]
+ return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(prog='test.py')
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov4.pt', help='model.pt path(s)')
+ parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
+ parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
+ parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
+ parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
+ parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
+ parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
+ parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
+ parser.add_argument('--augment', action='store_true', help='augmented inference')
+ parser.add_argument('--merge', action='store_true', help='use Merge NMS')
+ parser.add_argument('--verbose', action='store_true', help='report mAP by class')
+ parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+ parser.add_argument('--cfg', type=str, default='cfg/yolov4.cfg', help='*.cfg path')
+    parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
+ opt = parser.parse_args()
+ opt.save_json |= opt.data.endswith('coco.yaml')
+ opt.data = check_file(opt.data) # check file
+ print(opt)
+
+ if opt.task in ['val', 'test']: # run normally
+ test(opt.data,
+ opt.weights,
+ opt.batch_size,
+ opt.img_size,
+ opt.conf_thres,
+ opt.iou_thres,
+ opt.save_json,
+ opt.single_cls,
+ opt.augment,
+ opt.verbose)
+
+ elif opt.task == 'study': # run over a range of settings and save/plot
+ for weights in ['']:
+ f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
+ x = list(range(352, 832, 64)) # x axis
+ y = [] # y axis
+ for i in x: # img-size
+ print('\nRunning %s point %s...' % (f, i))
+ r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json)
+ y.append(r + t) # results and times
+ np.savetxt(f, y, fmt='%10.4g') # save
+ os.system('zip -r study.zip study_*.txt')
+ # plot_study_txt(f, x) # plot
diff --git a/train.py b/train.py
new file mode 100644
index 0000000..78b04b9
--- /dev/null
+++ b/train.py
@@ -0,0 +1,514 @@
+import argparse
+import math
+import os
+import random
+import time
+from pathlib import Path
+
+import numpy as np
+import torch.distributed as dist
+import torch.nn.functional as F
+import torch.optim as optim
+import torch.optim.lr_scheduler as lr_scheduler
+import torch.utils.data
+import yaml
+from torch.cuda import amp
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.tensorboard import SummaryWriter
+from tqdm import tqdm
+
+import test # import test.py to get mAP after each epoch
+from models.models import *
+from utils.datasets import create_dataloader
+from utils.general import (
+ check_img_size, torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors,
+ labels_to_image_weights, compute_loss, plot_images, fitness, strip_optimizer, plot_results,
+ get_latest_run, check_git_status, check_file, increment_dir, print_mutation, plot_evolution)
+from utils.google_utils import attempt_download
+from utils.torch_utils import init_seeds, ModelEMA, select_device, intersect_dicts
+
+
+def train(hyp, opt, device, tb_writer=None):
+ print(f'Hyperparameters {hyp}')
+ log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve' # logging directory
+ wdir = str(log_dir / 'weights') + os.sep # weights directory
+ os.makedirs(wdir, exist_ok=True)
+ last = wdir + 'last.pt'
+ best = wdir + 'best.pt'
+ results_file = str(log_dir / 'results.txt')
+ epochs, batch_size, total_batch_size, weights, rank = \
+ opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
+
+ # TODO: Use DDP logging. Only the first process is allowed to log.
+ # Save run settings
+ with open(log_dir / 'hyp.yaml', 'w') as f:
+ yaml.dump(hyp, f, sort_keys=False)
+ with open(log_dir / 'opt.yaml', 'w') as f:
+ yaml.dump(vars(opt), f, sort_keys=False)
+
+ # Configure
+ cuda = device.type != 'cpu'
+ init_seeds(2 + rank)
+ with open(opt.data) as f:
+ data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ train_path = data_dict['train']
+ test_path = data_dict['val']
+ nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names
+ assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
+
+ # Model
+ pretrained = weights.endswith('.pt')
+ if pretrained:
+ with torch_distributed_zero_first(rank):
+ attempt_download(weights) # download if not found locally
+ ckpt = torch.load(weights, map_location=device) # load checkpoint
+ model = Darknet(opt.cfg).to(device) # create
+ state_dict = {k: v for k, v in ckpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
+ model.load_state_dict(state_dict, strict=False)
+ print('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
+ else:
+ model = Darknet(opt.cfg).to(device) # create
+
+ # Optimizer
+ nbs = 64 # nominal batch size
+ accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
+ hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
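+    # with gradient accumulation the optimizer steps less often, so weight decay
+    # is scaled up to keep the total decay applied per epoch roughly constant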
+
+ pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
+ for k, v in dict(model.named_parameters()).items():
+ if '.bias' in k:
+ pg2.append(v) # biases
+ elif 'Conv2d.weight' in k:
+ pg1.append(v) # apply weight_decay
+ else:
+ pg0.append(v) # all else
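+    # only pg1 (conv weights) receives weight decay below; biases (pg2) and the
+    # remaining parameters (pg0, e.g. batch-norm weights) stay unregularized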
+
+ if opt.adam:
+ optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
+ else:
+ optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
+
+ optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
+ optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
+ print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
+ del pg0, pg1, pg2
+
+ # Scheduler https://arxiv.org/pdf/1812.01187.pdf
+ # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
+ lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2 # cosine
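+    # lf(0) = 1.0 (training starts at lr0) and lf(epochs) = 0.2, i.e. the cosine
+    # anneals the learning rate to 20% of its initial value by the last epoch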
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
+ # plot_lr_scheduler(optimizer, scheduler, epochs)
+
+ # Resume
+ start_epoch, best_fitness = 0, 0.0
+ if pretrained:
+ # Optimizer
+ if ckpt['optimizer'] is not None:
+ optimizer.load_state_dict(ckpt['optimizer'])
+ best_fitness = ckpt['best_fitness']
+
+ # Results
+ if ckpt.get('training_results') is not None:
+ with open(results_file, 'w') as file:
+ file.write(ckpt['training_results']) # write results.txt
+
+ # Epochs
+ start_epoch = ckpt['epoch'] + 1
+ if epochs < start_epoch:
+ print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
+ (weights, ckpt['epoch'], epochs))
+ epochs += ckpt['epoch'] # finetune additional epochs
+
+ del ckpt, state_dict
+
+ # Image sizes
+ gs = 32 # grid size (max stride)
+ imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
+
+ # DP mode
+ if cuda and rank == -1 and torch.cuda.device_count() > 1:
+ model = torch.nn.DataParallel(model)
+
+ # SyncBatchNorm
+ if opt.sync_bn and cuda and rank != -1:
+ model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
+ print('Using SyncBatchNorm()')
+
+ # Exponential moving average
+ ema = ModelEMA(model) if rank in [-1, 0] else None
+
+ # DDP mode
+ if cuda and rank != -1:
+        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
+
+ # Trainloader
+ dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True,
+ cache=opt.cache_images, rect=opt.rect, local_rank=rank,
+ world_size=opt.world_size)
+ mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
+ nb = len(dataloader) # number of batches
+ assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
+
+ # Testloader
+ if rank in [-1, 0]:
+ ema.updates = start_epoch * nb // accumulate # set EMA updates ***
+ # local_rank is set to -1 because only the first process is expected to do evaluation.
+ testloader = create_dataloader(test_path, imgsz_test, batch_size, gs, opt, hyp=hyp, augment=False,
+ cache=opt.cache_images, rect=True, local_rank=-1, world_size=opt.world_size)[0]
+
+ # Model parameters
+ hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset
+ model.nc = nc # attach number of classes to model
+ model.hyp = hyp # attach hyperparameters to model
+ model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
+ model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
+ model.names = names
+
+ # Class frequency
+ if rank in [-1, 0]:
+ labels = np.concatenate(dataset.labels, 0)
+ c = torch.tensor(labels[:, 0]) # classes
+ # cf = torch.bincount(c.long(), minlength=nc) + 1.
+ # model._initialize_biases(cf.to(device))
+ plot_labels(labels, save_dir=log_dir)
+ if tb_writer:
+ tb_writer.add_histogram('classes', c, 0)
+
+ # Check anchors
+ #if not opt.noautoanchor:
+ # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
+
+ # Start training
+ t0 = time.time()
+ nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
+ # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
+ maps = np.zeros(nc) # mAP per class
+ results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
+ scheduler.last_epoch = start_epoch - 1 # do not move
+ scaler = amp.GradScaler(enabled=cuda)
+ if rank in [0, -1]:
+ print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
+ print('Using %g dataloader workers' % dataloader.num_workers)
+ print('Starting training for %g epochs...' % epochs)
+ # torch.autograd.set_detect_anomaly(True)
+ for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
+ model.train()
+
+ # Update image weights (optional)
+ if dataset.image_weights:
+ # Generate indices
+ if rank in [-1, 0]:
+ w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
+ image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
+ dataset.indices = random.choices(range(dataset.n), weights=image_weights,
+ k=dataset.n) # rand weighted idx
+ # Broadcast if DDP
+ if rank != -1:
+ indices = torch.zeros([dataset.n], dtype=torch.int)
+ if rank == 0:
+ indices[:] = torch.tensor(dataset.indices, dtype=torch.int)
+ dist.broadcast(indices, 0)
+ if rank != 0:
+ dataset.indices = indices.cpu().numpy()
+
+ # Update mosaic border
+ # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
+ # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
+
+ mloss = torch.zeros(4, device=device) # mean losses
+ if rank != -1:
+ dataloader.sampler.set_epoch(epoch)
+ pbar = enumerate(dataloader)
+ if rank in [-1, 0]:
+ print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
+ pbar = tqdm(pbar, total=nb) # progress bar
+ optimizer.zero_grad()
+ for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
+ ni = i + nb * epoch # number integrated batches (since train start)
+ imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
+
+ # Warmup
+ if ni <= nw:
+ xi = [0, nw] # x interp
+ # model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
+ accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
+ for j, x in enumerate(optimizer.param_groups):
+ # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
+ x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
+ if 'momentum' in x:
+ x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])
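+ # Net effect during warmup: the bias group (j == 2) starts at lr 0.1 and the other
+ # groups at 0.0, all ramping linearly to initial_lr * lf(epoch) by ni = nw, while
+ # accumulate ramps 1 -> nbs / total_batch_size and momentum ramps 0.9 -> hyp['momentum'].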
+
+ # Multi-scale
+ if opt.multi_scale:
+ sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs # size (randrange requires int bounds)
+ sf = sz / max(imgs.shape[2:]) # scale factor
+ if sf != 1:
+ ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
+ imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
+
+ # Autocast
+ with amp.autocast(enabled=cuda):
+ # Forward
+ pred = model(imgs)
+
+ # Loss
+ loss, loss_items = compute_loss(pred, targets.to(device), model) # scaled by batch_size
+ if rank != -1:
+ loss *= opt.world_size # gradient averaged between devices in DDP mode
+ # if not torch.isfinite(loss):
+ # print('WARNING: non-finite loss, ending training ', loss_items)
+ # return results
+
+ # Backward
+ scaler.scale(loss).backward()
+
+ # Optimize
+ if ni % accumulate == 0:
+ scaler.step(optimizer) # optimizer.step
+ scaler.update()
+ optimizer.zero_grad()
+ if ema is not None:
+ ema.update(model)
+
+ # Print
+ if rank in [-1, 0]:
+ mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
+ mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
+ s = ('%10s' * 2 + '%10.4g' * 6) % (
+ '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
+ pbar.set_description(s)
+
+ # Plot
+ if ni < 3:
+ f = str(log_dir / ('train_batch%g.jpg' % ni)) # filename
+ result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
+ if tb_writer and result is not None:
+ tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
+ # tb_writer.add_graph(model, imgs) # add model to tensorboard
+
+ # end batch ------------------------------------------------------------------------------------------------
+
+ # Scheduler
+ scheduler.step()
+
+ # DDP process 0 or single-GPU
+ if rank in [-1, 0]:
+ # mAP
+ if ema is not None:
+ ema.update_attr(model)
+ final_epoch = epoch + 1 == epochs
+ if not opt.notest or final_epoch: # Calculate mAP
+ results, maps, times = test.test(opt.data,
+ batch_size=batch_size,
+ imgsz=imgsz_test,
+ save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
+ model=ema.ema.module if hasattr(ema.ema, 'module') else ema.ema,
+ single_cls=opt.single_cls,
+ dataloader=testloader,
+ save_dir=log_dir)
+
+ # Write
+ with open(results_file, 'a') as f:
+ f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
+ if len(opt.name) and opt.bucket:
+ os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
+
+ # Tensorboard
+ if tb_writer:
+ tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
+ 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
+ 'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
+ for x, tag in zip(list(mloss[:-1]) + list(results), tags):
+ tb_writer.add_scalar(tag, x, epoch)
+
+ # Update best mAP
+ fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1]
+ if fi > best_fitness:
+ best_fitness = fi
+
+ # Save model
+ save = (not opt.nosave) or (final_epoch and not opt.evolve)
+ if save:
+ with open(results_file, 'r') as f: # create checkpoint
+ ckpt = {'epoch': epoch,
+ 'best_fitness': best_fitness,
+ 'training_results': f.read(),
+ 'model': ema.ema.module.state_dict() if hasattr(ema.ema, 'module') else ema.ema.state_dict(),
+ 'optimizer': None if final_epoch else optimizer.state_dict()}
+
+ # Save last, best and delete
+ torch.save(ckpt, last)
+ if epoch >= (epochs - 5):
+ torch.save(ckpt, last.replace('.pt', '_{:03d}.pt'.format(epoch)))
+ if (best_fitness == fi) and not final_epoch:
+ torch.save(ckpt, best)
+ del ckpt
+ # end epoch ----------------------------------------------------------------------------------------------------
+ # end training
+
+ if rank in [-1, 0]:
+ # Strip optimizers
+ n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name
+ fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
+ for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
+ if os.path.exists(f1):
+ os.rename(f1, f2) # rename
+ ispt = f2.endswith('.pt') # is *.pt
+ if ispt: strip_optimizer(f2) # strip optimizer
+ if opt.bucket and ispt: os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) # upload
+ # Finish
+ if not opt.evolve:
+ plot_results(save_dir=log_dir) # save as results.png
+ print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
+
+ if rank not in [-1, 0]: dist.destroy_process_group()
+ torch.cuda.empty_cache()
+ return results
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', type=str, default='yolov4.pt', help='initial weights path')
+ parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
+ parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
+ parser.add_argument('--hyp', type=str, default='', help='hyperparameters path, i.e. data/hyp.scratch.yaml')
+ parser.add_argument('--epochs', type=int, default=300)
+ parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
+ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='train,test sizes')
+ parser.add_argument('--rect', action='store_true', help='rectangular training')
+ parser.add_argument('--resume', nargs='?', const='get_last', default=False,
+ help='resume from given path/last.pt, or most recent run if blank')
+ parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+ parser.add_argument('--notest', action='store_true', help='only test final epoch')
+ parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
+ parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
+ parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
+ parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
+ parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+ parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
+ parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
+ parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+ parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
+ parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')
+ opt = parser.parse_args()
+
+ # Resume
+ if opt.resume:
+ last = get_latest_run() if opt.resume == 'get_last' else opt.resume # resume from most recent run
+ if last and not opt.weights:
+ print(f'Resuming training from {last}')
+ opt.weights = last if opt.resume and not opt.weights else opt.weights
+ if opt.local_rank == -1 or ("RANK" in os.environ and os.environ["RANK"] == "0"):
+ check_git_status()
+
+ opt.hyp = opt.hyp or 'data/hyp.scratch.yaml'
+ opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
+ assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
+
+ opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
+ device = select_device(opt.device, batch_size=opt.batch_size)
+ opt.total_batch_size = opt.batch_size
+ opt.world_size = 1
+ opt.global_rank = -1
+
+ # DDP mode
+ if opt.local_rank != -1:
+ assert torch.cuda.device_count() > opt.local_rank
+ torch.cuda.set_device(opt.local_rank)
+ device = torch.device('cuda', opt.local_rank)
+ dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
+ opt.world_size = dist.get_world_size()
+ opt.global_rank = dist.get_rank()
+ assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
+ opt.batch_size = opt.total_batch_size // opt.world_size
+
+ print(opt)
+ with open(opt.hyp) as f:
+ hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps
+
+ # Train
+ if not opt.evolve:
+ tb_writer = None
+ if opt.global_rank in [-1, 0]:
+ print('Start Tensorboard with "tensorboard --logdir %s", view at http://localhost:6006/' % opt.logdir)
+ tb_writer = SummaryWriter(log_dir=increment_dir(Path(opt.logdir) / 'exp', opt.name)) # runs/exp
+
+ train(hyp, opt, device, tb_writer)
+
+ # Evolve hyperparameters (optional)
+ else:
+ # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
+ meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
+ 'momentum': (0.1, 0.6, 0.98), # SGD momentum/Adam beta1
+ 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
+ 'giou': (1, 0.02, 0.2), # GIoU loss gain
+ 'cls': (1, 0.2, 4.0), # cls loss gain
+ 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
+ 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
+ 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
+ 'iou_t': (0, 0.1, 0.7), # IoU training threshold
+ 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
+ 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
+ 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
+ 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
+ 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
+ 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
+ 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
+ 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
+ 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
+ 'perspective': (1, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
+ 'flipud': (0, 0.0, 1.0), # image flip up-down (probability)
+ 'fliplr': (1, 0.0, 1.0), # image flip left-right (probability)
+ 'mixup': (1, 0.0, 1.0)} # image mixup (probability)
+
+ assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
+ opt.notest, opt.nosave = True, True # only test/save final epoch
+ # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
+ yaml_file = Path('runs/evolve/hyp_evolved.yaml') # save best result here
+ if opt.bucket:
+ os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
+
+ for _ in range(100): # generations to evolve
+ if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate
+ # Select parent(s)
+ parent = 'single' # parent selection method: 'single' or 'weighted'
+ x = np.loadtxt('evolve.txt', ndmin=2)
+ n = min(5, len(x)) # number of previous results to consider
+ x = x[np.argsort(-fitness(x))][:n] # top n mutations
+ w = fitness(x) - fitness(x).min() # weights
+ if parent == 'single' or len(x) == 1:
+ # x = x[random.randint(0, n - 1)] # random selection
+ x = x[random.choices(range(n), weights=w)[0]] # weighted selection
+ elif parent == 'weighted':
+ x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
+
+ # Mutate
+ mp, s = 0.9, 0.2 # mutation probability, sigma
+ npr = np.random
+ npr.seed(int(time.time()))
+ g = np.array([x[0] for x in meta.values()]) # gains 0-1
+ ng = len(meta)
+ v = np.ones(ng)
+ while all(v == 1): # mutate until a change occurs (prevent duplicates)
+ v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+ for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
+ hyp[k] = float(x[i + 7] * v[i]) # mutate
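+ # Each surviving gain v[i] multiplies its hyperparameter by a factor clipped to
+ # [0.3, 3.0]; columns 0-6 of each evolve.txt row hold the 7 result metrics, so
+ # hyperparameter values start at column 7 (hence x[i + 7]).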
+
+ # Constrain to limits
+ for k, v in meta.items():
+ hyp[k] = max(hyp[k], v[1]) # lower limit
+ hyp[k] = min(hyp[k], v[2]) # upper limit
+ hyp[k] = round(hyp[k], 5) # significant digits
+
+ # Train mutation
+ results = train(hyp.copy(), opt, device)
+
+ # Write mutation results
+ print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
+
+ # Plot results
+ plot_evolution(yaml_file)
+ print('Hyperparameter evolution complete. Best results saved as: %s\nCommand to train a new model with these '
+ 'hyperparameters: $ python train.py --hyp %s' % (yaml_file, yaml_file))
From 5f5fafd368b424e288904fa73881c859f719cc67 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Mon, 16 Nov 2020 16:18:09 +0800
Subject: [PATCH 03/37] Create __init__.py
---
models/__init__.py | 1 +
1 file changed, 1 insertion(+)
create mode 100644 models/__init__.py
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/models/__init__.py
@@ -0,0 +1 @@
+
From 22d83f00894b227a444846ef4e602a6f1bdf1903 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Mon, 16 Nov 2020 16:19:00 +0800
Subject: [PATCH 04/37] Add files via upload
---
utils/activations.py | 69 +++
utils/datasets.py | 907 ++++++++++++++++++++++++++++++
utils/general.py | 1237 +++++++++++++++++++++++++++++++++++++++++
utils/google_utils.py | 76 +++
utils/layers.py | 323 +++++++++++
utils/parse_config.py | 70 +++
utils/torch_utils.py | 226 ++++++++
7 files changed, 2908 insertions(+)
create mode 100644 utils/activations.py
create mode 100644 utils/datasets.py
create mode 100644 utils/general.py
create mode 100644 utils/google_utils.py
create mode 100644 utils/layers.py
create mode 100644 utils/parse_config.py
create mode 100644 utils/torch_utils.py
diff --git a/utils/activations.py b/utils/activations.py
new file mode 100644
index 0000000..f00b3b9
--- /dev/null
+++ b/utils/activations.py
@@ -0,0 +1,69 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+# Swish https://arxiv.org/pdf/1905.02244.pdf ---------------------------------------------------------------------------
+class Swish(nn.Module):
+ @staticmethod
+ def forward(x):
+ return x * torch.sigmoid(x)
+
+
+class HardSwish(nn.Module):
+ @staticmethod
+ def forward(x):
+ return x * F.hardtanh(x + 3, 0., 6., True) / 6.
+
+
+class MemoryEfficientSwish(nn.Module):
+ class F(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x):
+ ctx.save_for_backward(x)
+ return x * torch.sigmoid(x)
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ x = ctx.saved_tensors[0]
+ sx = torch.sigmoid(x)
+ return grad_output * (sx * (1 + x * (1 - sx)))
+
+ def forward(self, x):
+ return self.F.apply(x)
+
+
+# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
+class Mish(nn.Module):
+ @staticmethod
+ def forward(x):
+ return x * F.softplus(x).tanh()
+
+
+class MemoryEfficientMish(nn.Module):
+ class F(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x):
+ ctx.save_for_backward(x)
+ return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ x = ctx.saved_tensors[0]
+ sx = torch.sigmoid(x)
+ fx = F.softplus(x).tanh()
+ return grad_output * (fx + x * sx * (1 - fx * fx))
+
+ def forward(self, x):
+ return self.F.apply(x)
+
+
+# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
+class FReLU(nn.Module):
+ def __init__(self, c1, k=3): # ch_in, kernel
+ super().__init__()
+ self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1)
+ self.bn = nn.BatchNorm2d(c1)
+
+ def forward(self, x):
+ return torch.max(x, self.bn(self.conv(x)))
diff --git a/utils/datasets.py b/utils/datasets.py
new file mode 100644
index 0000000..af06d7b
--- /dev/null
+++ b/utils/datasets.py
@@ -0,0 +1,907 @@
+import glob
+import math
+import os
+import random
+import shutil
+import time
+from pathlib import Path
+from threading import Thread
+
+import cv2
+import numpy as np
+import torch
+from PIL import Image, ExifTags
+from torch.utils.data import Dataset
+from tqdm import tqdm
+
+from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
+
+help_url = ''
+img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
+vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
+
+# Get orientation exif tag
+for orientation in ExifTags.TAGS.keys():
+ if ExifTags.TAGS[orientation] == 'Orientation':
+ break
+
+
+def get_hash(files):
+ # Returns a single hash value of a list of files
+ return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
+
+
+def exif_size(img):
+ # Returns exif-corrected PIL size
+ s = img.size # (width, height)
+ try:
+ rotation = dict(img._getexif().items())[orientation]
+ if rotation == 6: # rotation 270
+ s = (s[1], s[0])
+ elif rotation == 8: # rotation 90
+ s = (s[1], s[0])
+ except Exception:
+ pass
+
+ return s
+
+
+def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
+ local_rank=-1, world_size=1):
+ # Make sure only the first process in DDP processes the dataset first; the following processes can then reuse the cache.
+ with torch_distributed_zero_first(local_rank):
+ dataset = LoadImagesAndLabels(path, imgsz, batch_size,
+ augment=augment, # augment images
+ hyp=hyp, # augmentation hyperparameters
+ rect=rect, # rectangular training
+ cache_images=cache,
+ single_cls=opt.single_cls,
+ stride=int(stride),
+ pad=pad)
+
+ batch_size = min(batch_size, len(dataset))
+ nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, 8]) # number of workers
+ train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) if local_rank != -1 else None
+ dataloader = torch.utils.data.DataLoader(dataset,
+ batch_size=batch_size,
+ num_workers=nw,
+ sampler=train_sampler,
+ pin_memory=True,
+ collate_fn=LoadImagesAndLabels.collate_fn)
+ return dataloader, dataset
+
+
+class LoadImages: # for inference
+ def __init__(self, path, img_size=640):
+ p = str(Path(path)) # os-agnostic
+ p = os.path.abspath(p) # absolute path
+ if '*' in p:
+ files = sorted(glob.glob(p)) # glob
+ elif os.path.isdir(p):
+ files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
+ elif os.path.isfile(p):
+ files = [p] # files
+ else:
+ raise Exception('ERROR: %s does not exist' % p)
+
+ images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
+ videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
+ ni, nv = len(images), len(videos)
+
+ self.img_size = img_size
+ self.files = images + videos
+ self.nf = ni + nv # number of files
+ self.video_flag = [False] * ni + [True] * nv
+ self.mode = 'images'
+ if any(videos):
+ self.new_video(videos[0]) # new video
+ else:
+ self.cap = None
+ assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
+ (p, img_formats, vid_formats)
+
+ def __iter__(self):
+ self.count = 0
+ return self
+
+ def __next__(self):
+ if self.count == self.nf:
+ raise StopIteration
+ path = self.files[self.count]
+
+ if self.video_flag[self.count]:
+ # Read video
+ self.mode = 'video'
+ ret_val, img0 = self.cap.read()
+ if not ret_val:
+ self.count += 1
+ self.cap.release()
+ if self.count == self.nf: # last video
+ raise StopIteration
+ else:
+ path = self.files[self.count]
+ self.new_video(path)
+ ret_val, img0 = self.cap.read()
+
+ self.frame += 1
+ print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
+
+ else:
+ # Read image
+ self.count += 1
+ img0 = cv2.imread(path) # BGR
+ assert img0 is not None, 'Image Not Found ' + path
+ print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
+
+ # Padded resize
+ img = letterbox(img0, new_shape=self.img_size)[0]
+
+ # Convert
+ img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to CxHxW
+ img = np.ascontiguousarray(img)
+
+ # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
+ return path, img, img0, self.cap
+
+ def new_video(self, path):
+ self.frame = 0
+ self.cap = cv2.VideoCapture(path)
+ self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+ def __len__(self):
+ return self.nf # number of files
+
+
+class LoadWebcam: # for inference
+ def __init__(self, pipe=0, img_size=640):
+ self.img_size = img_size
+
+ if pipe == '0':
+ pipe = 0 # local camera
+ # pipe = 'rtsp://192.168.1.64/1' # IP camera
+ # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
+ # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
+ # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
+
+ # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
+ # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
+
+ # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
+ # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
+ # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
+
+ self.pipe = pipe
+ self.cap = cv2.VideoCapture(pipe) # video capture object
+ self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
+
+ def __iter__(self):
+ self.count = -1
+ return self
+
+ def __next__(self):
+ self.count += 1
+ if cv2.waitKey(1) == ord('q'): # q to quit
+ self.cap.release()
+ cv2.destroyAllWindows()
+ raise StopIteration
+
+ # Read frame
+ if self.pipe == 0: # local camera
+ ret_val, img0 = self.cap.read()
+ img0 = cv2.flip(img0, 1) # flip left-right
+ else: # IP camera
+ n = 0
+ while True:
+ n += 1
+ self.cap.grab()
+ if n % 30 == 0: # skip frames
+ ret_val, img0 = self.cap.retrieve()
+ if ret_val:
+ break
+
+ # Print
+ assert ret_val, 'Camera Error %s' % self.pipe
+ img_path = 'webcam.jpg'
+ print('webcam %g: ' % self.count, end='')
+
+ # Padded resize
+ img = letterbox(img0, new_shape=self.img_size)[0]
+
+ # Convert
+ img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to CxHxW
+ img = np.ascontiguousarray(img)
+
+ return img_path, img, img0, None
+
+ def __len__(self):
+ return 0
+
+
+class LoadStreams: # multiple IP or RTSP cameras
+ def __init__(self, sources='streams.txt', img_size=640):
+ self.mode = 'images'
+ self.img_size = img_size
+
+ if os.path.isfile(sources):
+ with open(sources, 'r') as f:
+ sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
+ else:
+ sources = [sources]
+
+ n = len(sources)
+ self.imgs = [None] * n
+ self.sources = sources
+ for i, s in enumerate(sources):
+ # Start the thread to read frames from the video stream
+ print('%g/%g: %s... ' % (i + 1, n, s), end='')
+ cap = cv2.VideoCapture(0 if s == '0' else s)
+ assert cap.isOpened(), 'Failed to open %s' % s
+ w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ fps = cap.get(cv2.CAP_PROP_FPS) % 100
+ _, self.imgs[i] = cap.read() # guarantee first frame
+ thread = Thread(target=self.update, args=([i, cap]), daemon=True)
+ print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
+ thread.start()
+ print('') # newline
+
+ # check for common shapes
+ s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
+ self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
+ if not self.rect:
+ print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
+
+ def update(self, index, cap):
+ # Read next stream frame in a daemon thread
+ n = 0
+ while cap.isOpened():
+ n += 1
+ # _, self.imgs[index] = cap.read()
+ cap.grab()
+ if n == 4: # read every 4th frame
+ _, self.imgs[index] = cap.retrieve()
+ n = 0
+ time.sleep(0.01) # wait time
+
+ def __iter__(self):
+ self.count = -1
+ return self
+
+ def __next__(self):
+ self.count += 1
+ img0 = self.imgs.copy()
+ if cv2.waitKey(1) == ord('q'): # q to quit
+ cv2.destroyAllWindows()
+ raise StopIteration
+
+ # Letterbox
+ img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
+
+ # Stack
+ img = np.stack(img, 0)
+
+ # Convert
+ img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to BxCxHxW
+ img = np.ascontiguousarray(img)
+
+ return self.sources, img, img0, None
+
+ def __len__(self):
+ return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
+
+
+class LoadImagesAndLabels(Dataset): # for training/testing
+ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
+ cache_images=False, single_cls=False, stride=32, pad=0.0):
+ try:
+ f = [] # image files
+ for p in path if isinstance(path, list) else [path]:
+ p = str(Path(p)) # os-agnostic
+ parent = str(Path(p).parent) + os.sep
+ if os.path.isfile(p): # file
+ with open(p, 'r') as t:
+ t = t.read().splitlines()
+ f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
+ elif os.path.isdir(p): # folder
+ f += glob.iglob(p + os.sep + '*.*')
+ else:
+ raise Exception('%s does not exist' % p)
+ self.img_files = sorted(
+ [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
+ except Exception as e:
+ raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
+
+ n = len(self.img_files)
+ assert n > 0, 'No images found in %s. See %s' % (path, help_url)
+ bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
+ nb = bi[-1] + 1 # number of batches
+
+ self.n = n # number of images
+ self.batch = bi # batch index of image
+ self.img_size = img_size
+ self.augment = augment
+ self.hyp = hyp
+ self.image_weights = image_weights
+ self.rect = False if image_weights else rect
+ self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
+ self.mosaic_border = [-img_size // 2, -img_size // 2]
+ self.stride = stride
+
+ # Define labels
+ self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
+ self.img_files]
+
+ # Check cache
+ cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
+ if os.path.isfile(cache_path):
+ cache = torch.load(cache_path) # load
+ if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
+ cache = self.cache_labels(cache_path) # re-cache
+ else:
+ cache = self.cache_labels(cache_path) # cache
+
+ # Get labels
+ labels, shapes = zip(*[cache[x] for x in self.img_files])
+ self.shapes = np.array(shapes, dtype=np.float64)
+ self.labels = list(labels)
+
+ # Rectangular Training https://github.com/ultralytics/yolov3/issues/232
+ if self.rect:
+ # Sort by aspect ratio
+ s = self.shapes # wh
+ ar = s[:, 1] / s[:, 0] # aspect ratio
+ irect = ar.argsort()
+ self.img_files = [self.img_files[i] for i in irect]
+ self.label_files = [self.label_files[i] for i in irect]
+ self.labels = [self.labels[i] for i in irect]
+ self.shapes = s[irect] # wh
+ ar = ar[irect]
+
+ # Set training image shapes
+ shapes = [[1, 1]] * nb
+ for i in range(nb):
+ ari = ar[bi == i]
+ mini, maxi = ari.min(), ari.max()
+ if maxi < 1:
+ shapes[i] = [maxi, 1]
+ elif mini > 1:
+ shapes[i] = [1, 1 / mini]
+
+ self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
+
+ # Cache labels
+ create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
+ nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
+ pbar = tqdm(self.label_files)
+ for i, file in enumerate(pbar):
+ l = self.labels[i] # label
+ if l.shape[0]:
+ assert l.shape[1] == 5, '> 5 label columns: %s' % file
+ assert (l >= 0).all(), 'negative labels: %s' % file
+ assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
+ if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
+ nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
+ if single_cls:
+ l[:, 0] = 0 # force dataset into single-class mode
+ self.labels[i] = l
+ nf += 1 # file found
+
+ # Create subdataset (a smaller dataset)
+ if create_datasubset and ns < 1E4:
+ if ns == 0:
+ create_folder(path='./datasubset')
+ os.makedirs('./datasubset/images')
+ exclude_classes = 43
+ if exclude_classes not in l[:, 0]:
+ ns += 1
+ # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
+ with open('./datasubset/images.txt', 'a') as f:
+ f.write(self.img_files[i] + '\n')
+
+ # Extract object detection boxes for a second stage classifier
+ if extract_bounding_boxes:
+ p = Path(self.img_files[i])
+ img = cv2.imread(str(p))
+ h, w = img.shape[:2]
+ for j, x in enumerate(l):
+ f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
+ if not os.path.exists(Path(f).parent):
+ os.makedirs(Path(f).parent) # make new output folder
+
+ b = x[1:] * [w, h, w, h] # box
+ b[2:] = b[2:].max() # rectangle to square
+ b[2:] = b[2:] * 1.3 + 30 # pad
+ b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
+
+ b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
+ b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
+ assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
+ else:
+ ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
+ # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
+
+ pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
+ cache_path, nf, nm, ne, nd, n)
+ if nf == 0:
+ s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
+ print(s)
+ assert not augment, '%s. Can not train without labels.' % s
+
+ # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
+ self.imgs = [None] * n
+ if cache_images:
+ gb = 0 # Gigabytes of cached images
+ pbar = tqdm(range(len(self.img_files)), desc='Caching images')
+ self.img_hw0, self.img_hw = [None] * n, [None] * n
+ for i in pbar: # max 10k images
+ self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
+ gb += self.imgs[i].nbytes
+ pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
+
+ def cache_labels(self, path='labels.cache'):
+ # Cache dataset labels, check images and read shapes
+ x = {} # dict
+ pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
+ for (img, label) in pbar:
+ try:
+ l = []
+ image = Image.open(img)
+ image.verify() # PIL verify
+ # _ = io.imread(img) # skimage verify (from skimage import io)
+ shape = exif_size(image) # image size
+ assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
+ if os.path.isfile(label):
+ with open(label, 'r') as f:
+ l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
+ if len(l) == 0:
+ l = np.zeros((0, 5), dtype=np.float32)
+ x[img] = [l, shape]
+ except Exception as e:
+ x[img] = None
+ print('WARNING: %s: %s' % (img, e))
+
+ x['hash'] = get_hash(self.label_files + self.img_files)
+ torch.save(x, path) # save for next time
+ return x
+
+ def __len__(self):
+ return len(self.img_files)
+
+ # def __iter__(self):
+ # self.count = -1
+ # print('ran dataset iter')
+ # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
+ # return self
+
+ def __getitem__(self, index):
+ if self.image_weights:
+ index = self.indices[index]
+
+ hyp = self.hyp
+ if self.mosaic:
+ # Load mosaic
+ img, labels = load_mosaic(self, index)
+ shapes = None
+
+ # MixUp https://arxiv.org/pdf/1710.09412.pdf
+ if random.random() < hyp['mixup']:
+ img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
+ r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
+ img = (img * r + img2 * (1 - r)).astype(np.uint8)
+ labels = np.concatenate((labels, labels2), 0)
+
+ else:
+ # Load image
+ img, (h0, w0), (h, w) = load_image(self, index)
+
+ # Letterbox
+ shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
+ img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
+ shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
+
+ # Load labels
+ labels = []
+ x = self.labels[index]
+ if x.size > 0:
+ # Normalized xywh to pixel xyxy format
+ labels = x.copy()
+ labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
+ labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
+ labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
+ labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
+
+ if self.augment:
+ # Augment imagespace
+ if not self.mosaic:
+ img, labels = random_perspective(img, labels,
+ degrees=hyp['degrees'],
+ translate=hyp['translate'],
+ scale=hyp['scale'],
+ shear=hyp['shear'],
+ perspective=hyp['perspective'])
+
+ # Augment colorspace
+ augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
+
+ # Apply cutouts
+ # if random.random() < 0.9:
+ # labels = cutout(img, labels)
+
+ nL = len(labels) # number of labels
+ if nL:
+ labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
+ labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
+ labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
+
+ if self.augment:
+ # flip up-down
+ if random.random() < hyp['flipud']:
+ img = np.flipud(img)
+ if nL:
+ labels[:, 2] = 1 - labels[:, 2]
+
+ # flip left-right
+ if random.random() < hyp['fliplr']:
+ img = np.fliplr(img)
+ if nL:
+ labels[:, 1] = 1 - labels[:, 1]
+
+ labels_out = torch.zeros((nL, 6))
+ if nL:
+ labels_out[:, 1:] = torch.from_numpy(labels)
+
+ # Convert
+ img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to CxHxW
+ img = np.ascontiguousarray(img)
+
+ return torch.from_numpy(img), labels_out, self.img_files[index], shapes
+
+ @staticmethod
+ def collate_fn(batch):
+ img, label, path, shapes = zip(*batch) # transposed
+ for i, l in enumerate(label):
+ l[:, 0] = i # add target image index for build_targets()
+ return torch.stack(img, 0), torch.cat(label, 0), path, shapes
+
+
+# Ancillary functions --------------------------------------------------------------------------------------------------
+def load_image(self, index):
+ # loads 1 image from dataset, returns img, original hw, resized hw
+ img = self.imgs[index]
+ if img is None: # not cached
+ path = self.img_files[index]
+ img = cv2.imread(path) # BGR
+ assert img is not None, 'Image Not Found ' + path
+ h0, w0 = img.shape[:2] # orig hw
+ r = self.img_size / max(h0, w0) # resize image to img_size
+ if r != 1: # always resize down, only resize up if training with augmentation
+ interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
+ img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
+ return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
+ else:
+ return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
+
+
+def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
+ r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
+ hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
+ dtype = img.dtype # uint8
+
+ x = np.arange(0, 256, dtype=np.int16)
+ lut_hue = ((x * r[0]) % 180).astype(dtype)
+ lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
+ lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
+
+ img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
+ cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
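+ # The three 256-entry LUTs remap each channel in one vectorized pass: hue wraps modulo
+ # 180 (OpenCV's uint8 hue range), while saturation and value are clipped to [0, 255]
+ # after scaling by their random gains.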
+
+ # Histogram equalization
+ # if random.random() < 0.2:
+ # for i in range(3):
+ # img[:, :, i] = cv2.equalizeHist(img[:, :, i])
+
+
+def load_mosaic(self, index):
+ # loads images in a mosaic
+
+ labels4 = []
+ s = self.img_size
+ yc, xc = s, s # mosaic center x, y
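+ # The mosaic canvas is 2s x 2s with the shared corner fixed at (s, s); each of the four
+ # images is clipped to its quadrant, and random_perspective() below crops the canvas
+ # back to roughly s x s via the negative mosaic_border.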
+ indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
+ for i, index in enumerate(indices):
+ # Load image
+ img, _, (h, w) = load_image(self, index)
+
+ # place img in img4
+ if i == 0: # top left
+ img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
+ x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
+ elif i == 1: # top right
+ x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
+ x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
+ elif i == 2: # bottom left
+ x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
+ elif i == 3: # bottom right
+ x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
+ x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
+
+ img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
+ padw = x1a - x1b
+ padh = y1a - y1b
+
+ # Labels
+ x = self.labels[index]
+ labels = x.copy()
+ if x.size > 0: # Normalized xywh to pixel xyxy format
+ labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
+ labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
+ labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
+ labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
+ labels4.append(labels)
+
+ # Concat/clip labels
+ if len(labels4):
+ labels4 = np.concatenate(labels4, 0)
+ # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
+ np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
+
+ # Replicate
+ # img4, labels4 = replicate(img4, labels4)
+
+ # Augment
+ # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
+ img4, labels4 = random_perspective(img4, labels4,
+ degrees=self.hyp['degrees'],
+ translate=self.hyp['translate'],
+ scale=self.hyp['scale'],
+ shear=self.hyp['shear'],
+ perspective=self.hyp['perspective'],
+ border=self.mosaic_border) # border to remove
+
+ return img4, labels4
+
+
+def replicate(img, labels):
+ # Replicate labels
+ h, w = img.shape[:2]
+ boxes = labels[:, 1:].astype(int)
+ x1, y1, x2, y2 = boxes.T
+ s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
+ for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
+ x1b, y1b, x2b, y2b = boxes[i]
+ bh, bw = y2b - y1b, x2b - x1b
+ yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
+ x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
+ img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
+ labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
+
+ return img, labels
+
+
+def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
+ # Resize image to a 64-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
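+ # e.g. a 1280x720 image with new_shape=640: r = 0.5, new_unpad = (640, 360),
+ # dh = 280 -> mod 64 -> 24, i.e. 12 px of gray padding on top and bottom and a final
+ # 640x384 output when auto=True.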
+ shape = img.shape[:2] # current shape [height, width]
+ if isinstance(new_shape, int):
+ new_shape = (new_shape, new_shape)
+
+ # Scale ratio (new / old)
+ r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+ if not scaleup: # only scale down, do not scale up (for better test mAP)
+ r = min(r, 1.0)
+
+ # Compute padding
+ ratio = r, r # width, height ratios
+ new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+ dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
+ if auto: # minimum rectangle
+ dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
+ elif scaleFill: # stretch
+ dw, dh = 0.0, 0.0
+ new_unpad = (new_shape[1], new_shape[0])
+ ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
+
+ dw /= 2 # divide padding into 2 sides
+ dh /= 2
+
+ if shape[::-1] != new_unpad: # resize
+ img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+ top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
+ left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
+ img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
+ return img, ratio, (dw, dh)
+
+
+def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
+ # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
+ # targets = [cls, xyxy]
+
+ height = img.shape[0] + border[0] * 2 # shape(h,w,c)
+ width = img.shape[1] + border[1] * 2
+
+ # Center
+ C = np.eye(3)
+ C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
+ C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
+
+ # Perspective
+ P = np.eye(3)
+ P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
+ P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
+
+ # Rotation and Scale
+ R = np.eye(3)
+ a = random.uniform(-degrees, degrees)
+ # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
+ s = random.uniform(1 - scale, 1 + scale)
+ # s = 2 ** random.uniform(-scale, scale)
+ R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
+
+ # Shear
+ S = np.eye(3)
+ S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
+ S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
+
+ # Translation
+ T = np.eye(3)
+ T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
+ T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
+
+ # Combined rotation matrix
+ M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
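+ # Matrices compose right-to-left in homogeneous coordinates: C first moves the origin
+ # to the image center, P, R and S then warp about that origin, and T finally places
+ # the result on the output canvas.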
+ if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
+ if perspective:
+ img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
+ else: # affine
+ img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
+
+ # Visualize
+ # import matplotlib.pyplot as plt
+ # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
+ # ax[0].imshow(img[:, :, ::-1]) # base
+ # ax[1].imshow(img2[:, :, ::-1]) # warped
+
+ # Transform label coordinates
+ n = len(targets)
+ if n:
+ # warp points
+ xy = np.ones((n * 4, 3))
+ xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
+ xy = xy @ M.T # transform
+ if perspective:
+ xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
+ else: # affine
+ xy = xy[:, :2].reshape(n, 8)
+
+ # create new boxes
+ x = xy[:, [0, 2, 4, 6]]
+ y = xy[:, [1, 3, 5, 7]]
+ xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
+
+ # # apply angle-based reduction of bounding boxes
+ # radians = a * math.pi / 180
+ # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
+ # x = (xy[:, 2] + xy[:, 0]) / 2
+ # y = (xy[:, 3] + xy[:, 1]) / 2
+ # w = (xy[:, 2] - xy[:, 0]) * reduction
+ # h = (xy[:, 3] - xy[:, 1]) * reduction
+ # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
+
+ # clip boxes
+ xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
+ xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
+
+ # filter candidates
+ i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
+ targets = targets[i]
+ targets[:, 1:5] = xy[i]
+
+ return img, targets
+
+
+def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2): # box1(4,n), box2(4,n)
+ # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
+ w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
+ w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
+ ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
+ return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
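+ # A warped box survives only if it is still at least 2 px wide and tall, kept at least
+ # 20% of its pre-warp area, and has an aspect ratio below 20.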
+
+
+def cutout(image, labels):
+ # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
+ h, w = image.shape[:2]
+
+ def bbox_ioa(box1, box2):
+ # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
+ box2 = box2.transpose()
+
+ # Get the coordinates of bounding boxes
+ b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
+ b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
+
+ # Intersection area
+ inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
+ (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
+
+ # box2 area
+ box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
+
+ # Intersection over box2 area
+ return inter_area / box2_area
+
+ # create random masks
+ scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
+ for s in scales:
+ mask_h = random.randint(1, int(h * s))
+ mask_w = random.randint(1, int(w * s))
+
+ # box
+ xmin = max(0, random.randint(0, w) - mask_w // 2)
+ ymin = max(0, random.randint(0, h) - mask_h // 2)
+ xmax = min(w, xmin + mask_w)
+ ymax = min(h, ymin + mask_h)
+
+ # apply random color mask
+ image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
+
+ # return unobscured labels
+ if len(labels) and s > 0.03:
+ box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
+ ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
+ labels = labels[ioa < 0.60] # remove >60% obscured labels
+
+ return labels
+
+
+def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
+ # creates a new ./images_reduced folder with reduced size images of maximum size img_size
+ path_new = path + '_reduced' # reduced images path
+ create_folder(path_new)
+ for f in tqdm(glob.glob('%s/*.*' % path)):
+ try:
+ img = cv2.imread(f)
+ h, w = img.shape[:2]
+ r = img_size / max(h, w) # size ratio
+ if r < 1.0:
+ img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
+ fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
+ cv2.imwrite(fnew, img)
+ except Exception:
+ print('WARNING: image failure %s' % f)
+
+
+def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
+ # Converts dataset to bmp (for faster training)
+ formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
+ for a, b, files in os.walk(dataset):
+ for file in tqdm(files, desc=a):
+ p = a + '/' + file
+ s = Path(file).suffix
+ if s == '.txt': # replace text
+ with open(p, 'r') as f:
+ lines = f.read()
+ for f in formats:
+ lines = lines.replace(f, '.bmp')
+ with open(p, 'w') as f:
+ f.write(lines)
+ elif s in formats: # replace image
+ cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
+ if s != '.bmp':
+ os.system("rm '%s'" % p)
+
+
+def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
+ # Copies all the images in a text file (list of images) into a folder
+ create_folder(path[:-4])
+ with open(path, 'r') as f:
+ for line in f.read().splitlines():
+ os.system('cp "%s" %s' % (line, path[:-4]))
+ print(line)
+
+
+def create_folder(path='./new'):
+ # Create folder
+ if os.path.exists(path):
+ shutil.rmtree(path) # delete output folder
+ os.makedirs(path) # make new output folder
diff --git a/utils/general.py b/utils/general.py
new file mode 100644
index 0000000..f326acc
--- /dev/null
+++ b/utils/general.py
@@ -0,0 +1,1237 @@
+import glob
+import math
+import os
+import random
+import shutil
+import subprocess
+import time
+from contextlib import contextmanager
+from copy import copy
+from pathlib import Path
+from sys import platform
+
+import cv2
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+import torch.nn as nn
+import torchvision
+import yaml
+from scipy.cluster.vq import kmeans
+from scipy.signal import butter, filtfilt
+from tqdm import tqdm
+
+from utils.torch_utils import init_seeds as init_torch_seeds, is_parallel
+
+# Set printoptions
+torch.set_printoptions(linewidth=320, precision=5, profile='long')
+np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
+matplotlib.rc('font', **{'size': 11})
+
+# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
+cv2.setNumThreads(0)
+
+
+@contextmanager
+def torch_distributed_zero_first(local_rank: int):
+ """
+ Context manager that makes all processes in distributed training wait for the local master to finish a task first.
+ """
+ if local_rank not in [-1, 0]:
+ torch.distributed.barrier()
+ yield
+ if local_rank == 0:
+ torch.distributed.barrier()
+
+
+def init_seeds(seed=0):
+ random.seed(seed)
+ np.random.seed(seed)
+ init_torch_seeds(seed=seed) # torch-specific seeding (aliased import avoids recursing into this function)
+
+
+def get_latest_run(search_dir='./runs'):
+ # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
+ last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
+ return max(last_list, key=os.path.getctime)
+
+
+def check_git_status():
+ # Suggest 'git pull' if repo is out of date
+ if platform in ['linux', 'darwin'] and not os.path.isfile('/.dockerenv'):
+ s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
+ if 'Your branch is behind' in s:
+ print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
+
+
+def check_img_size(img_size, s=32):
+ # Verify img_size is a multiple of stride s
+ new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
+ if new_size != img_size:
+ print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
+ return new_size
+
+
+def check_anchors(dataset, model, thr=4.0, imgsz=640):
+ # Check anchor fit to data, recompute if necessary
+ print('\nAnalyzing anchors... ', end='')
+ m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
+ shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
+ scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
+ wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
+
+ def metric(k): # compute metric
+ r = wh[:, None] / k[None]
+ x = torch.min(r, 1. / r).min(2)[0] # ratio metric
+ best = x.max(1)[0] # best_x
+ aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold
+ bpr = (best > 1. / thr).float().mean() # best possible recall
+ return bpr, aat
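+ # x holds each target's worst-case wh ratio to every anchor (min of r and 1/r), so a
+ # target "fits" an anchor when that ratio exceeds 1/thr, i.e. the anchor is within a
+ # factor of thr (4 by default) of the target in both width and height.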
+
+ bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
+ print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
+ if bpr < 0.98: # threshold to recompute
+ print('. Attempting to generate improved anchors, please wait...')
+ na = m.anchor_grid.numel() // 2 # number of anchors
+ new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
+ new_bpr = metric(new_anchors.reshape(-1, 2))[0]
+ if new_bpr > bpr: # replace anchors
+ new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
+ m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
+ m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
+ check_anchor_order(m)
+ print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
+ else:
+ print('Original anchors better than new anchors. Proceeding with original anchors.')
+ print('') # newline
+
+
+def check_anchor_order(m):
+ # Check anchor order against stride order for YOLO Detect() module m, and correct if necessary
+ a = m.anchor_grid.prod(-1).view(-1) # anchor area
+ da = a[-1] - a[0] # delta a
+ ds = m.stride[-1] - m.stride[0] # delta s
+ if da.sign() != ds.sign(): # anchor order does not match stride order
+ print('Reversing anchor order')
+ m.anchors[:] = m.anchors.flip(0)
+ m.anchor_grid[:] = m.anchor_grid.flip(0)
+
+
+def check_file(file):
+ # Searches for file if not found locally
+ if os.path.isfile(file) or file == '':
+ return file
+ else:
+ files = glob.glob('./**/' + file, recursive=True) # find file
+ assert len(files), 'File Not Found: %s' % file # assert file was found
+ return files[0] # return first file if multiple found
+
+
+def make_divisible(x, divisor):
+ # Returns x rounded up so it is evenly divisible by divisor
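+ # e.g. make_divisible(641, 32) returns 672: sizes are always rounded up to the next
+ # stride multiple, never down.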
+ return math.ceil(x / divisor) * divisor
+
+
+def labels_to_class_weights(labels, nc=80):
+ # Get class weights (inverse frequency) from training labels
+ if labels[0] is None: # no labels loaded
+ return torch.Tensor()
+
+ labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
+    classes = labels[:, 0].astype(int)  # labels = [class xywh]
+    weights = np.bincount(classes, minlength=nc)  # occurrences per class
+
+    # Prepend gridpoint count (for uCE training)
+ # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
+ # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
+
+ weights[weights == 0] = 1 # replace empty bins with 1
+ weights = 1 / weights # number of targets per class
+ weights /= weights.sum() # normalize
+ return torch.from_numpy(weights)
+
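+# Editor's worked example: with nc=2 and label files containing classes [0, 0] and [1],
+# bincount gives [2, 1], inverse frequency [0.5, 1.0], normalized weights [1/3, 2/3].
+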
+
+def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
+ # Produces image weights based on class mAPs
+ n = len(labels)
+    class_counts = np.array([np.bincount(labels[i][:, 0].astype(int), minlength=nc) for i in range(n)])
+ image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
+ # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
+ return image_weights
+
+
+def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
+ # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
+ # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
+ # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
+ # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
+ # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
+ x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
+ return x
+
+
+def xyxy2xywh(x):
+ # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
+ y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
+ y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
+ y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
+ y[:, 2] = x[:, 2] - x[:, 0] # width
+ y[:, 3] = x[:, 3] - x[:, 1] # height
+ return y
+
+
+def xywh2xyxy(x):
+ # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+ y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
+ y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
+ y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
+ y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
+ y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
+ return y
+
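+# Editor's round-trip example: xywh2xyxy([[50., 50., 20., 10.]]) -> [[40., 45., 60., 55.]],
+# and xyxy2xywh maps that back to [[50., 50., 20., 10.]]; the two converters are inverses.
+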
+
+def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
+ # Rescale coords (xyxy) from img1_shape to img0_shape
+ if ratio_pad is None: # calculate from img0_shape
+ gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
+ pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
+ else:
+ gain = ratio_pad[0][0]
+ pad = ratio_pad[1]
+
+ coords[:, [0, 2]] -= pad[0] # x padding
+ coords[:, [1, 3]] -= pad[1] # y padding
+ coords[:, :4] /= gain
+ clip_coords(coords, img0_shape)
+ return coords
+
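+# Editor's worked example: letterboxing a 720x1280 (h, w) image to 640x640 gives
+# gain = min(640/720, 640/1280) = 0.5 and (x, y) padding (0, 140), so a detection at
+# x1 = 100 in the padded image maps back to (100 - 0) / 0.5 = 200 in the original.
+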
+
+def clip_coords(boxes, img_shape):
+ # Clip bounding xyxy bounding boxes to image shape (height, width)
+ boxes[:, 0].clamp_(0, img_shape[1]) # x1
+ boxes[:, 1].clamp_(0, img_shape[0]) # y1
+ boxes[:, 2].clamp_(0, img_shape[1]) # x2
+ boxes[:, 3].clamp_(0, img_shape[0]) # y2
+
+
+def ap_per_class(tp, conf, pred_cls, target_cls):
+ """ Compute the average precision, given the recall and precision curves.
+ Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
+ # Arguments
+ tp: True positives (nparray, nx1 or nx10).
+ conf: Objectness value from 0-1 (nparray).
+ pred_cls: Predicted object classes (nparray).
+ target_cls: True object classes (nparray).
+ # Returns
+ The average precision as computed in py-faster-rcnn.
+ """
+
+ # Sort by objectness
+ i = np.argsort(-conf)
+ tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
+
+ # Find unique classes
+ unique_classes = np.unique(target_cls)
+
+ # Create Precision-Recall curve and compute AP for each class
+ pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
+ s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
+ ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
+ for ci, c in enumerate(unique_classes):
+ i = pred_cls == c
+ n_gt = (target_cls == c).sum() # Number of ground truth objects
+ n_p = i.sum() # Number of predicted objects
+
+ if n_p == 0 or n_gt == 0:
+ continue
+ else:
+ # Accumulate FPs and TPs
+ fpc = (1 - tp[i]).cumsum(0)
+ tpc = tp[i].cumsum(0)
+
+ # Recall
+ recall = tpc / (n_gt + 1e-16) # recall curve
+ r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
+
+ # Precision
+ precision = tpc / (tpc + fpc) # precision curve
+ p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
+
+ # AP from recall-precision curve
+ for j in range(tp.shape[1]):
+ ap[ci, j] = compute_ap(recall[:, j], precision[:, j])
+
+ # Plot
+ # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
+ # ax.plot(recall, precision)
+ # ax.set_xlabel('Recall')
+ # ax.set_ylabel('Precision')
+ # ax.set_xlim(0, 1.01)
+ # ax.set_ylim(0, 1.01)
+ # fig.tight_layout()
+ # fig.savefig('PR_curve.png', dpi=300)
+
+ # Compute F1 score (harmonic mean of precision and recall)
+ f1 = 2 * p * r / (p + r + 1e-16)
+
+ return p, r, ap, f1, unique_classes.astype('int32')
+
+
+def compute_ap(recall, precision):
+ """ Compute the average precision, given the recall and precision curves.
+ Source: https://github.com/rbgirshick/py-faster-rcnn.
+ # Arguments
+ recall: The recall curve (list).
+ precision: The precision curve (list).
+ # Returns
+ The average precision as computed in py-faster-rcnn.
+ """
+
+ # Append sentinel values to beginning and end
+ mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
+ mpre = np.concatenate(([0.], precision, [0.]))
+
+ # Compute the precision envelope
+ mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
+
+ # Integrate area under curve
+ method = 'interp' # methods: 'continuous', 'interp'
+ if method == 'interp':
+ x = np.linspace(0, 1, 101) # 101-point interp (COCO)
+ ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
+ else: # 'continuous'
+ i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
+
+ return ap
+
+
+def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
+ # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
+ box2 = box2.T
+
+ # Get the coordinates of bounding boxes
+ if x1y1x2y2: # x1, y1, x2, y2 = box1
+ b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
+ b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
+ else: # transform from xywh to xyxy
+ b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
+ b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
+ b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
+ b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
+
+ # Intersection area
+ inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
+ (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
+
+ # Union Area
+ w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
+ w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
+ union = (w1 * h1 + 1e-16) + w2 * h2 - inter
+
+ iou = inter / union # iou
+ if GIoU or DIoU or CIoU:
+ cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
+ ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
+ if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
+ c_area = cw * ch + 1e-16 # convex area
+ return iou - (c_area - union) / c_area # GIoU
+ if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
+ # convex diagonal squared
+ c2 = cw ** 2 + ch ** 2 + 1e-16
+ # centerpoint distance squared
+ rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
+ if DIoU:
+ return iou - rho2 / c2 # DIoU
+ elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
+ v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
+ with torch.no_grad():
+ alpha = v / (1 - iou + v + 1e-16)
+ return iou - (rho2 / c2 + v * alpha) # CIoU
+
+ return iou
+
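+# Editor's worked example: for xyxy boxes [0., 0., 2., 2.] and [1., 1., 3., 3.],
+# inter = 1, union = 7, so IoU = 1/7 ~ 0.143; with GIoU=True the enclosing box has
+# area 3 * 3 = 9, giving GIoU = 1/7 - (9 - 7)/9 ~ -0.079.
+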
+
+def box_iou(box1, box2):
+ # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
+ """
+ Return intersection-over-union (Jaccard index) of boxes.
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+ Arguments:
+ box1 (Tensor[N, 4])
+ box2 (Tensor[M, 4])
+ Returns:
+ iou (Tensor[N, M]): the NxM matrix containing the pairwise
+ IoU values for every element in boxes1 and boxes2
+ """
+
+ def box_area(box):
+ # box = 4xn
+ return (box[2] - box[0]) * (box[3] - box[1])
+
+ area1 = box_area(box1.T)
+ area2 = box_area(box2.T)
+
+ # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
+ inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
+ return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
+
+
+def wh_iou(wh1, wh2):
+ # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
+ wh1 = wh1[:, None] # [N,1,2]
+ wh2 = wh2[None] # [1,M,2]
+ inter = torch.min(wh1, wh2).prod(2) # [N,M]
+ return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
+
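+# Editor's example: wh_iou(torch.tensor([[2., 2.]]), torch.tensor([[2., 4.]])) -> [[0.5]]
+# (intersection 4 over union 4 + 8 - 4 = 8), computed as if both boxes shared a corner.
+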
+
+class FocalLoss(nn.Module):
+ # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
+ def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
+ super(FocalLoss, self).__init__()
+ self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
+ self.gamma = gamma
+ self.alpha = alpha
+ self.reduction = loss_fcn.reduction
+ self.loss_fcn.reduction = 'none' # required to apply FL to each element
+
+ def forward(self, pred, true):
+ loss = self.loss_fcn(pred, true)
+ # p_t = torch.exp(-loss)
+ # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
+
+ # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
+ pred_prob = torch.sigmoid(pred) # prob from logits
+ p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
+ alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
+ modulating_factor = (1.0 - p_t) ** self.gamma
+ loss *= alpha_factor * modulating_factor
+
+ if self.reduction == 'mean':
+ return loss.mean()
+ elif self.reduction == 'sum':
+ return loss.sum()
+ else: # 'none'
+ return loss
+
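+# Editor's usage sketch: criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) behaves
+# like the wrapped BCE but down-weights well-classified examples by (1 - p_t) ** gamma.
+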
+
+def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
+ # return positive, negative label smoothing BCE targets
+ return 1.0 - 0.5 * eps, 0.5 * eps
+
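+# Editor's example: smooth_BCE(eps=0.1) -> (0.95, 0.05), i.e. positive targets become
+# 0.95 and negative targets 0.05 instead of hard 1/0 labels.
+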
+
+class BCEBlurWithLogitsLoss(nn.Module):
+ # BCEwithLogitLoss() with reduced missing label effects.
+ def __init__(self, alpha=0.05):
+ super(BCEBlurWithLogitsLoss, self).__init__()
+ self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
+ self.alpha = alpha
+
+ def forward(self, pred, true):
+ loss = self.loss_fcn(pred, true)
+ pred = torch.sigmoid(pred) # prob from logits
+ dx = pred - true # reduce only missing label effects
+ # dx = (pred - true).abs() # reduce missing label and false label effects
+ alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
+ loss *= alpha_factor
+ return loss.mean()
+
+
+def compute_loss(p, targets, model): # predictions, targets, model
+ device = targets.device
+ lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
+ tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets
+ h = model.hyp # hyperparameters
+
+ # Define criteria
+ BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['cls_pw']])).to(device)
+ BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['obj_pw']])).to(device)
+
+ # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+ cp, cn = smooth_BCE(eps=0.0)
+
+ # Focal loss
+ g = h['fl_gamma'] # focal loss gamma
+ if g > 0:
+ BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+ # Losses
+ nt = 0 # number of targets
+    no = len(p)  # number of output layers (named 'no' to avoid shadowing numpy's np)
+    balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1]  # P3-5 or P3-6
+ for i, pi in enumerate(p): # layer index, layer predictions
+ b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
+ tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
+
+ n = b.shape[0] # number of targets
+ if n:
+ nt += n # cumulative targets
+ ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
+
+ # Regression
+ pxy = ps[:, :2].sigmoid() * 2. - 0.5
+ pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
+ #pxy = torch.sigmoid(ps[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
+ #pwh = torch.exp(ps[:, 2:4]).clamp(max=1E3) * anchors[i]
+ pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box
+            giou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # CIoU(prediction, target), stored in 'giou'
+ lbox += (1.0 - giou).mean() # giou loss
+
+ # Objectness
+ tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype) # giou ratio
+
+ # Classification
+ if model.nc > 1: # cls loss (only if multiple classes)
+ t = torch.full_like(ps[:, 5:], cn, device=device) # targets
+ t[range(n), tcls[i]] = cp
+ lcls += BCEcls(ps[:, 5:], t) # BCE
+
+ # Append targets to text file
+ # with open('targets.txt', 'a') as file:
+ # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
+
+ lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss
+
+    s = 3 / no  # output count scaling
+    lbox *= h['giou'] * s
+    lobj *= h['obj'] * s * (1.4 if no == 4 else 1.)
+ lcls *= h['cls'] * s
+ bs = tobj.shape[0] # batch size
+
+ loss = lbox + lobj + lcls
+ return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
+
+
+def build_targets(p, targets, model):
+ nt = targets.shape[0] # number of anchors, targets
+ tcls, tbox, indices, anch = [], [], [], []
+ gain = torch.ones(6, device=targets.device) # normalized to gridspace gain
+ off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float() # overlap offsets
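+    # editor's note: each row of off selects one axis-aligned neighbouring grid cell; targets whose
+    # centre falls near a cell border are duplicated into that neighbour below, so up to three
+    # cells learn to predict the same object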
+
+ g = 0.5 # offset
+ multi_gpu = is_parallel(model)
+ for i, jj in enumerate(model.module.yolo_layers if multi_gpu else model.yolo_layers):
+ # get number of grid points and anchor vec for this yolo layer
+ anchors = model.module.module_list[jj].anchor_vec if multi_gpu else model.module_list[jj].anchor_vec
+ gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
+
+ # Match targets to anchors
+ a, t, offsets = [], targets * gain, 0
+ if nt:
+ na = anchors.shape[0] # number of anchors
+ at = torch.arange(na).view(na, 1).repeat(1, nt) # anchor tensor, same as .repeat_interleave(nt)
+ r = t[None, :, 4:6] / anchors[:, None] # wh ratio
+ j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
+ # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
+ a, t = at[j], t.repeat(na, 1, 1)[j] # filter
+
+ # overlaps
+ gxy = t[:, 2:4] # grid xy
+ z = torch.zeros_like(gxy)
+ j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+ l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
+ a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
+ offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g
+
+ # Define
+ b, c = t[:, :2].long().T # image, class
+ gxy = t[:, 2:4] # grid xy
+ gwh = t[:, 4:6] # grid wh
+ gij = (gxy - offsets).long()
+ gi, gj = gij.T # grid xy indices
+
+ # Append
+ #indices.append((b, a, gj, gi)) # image, anchor, grid indices
+ indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
+ tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
+ anch.append(anchors[a]) # anchors
+ tcls.append(c) # class
+
+ return tcls, tbox, indices, anch
+
+
+def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
+ """Performs Non-Maximum Suppression (NMS) on inference results
+
+ Returns:
+ detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
+ """
+ if prediction.dtype is torch.float16:
+ prediction = prediction.float() # to FP32
+
+ nc = prediction[0].shape[1] - 5 # number of classes
+ xc = prediction[..., 4] > conf_thres # candidates
+
+ # Settings
+ min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
+ max_det = 300 # maximum number of detections per image
+ time_limit = 10.0 # seconds to quit after
+ redundant = True # require redundant detections
+ multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
+
+ t = time.time()
+ output = [None] * prediction.shape[0]
+ for xi, x in enumerate(prediction): # image index, image inference
+ # Apply constraints
+ # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
+ x = x[xc[xi]] # confidence
+
+ # If none remain process next image
+ if not x.shape[0]:
+ continue
+
+ # Compute conf
+ x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
+
+ # Box (center x, center y, width, height) to (x1, y1, x2, y2)
+ box = xywh2xyxy(x[:, :4])
+
+ # Detections matrix nx6 (xyxy, conf, cls)
+ if multi_label:
+ i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
+ x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
+ else: # best class only
+ conf, j = x[:, 5:].max(1, keepdim=True)
+ x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
+
+ # Filter by class
+ if classes:
+ x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
+
+ # Apply finite constraint
+ # if not torch.isfinite(x).all():
+ # x = x[torch.isfinite(x).all(1)]
+
+ # If none remain process next image
+ n = x.shape[0] # number of boxes
+ if not n:
+ continue
+
+ # Sort by confidence
+ # x = x[x[:, 4].argsort(descending=True)]
+
+ # Batched NMS
+ c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
+ boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
+ i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
+ if i.shape[0] > max_det: # limit detections
+ i = i[:max_det]
+ if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
+ try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
+ iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
+ weights = iou * scores[None] # box weights
+ x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
+ if redundant:
+ i = i[iou.sum(1) > 1] # require redundancy
+            except Exception:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
+ print(x, i, x.shape, i.shape)
+ pass
+
+ output[xi] = x[i]
+ if (time.time() - t) > time_limit:
+ break # time limit exceeded
+
+ return output
+
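+# Editor's usage sketch (names hypothetical): for raw predictions pred of shape (bs, n, 5 + nc),
+# non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45) returns a list with one
+# (k, 6) tensor of (x1, y1, x2, y2, conf, cls) rows, or None, per image.
+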
+
+def strip_optimizer(f='weights/best.pt', s=''): # from utils.utils import *; strip_optimizer()
+ # Strip optimizer from 'f' to finalize training, optionally save as 's'
+ x = torch.load(f, map_location=torch.device('cpu'))
+ x['optimizer'] = None
+ x['training_results'] = None
+ x['epoch'] = -1
+ #x['model'].half() # to FP16
+ #for p in x['model'].parameters():
+ # p.requires_grad = False
+ torch.save(x, s or f)
+ mb = os.path.getsize(s or f) / 1E6 # filesize
+ print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))
+
+
+def coco_class_count(path='../coco/labels/train2014/'):
+ # Histogram of occurrences per class
+ nc = 80 # number classes
+ x = np.zeros(nc, dtype='int32')
+ files = sorted(glob.glob('%s/*.*' % path))
+ for i, file in enumerate(files):
+ labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
+ x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
+ print(i, len(files))
+
+
+def coco_only_people(path='../coco/labels/train2017/'): # from utils.utils import *; coco_only_people()
+ # Find images with only people
+ files = sorted(glob.glob('%s/*.*' % path))
+ for i, file in enumerate(files):
+ labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
+ if all(labels[:, 0] == 0):
+ print(labels.shape[0], file)
+
+
+def crop_images_random(path='../images/', scale=0.50): # from utils.utils import *; crop_images_random()
+ # crops images into random squares up to scale fraction
+ # WARNING: overwrites images!
+ for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
+ img = cv2.imread(file) # BGR
+ if img is not None:
+ h, w = img.shape[:2]
+
+ # create random mask
+ a = 30 # minimum size (pixels)
+ mask_h = random.randint(a, int(max(a, h * scale))) # mask height
+ mask_w = mask_h # mask width
+
+ # box
+ xmin = max(0, random.randint(0, w) - mask_w // 2)
+ ymin = max(0, random.randint(0, h) - mask_h // 2)
+ xmax = min(w, xmin + mask_w)
+ ymax = min(h, ymin + mask_h)
+
+            # crop to the random box and overwrite the original image
+            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
+
+
+def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
+ # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
+ if os.path.exists('new/'):
+ shutil.rmtree('new/') # delete output folder
+ os.makedirs('new/') # make new output folder
+ os.makedirs('new/labels/')
+ os.makedirs('new/images/')
+ for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
+ with open(file, 'r') as f:
+ labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
+ i = labels[:, 0] == label_class
+ if any(i):
+ img_file = file.replace('labels', 'images').replace('txt', 'jpg')
+ labels[:, 0] = 0 # reset class to 0
+ with open('new/images.txt', 'a') as f: # add image to dataset list
+ f.write(img_file + '\n')
+ with open('new/labels/' + Path(file).name, 'a') as f: # write label
+ for l in labels[i]:
+ f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
+ shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
+
+
+def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
+ """ Creates kmeans-evolved anchors from training dataset
+
+ Arguments:
+ path: path to dataset *.yaml, or a loaded dataset
+ n: number of anchors
+ img_size: image size used for training
+ thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
+ gen: generations to evolve anchors using genetic algorithm
+
+ Return:
+ k: kmeans evolved anchors
+
+ Usage:
+ from utils.utils import *; _ = kmean_anchors()
+ """
+    thr = 1. / thr  # invert so the (0, 1] ratio metric is compared against 1/thr
+
+ def metric(k, wh): # compute metrics
+ r = wh[:, None] / k[None]
+ x = torch.min(r, 1. / r).min(2)[0] # ratio metric
+ # x = wh_iou(wh, torch.tensor(k)) # iou metric
+ return x, x.max(1)[0] # x, best_x
+
+ def fitness(k): # mutation fitness
+ _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
+ return (best * (best > thr).float()).mean() # fitness
+
+ def print_results(k):
+ k = k[np.argsort(k.prod(1))] # sort small to large
+ x, best = metric(k, wh0)
+ bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
+ print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
+ print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
+ (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
+ for i, x in enumerate(k):
+ print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
+ return k
+
+ if isinstance(path, str): # *.yaml file
+ with open(path) as f:
+ data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ from utils.datasets import LoadImagesAndLabels
+ dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
+ else:
+ dataset = path # dataset
+
+ # Get label wh
+ shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
+ wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
+
+ # Filter
+ i = (wh0 < 3.0).any(1).sum()
+ if i:
+ print('WARNING: Extremely small objects found. '
+ '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
+    wh = wh0[(wh0 >= 2.0).any(1)]  # keep boxes with width or height >= 2 pixels
+
+ # Kmeans calculation
+ print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
+ s = wh.std(0) # sigmas for whitening
+ k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
+ k *= s
+ wh = torch.tensor(wh, dtype=torch.float32) # filtered
+    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
+ k = print_results(k)
+
+ # Plot
+ # k, d = [None] * 20, [None] * 20
+ # for i in tqdm(range(1, 21)):
+ # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
+ # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
+ # ax = ax.ravel()
+ # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
+ # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
+ # ax[0].hist(wh[wh[:, 0]<100, 0],400)
+ # ax[1].hist(wh[wh[:, 1]<100, 1],400)
+ # fig.tight_layout()
+ # fig.savefig('wh.png', dpi=200)
+
+ # Evolve
+ npr = np.random
+    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, shape, mutation probability, sigma
+ pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
+ for _ in pbar:
+ v = np.ones(sh)
+ while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
+ v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
+ kg = (k.copy() * v).clip(min=2.0)
+ fg = fitness(kg)
+ if fg > f:
+ f, k = fg, kg.copy()
+ pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
+ if verbose:
+ print_results(k)
+
+ return print_results(k)
+
+
+def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
+ # Print mutation results to evolve.txt (for use with train.py --evolve)
+ a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
+ b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
+ c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
+ print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
+
+ if bucket:
+ os.system('gsutil cp gs://%s/evolve.txt .' % bucket) # download evolve.txt
+
+ with open('evolve.txt', 'a') as f: # append result
+ f.write(c + b + '\n')
+ x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
+ x = x[np.argsort(-fitness(x))] # sort
+ np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness
+
+ if bucket:
+ os.system('gsutil cp evolve.txt gs://%s' % bucket) # upload evolve.txt
+
+ # Save yaml
+ for i, k in enumerate(hyp.keys()):
+ hyp[k] = float(x[0, i + 7])
+ with open(yaml_file, 'w') as f:
+ results = tuple(x[0, :7])
+ c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
+ f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
+ yaml.dump(hyp, f, sort_keys=False)
+
+
+def apply_classifier(x, model, img, im0):
+    # Applies a second-stage classifier to YOLO detections, keeping only boxes whose class the classifier confirms
+ im0 = [im0] if isinstance(im0, np.ndarray) else im0
+ for i, d in enumerate(x): # per image
+ if d is not None and len(d):
+ d = d.clone()
+
+ # Reshape and pad cutouts
+ b = xyxy2xywh(d[:, :4]) # boxes
+ b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
+ b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
+ d[:, :4] = xywh2xyxy(b).long()
+
+ # Rescale boxes from img_size to im0 size
+ scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
+
+ # Classes
+ pred_cls1 = d[:, 5].long()
+ ims = []
+ for j, a in enumerate(d): # per item
+ cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
+ im = cv2.resize(cutout, (224, 224)) # BGR
+ # cv2.imwrite('test%i.jpg' % j, cutout)
+
+                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW (3x224x224)
+ im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
+ im /= 255.0 # 0 - 255 to 0.0 - 1.0
+ ims.append(im)
+
+ pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
+ x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
+
+ return x
+
+
+def fitness(x):
+ # Returns fitness (for use with results.txt or evolve.txt)
+ w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+ return (x[:, :4] * w).sum(1)
+
+
+def output_to_target(output, width, height):
+ # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
+ if isinstance(output, torch.Tensor):
+ output = output.cpu().numpy()
+
+ targets = []
+ for i, o in enumerate(output):
+ if o is not None:
+ for pred in o:
+ box = pred[:4]
+ w = (box[2] - box[0]) / width
+ h = (box[3] - box[1]) / height
+ x = box[0] / width + w / 2
+ y = box[1] / height + h / 2
+ conf = pred[4]
+ cls = int(pred[5])
+
+ targets.append([i, cls, x, y, w, h, conf])
+
+ return np.array(targets)
+
+
+def increment_dir(dir, comment=''):
+ # Increments a directory runs/exp1 --> runs/exp2_comment
+ n = 0 # number
+ dir = str(Path(dir)) # os-agnostic
+ d = sorted(glob.glob(dir + '*')) # directories
+ if len(d):
+ n = max([int(x[len(dir):x.find('_') if '_' in x else None]) for x in d]) + 1 # increment
+ return dir + str(n) + ('_' + comment if comment else '')
+
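+# Editor's example: with runs/exp0 and runs/exp1 on disk, increment_dir('runs/exp', 'csp')
+# returns 'runs/exp2_csp'.
+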
+
+# Plotting functions ---------------------------------------------------------------------------------------------------
+def hist2d(x, y, n=100):
+ # 2d histogram used in labels.png and evolve.png
+ xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
+ hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
+ xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
+ yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
+ return np.log(hist[xidx, yidx])
+
+
+def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
+ # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
+ def butter_lowpass(cutoff, fs, order):
+ nyq = 0.5 * fs
+ normal_cutoff = cutoff / nyq
+ b, a = butter(order, normal_cutoff, btype='low', analog=False)
+ return b, a
+
+ b, a = butter_lowpass(cutoff, fs, order=order)
+ return filtfilt(b, a, data) # forward-backward filter
+
+
+def plot_one_box(x, img, color=None, label=None, line_thickness=None):
+ # Plots one bounding box on image img
+ tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
+ color = color or [random.randint(0, 255) for _ in range(3)]
+ c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+ cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+ if label:
+ tf = max(tl - 1, 1) # font thickness
+ t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+ c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+ cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
+ cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+
+
+def plot_wh_methods(): # from utils.utils import *; plot_wh_methods()
+ # Compares the two methods for width-height anchor multiplication
+ # https://github.com/ultralytics/yolov3/issues/168
+ x = np.arange(-4.0, 4.0, .1)
+ ya = np.exp(x)
+ yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
+
+ fig = plt.figure(figsize=(6, 3), dpi=150)
+ plt.plot(x, ya, '.-', label='YOLO')
+ plt.plot(x, yb ** 2, '.-', label='YOLO ^2')
+ plt.plot(x, yb ** 1.6, '.-', label='YOLO ^1.6')
+ plt.xlim(left=-4, right=4)
+ plt.ylim(bottom=0, top=6)
+ plt.xlabel('input')
+ plt.ylabel('output')
+ plt.grid()
+ plt.legend()
+ fig.tight_layout()
+ fig.savefig('comparison.png', dpi=200)
+
+
+def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
+ tl = 3 # line thickness
+ tf = max(tl - 1, 1) # font thickness
+ if os.path.isfile(fname): # do not overwrite
+ return None
+
+ if isinstance(images, torch.Tensor):
+ images = images.cpu().float().numpy()
+
+ if isinstance(targets, torch.Tensor):
+ targets = targets.cpu().numpy()
+
+ # un-normalise
+ if np.max(images[0]) <= 1:
+ images *= 255
+
+ bs, _, h, w = images.shape # batch size, _, height, width
+ bs = min(bs, max_subplots) # limit plot images
+ ns = np.ceil(bs ** 0.5) # number of subplots (square)
+
+ # Check if we should resize
+ scale_factor = max_size / max(h, w)
+ if scale_factor < 1:
+ h = math.ceil(scale_factor * h)
+ w = math.ceil(scale_factor * w)
+
+ # Empty array for output
+ mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)
+
+ # Fix class - colour map
+ prop_cycle = plt.rcParams['axes.prop_cycle']
+ # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
+    hex2rgb = lambda c: tuple(int(c[1 + i:1 + i + 2], 16) for i in (0, 2, 4))  # '#rrggbb' -> (r, g, b)
+    color_lut = [hex2rgb(c) for c in prop_cycle.by_key()['color']]
+
+ for i, img in enumerate(images):
+ if i == max_subplots: # if last batch has fewer images than we expect
+ break
+
+ block_x = int(w * (i // ns))
+ block_y = int(h * (i % ns))
+
+ img = img.transpose(1, 2, 0)
+ if scale_factor < 1:
+ img = cv2.resize(img, (w, h))
+
+ mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
+ if len(targets) > 0:
+ image_targets = targets[targets[:, 0] == i]
+ boxes = xywh2xyxy(image_targets[:, 2:6]).T
+ classes = image_targets[:, 1].astype('int')
+ gt = image_targets.shape[1] == 6 # ground truth if no conf column
+ conf = None if gt else image_targets[:, 6] # check for confidence presence (gt vs pred)
+
+ boxes[[0, 2]] *= w
+ boxes[[0, 2]] += block_x
+ boxes[[1, 3]] *= h
+ boxes[[1, 3]] += block_y
+ for j, box in enumerate(boxes.T):
+ cls = int(classes[j])
+ color = color_lut[cls % len(color_lut)]
+ cls = names[cls] if names else cls
+ if gt or conf[j] > 0.3: # 0.3 conf thresh
+ label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
+ plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
+
+ # Draw image filename labels
+ if paths is not None:
+ label = os.path.basename(paths[i])[:40] # trim to 40 char
+ t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+ cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
+ lineType=cv2.LINE_AA)
+
+ # Image border
+ cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
+
+ if fname is not None:
+ mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
+ cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))
+
+ return mosaic
+
+
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
+ # Plot LR simulating training for full epochs
+ optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
+ y = []
+ for _ in range(epochs):
+ scheduler.step()
+ y.append(optimizer.param_groups[0]['lr'])
+ plt.plot(y, '.-', label='LR')
+ plt.xlabel('epoch')
+ plt.ylabel('LR')
+ plt.grid()
+ plt.xlim(0, epochs)
+ plt.ylim(0)
+ plt.tight_layout()
+ plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
+
+
+def plot_test_txt(): # from utils.utils import *; plot_test()
+ # Plot test.txt histograms
+ x = np.loadtxt('test.txt', dtype=np.float32)
+ box = xyxy2xywh(x[:, :4])
+ cx, cy = box[:, 0], box[:, 1]
+
+ fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
+ ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
+ ax.set_aspect('equal')
+ plt.savefig('hist2d.png', dpi=300)
+
+ fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
+ ax[0].hist(cx, bins=600)
+ ax[1].hist(cy, bins=600)
+ plt.savefig('hist1d.png', dpi=200)
+
+
+def plot_targets_txt(): # from utils.utils import *; plot_targets_txt()
+ # Plot targets.txt histograms
+ x = np.loadtxt('targets.txt', dtype=np.float32).T
+ s = ['x targets', 'y targets', 'width targets', 'height targets']
+ fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
+ ax = ax.ravel()
+ for i in range(4):
+ ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
+ ax[i].legend()
+ ax[i].set_title(s[i])
+ plt.savefig('targets.jpg', dpi=200)
+
+
+def plot_study_txt(f='study.txt', x=None): # from utils.utils import *; plot_study_txt()
+ # Plot study.txt generated by test.py
+ fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
+ ax = ax.ravel()
+
+ fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
+ for f in ['coco_study/study_coco_yolov4%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
+ y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
+ x = np.arange(y.shape[1]) if x is None else np.array(x)
+ s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
+ for i in range(7):
+ ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
+ ax[i].set_title(s[i])
+
+ j = y[3].argmax() + 1
+ ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
+ label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
+
+ ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.8, 39.6, 43.0, 47.5, 49.4, 50.7],
+ 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
+
+ ax2.grid()
+ ax2.set_xlim(0, 30)
+ ax2.set_ylim(28, 50)
+ ax2.set_yticks(np.arange(30, 55, 5))
+ ax2.set_xlabel('GPU Speed (ms/img)')
+ ax2.set_ylabel('COCO AP val')
+ ax2.legend(loc='lower right')
+ plt.savefig('study_mAP_latency.png', dpi=300)
+ plt.savefig(f.replace('.txt', '.png'), dpi=200)
+
+
+def plot_labels(labels, save_dir=''):
+ # plot dataset labels
+ c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
+ nc = int(c.max() + 1) # number of classes
+
+ fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
+ ax = ax.ravel()
+ ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
+ ax[0].set_xlabel('classes')
+ ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
+ ax[1].set_xlabel('x')
+ ax[1].set_ylabel('y')
+ ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
+ ax[2].set_xlabel('width')
+ ax[2].set_ylabel('height')
+ plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
+ plt.close()
+
+
+def plot_evolution(yaml_file='runs/evolve/hyp_evolved.yaml'): # from utils.utils import *; plot_evolution()
+ # Plot hyperparameter evolution results in evolve.txt
+ with open(yaml_file) as f:
+ hyp = yaml.load(f, Loader=yaml.FullLoader)
+ x = np.loadtxt('evolve.txt', ndmin=2)
+ f = fitness(x)
+ # weights = (f - f.min()) ** 2 # for weighted results
+ plt.figure(figsize=(10, 10), tight_layout=True)
+ matplotlib.rc('font', **{'size': 8})
+ for i, (k, v) in enumerate(hyp.items()):
+ y = x[:, i + 7]
+ # mu = (y * weights).sum() / weights.sum() # best weighted result
+ mu = y[f.argmax()] # best single result
+ plt.subplot(5, 5, i + 1)
+ plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
+ plt.plot(mu, f.max(), 'k+', markersize=15)
+ plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
+ if i % 5 != 0:
+ plt.yticks([])
+ print('%15s: %.3g' % (k, mu))
+ plt.savefig('evolve.png', dpi=200)
+ print('\nPlot saved as evolve.png')
+
+
+def plot_results_overlay(start=0, stop=0): # from utils.utils import *; plot_results_overlay()
+ # Plot training 'results*.txt', overlaying train and val losses
+ s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
+ t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
+ for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
+ results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
+ n = results.shape[1] # number of rows
+ x = range(start, min(stop, n) if stop else n)
+ fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
+ ax = ax.ravel()
+ for i in range(5):
+ for j in [i, i + 5]:
+ y = results[j, x]
+ ax[i].plot(x, y, marker='.', label=s[j])
+ # y_smooth = butter_lowpass_filtfilt(y)
+ # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
+
+ ax[i].set_title(t[i])
+ ax[i].legend()
+ ax[i].set_ylabel(f) if i == 0 else None # add filename
+ fig.savefig(f.replace('.txt', '.png'), dpi=200)
+
+
+def plot_results(start=0, stop=0, bucket='', id=(), labels=(),
+ save_dir=''): # from utils.utils import *; plot_results()
+ # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov3
+ fig, ax = plt.subplots(2, 5, figsize=(12, 6))
+ ax = ax.ravel()
+ s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
+ 'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
+ if bucket:
+ os.system('rm -rf storage.googleapis.com')
+ files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
+ else:
+ files = glob.glob(str(Path(save_dir) / 'results*.txt')) + glob.glob('../../Downloads/results*.txt')
+ for fi, f in enumerate(files):
+ try:
+ results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
+ n = results.shape[1] # number of rows
+ x = range(start, min(stop, n) if stop else n)
+ for i in range(10):
+ y = results[i, x]
+ if i in [0, 1, 2, 5, 6, 7]:
+ y[y == 0] = np.nan # dont show zero loss values
+ # y /= y[0] # normalize
+ label = labels[fi] if len(labels) else Path(f).stem
+ ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
+ ax[i].set_title(s[i])
+ # if i in [5, 6, 7]: # share train and val loss y axes
+ # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+        except Exception:
+            print('Warning: Plotting error for %s, skipping file' % f)
+
+ fig.tight_layout()
+ ax[1].legend()
+ fig.savefig(Path(save_dir) / 'results.png', dpi=200)
diff --git a/utils/google_utils.py b/utils/google_utils.py
new file mode 100644
index 0000000..453e953
--- /dev/null
+++ b/utils/google_utils.py
@@ -0,0 +1,76 @@
+# This file contains google utils: https://cloud.google.com/storage/docs/reference/libraries
+# pip install --upgrade google-cloud-storage
+# from google.cloud import storage
+
+import os
+import platform
+import time
+from pathlib import Path
+
+
+def attempt_download(weights):
+ # Attempt to download pretrained weights if not found locally
+ weights = weights.strip().replace("'", '')
+ msg = weights + ' missing'
+
+ r = 1 # return
+ if len(weights) > 0 and not os.path.isfile(weights):
+ d = {'': '',
+ }
+
+ file = Path(weights).name
+ if file in d:
+ r = gdrive_download(id=d[file], name=weights)
+
+ if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
+ os.remove(weights) if os.path.exists(weights) else None # remove partial downloads
+ s = 'curl -L -o %s "storage.googleapis.com/%s"' % (weights, file)
+ r = os.system(s) # execute, capture return values
+
+ # Error check
+ if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
+ os.remove(weights) if os.path.exists(weights) else None # remove partial downloads
+ raise Exception(msg)
+
+
+def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'):
+ # Downloads a file from Google Drive, accepting presented query
+ # from utils.google_utils import *; gdrive_download()
+ t = time.time()
+
+ print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')
+ os.remove(name) if os.path.exists(name) else None # remove existing
+ os.remove('cookie') if os.path.exists('cookie') else None
+
+ # Attempt file download
+ out = "NUL" if platform.system() == "Windows" else "/dev/null"
+ os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out))
+ if os.path.exists('cookie'): # large file
+ s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name)
+ else: # small file
+ s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id)
+ r = os.system(s) # execute, capture return values
+ os.remove('cookie') if os.path.exists('cookie') else None
+
+ # Error check
+ if r != 0:
+ os.remove(name) if os.path.exists(name) else None # remove partial
+ print('Download error ') # raise Exception('Download error')
+ return r
+
+ # Unzip if archive
+ if name.endswith('.zip'):
+ print('unzipping... ', end='')
+ os.system('unzip -q %s' % name) # unzip
+ os.remove(name) # remove zip to free space
+
+ print('Done (%.1fs)' % (time.time() - t))
+ return r
+
+
+def get_token(cookie="./cookie"):
+ with open(cookie) as f:
+ for line in f:
+ if "download" in line:
+ return line.split()[-1]
+ return ""
diff --git a/utils/layers.py b/utils/layers.py
new file mode 100644
index 0000000..edb0a69
--- /dev/null
+++ b/utils/layers.py
@@ -0,0 +1,323 @@
+import torch.nn.functional as F
+
+from utils.general import *
+
+import torch
+from torch import nn
+
+from mish_cuda import MishCuda as Mish
+
+
+def make_divisible(v, divisor):
+    # Ensures layer channel counts are divisible by 'divisor' (8 in the MobileNet reference)
+ # https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+ return math.ceil(v / divisor) * divisor
+
+
+class Flatten(nn.Module):
+ # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
+ def forward(self, x):
+ return x.view(x.size(0), -1)
+
+
+class Concat(nn.Module):
+ # Concatenate a list of tensors along dimension
+ def __init__(self, dimension=1):
+ super(Concat, self).__init__()
+ self.d = dimension
+
+ def forward(self, x):
+ return torch.cat(x, self.d)
+
+
+class FeatureConcat(nn.Module):
+ def __init__(self, layers):
+ super(FeatureConcat, self).__init__()
+ self.layers = layers # layer indices
+ self.multiple = len(layers) > 1 # multiple layers flag
+
+ def forward(self, x, outputs):
+ return torch.cat([outputs[i] for i in self.layers], 1) if self.multiple else outputs[self.layers[0]]
+
+
+class FeatureConcat2(nn.Module):
+ def __init__(self, layers):
+ super(FeatureConcat2, self).__init__()
+ self.layers = layers # layer indices
+ self.multiple = len(layers) > 1 # multiple layers flag
+
+ def forward(self, x, outputs):
+ return torch.cat([outputs[self.layers[0]], outputs[self.layers[1]].detach()], 1)
+
+
+class FeatureConcat3(nn.Module):
+ def __init__(self, layers):
+ super(FeatureConcat3, self).__init__()
+ self.layers = layers # layer indices
+ self.multiple = len(layers) > 1 # multiple layers flag
+
+ def forward(self, x, outputs):
+ return torch.cat([outputs[self.layers[0]], outputs[self.layers[1]].detach(), outputs[self.layers[2]].detach()], 1)
+
+
+class FeatureConcat_l(nn.Module):
+ def __init__(self, layers):
+ super(FeatureConcat_l, self).__init__()
+ self.layers = layers # layer indices
+ self.multiple = len(layers) > 1 # multiple layers flag
+
+    def forward(self, x, outputs):
+        # keep only the first half of the channels from each selected layer
+        half = lambda i: outputs[i][:, :outputs[i].shape[1] // 2]
+        return torch.cat([half(i) for i in self.layers], 1) if self.multiple else half(self.layers[0])
+
+
+class WeightedFeatureFusion(nn.Module): # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
+ def __init__(self, layers, weight=False):
+ super(WeightedFeatureFusion, self).__init__()
+ self.layers = layers # layer indices
+ self.weight = weight # apply weights boolean
+ self.n = len(layers) + 1 # number of layers
+ if weight:
+ self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True) # layer weights
+
+ def forward(self, x, outputs):
+ # Weights
+ if self.weight:
+ w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
+ x = x * w[0]
+
+ # Fusion
+ nx = x.shape[1] # input channels
+ for i in range(self.n - 1):
+ a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]] # feature to add
+ na = a.shape[1] # feature channels
+
+ # Adjust channels
+ if nx == na: # same shape
+ x = x + a
+ elif nx > na: # slice input
+ x[:, :na] = x[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
+ else: # slice feature
+ x = x + a[:, :nx]
+
+ return x
+
+
+class MixConv2d(nn.Module): # MixConv: Mixed Depthwise Convolutional Kernels https://arxiv.org/abs/1907.09595
+ def __init__(self, in_ch, out_ch, k=(3, 5, 7), stride=1, dilation=1, bias=True, method='equal_params'):
+ super(MixConv2d, self).__init__()
+
+ groups = len(k)
+ if method == 'equal_ch': # equal channels per group
+ i = torch.linspace(0, groups - 1E-6, out_ch).floor() # out_ch indices
+ ch = [(i == g).sum() for g in range(groups)]
+ else: # 'equal_params': equal parameter count per group
+ b = [out_ch] + [0] * groups
+ a = np.eye(groups + 1, groups, k=-1)
+ a -= np.roll(a, 1, axis=1)
+ a *= np.array(k) ** 2
+ a[0] = 1
+ ch = np.linalg.lstsq(a, b, rcond=None)[0].round().astype(int) # solve for equal weight indices, ax = b
+
+ self.m = nn.ModuleList([nn.Conv2d(in_channels=in_ch,
+ out_channels=ch[g],
+ kernel_size=k[g],
+ stride=stride,
+ padding=k[g] // 2, # 'same' pad
+ dilation=dilation,
+ bias=bias) for g in range(groups)])
+
+ def forward(self, x):
+ return torch.cat([m(x) for m in self.m], 1)
+
+
+# Activation functions below -------------------------------------------------------------------------------------------
+class SwishImplementation(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x):
+ ctx.save_for_backward(x)
+ return x * torch.sigmoid(x)
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ x = ctx.saved_tensors[0]
+ sx = torch.sigmoid(x) # sigmoid(ctx)
+ return grad_output * (sx * (1 + x * (1 - sx)))
+
+
+class MishImplementation(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x):
+ ctx.save_for_backward(x)
+ return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ x = ctx.saved_tensors[0]
+ sx = torch.sigmoid(x)
+ fx = F.softplus(x).tanh()
+ return grad_output * (fx + x * sx * (1 - fx * fx))
+
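+# Editor's note: the Mish backward above implements d/dx [x * tanh(softplus(x))]
+# = tanh(softplus(x)) + x * sigmoid(x) * (1 - tanh(softplus(x)) ** 2), i.e. fx + x * sx * (1 - fx * fx).
+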
+
+class MemoryEfficientSwish(nn.Module):
+ def forward(self, x):
+ return SwishImplementation.apply(x)
+
+
+class MemoryEfficientMish(nn.Module):
+ def forward(self, x):
+ return MishImplementation.apply(x)
+
+
+class Swish(nn.Module):
+ def forward(self, x):
+ return x * torch.sigmoid(x)
+
+
+class HardSwish(nn.Module): # https://arxiv.org/pdf/1905.02244.pdf
+ def forward(self, x):
+ return x * F.hardtanh(x + 3, 0., 6., True) / 6.
+
+
+#class Mish(nn.Module): # https://github.com/digantamisra98/Mish
+# def forward(self, x):
+# return x * F.softplus(x).tanh()
+
+class DeformConv2d(nn.Module):
+ def __init__(self, inc, outc, kernel_size=3, padding=1, stride=1, bias=None, modulation=False):
+ """
+ Args:
+            modulation (bool, optional): If True, use Modulated Deformable Convolution (Deformable ConvNets v2).
+ """
+ super(DeformConv2d, self).__init__()
+ self.kernel_size = kernel_size
+ self.padding = padding
+ self.stride = stride
+ self.zero_padding = nn.ZeroPad2d(padding)
+ self.conv = nn.Conv2d(inc, outc, kernel_size=kernel_size, stride=kernel_size, bias=bias)
+
+ self.p_conv = nn.Conv2d(inc, 2*kernel_size*kernel_size, kernel_size=3, padding=1, stride=stride)
+ nn.init.constant_(self.p_conv.weight, 0)
+ self.p_conv.register_backward_hook(self._set_lr)
+
+ self.modulation = modulation
+ if modulation:
+ self.m_conv = nn.Conv2d(inc, kernel_size*kernel_size, kernel_size=3, padding=1, stride=stride)
+ nn.init.constant_(self.m_conv.weight, 0)
+ self.m_conv.register_backward_hook(self._set_lr)
+
+ @staticmethod
+ def _set_lr(module, grad_input, grad_output):
+        # scale incoming gradients by 0.1 to lower the effective learning rate of the offset branch
+        return tuple(g * 0.1 if g is not None else g for g in grad_input)
+
+ def forward(self, x):
+ offset = self.p_conv(x)
+ if self.modulation:
+ m = torch.sigmoid(self.m_conv(x))
+
+ dtype = offset.data.type()
+ ks = self.kernel_size
+ N = offset.size(1) // 2
+
+ if self.padding:
+ x = self.zero_padding(x)
+
+ # (b, 2N, h, w)
+ p = self._get_p(offset, dtype)
+
+ # (b, h, w, 2N)
+ p = p.contiguous().permute(0, 2, 3, 1)
+ q_lt = p.detach().floor()
+ q_rb = q_lt + 1
+
+ q_lt = torch.cat([torch.clamp(q_lt[..., :N], 0, x.size(2)-1), torch.clamp(q_lt[..., N:], 0, x.size(3)-1)], dim=-1).long()
+ q_rb = torch.cat([torch.clamp(q_rb[..., :N], 0, x.size(2)-1), torch.clamp(q_rb[..., N:], 0, x.size(3)-1)], dim=-1).long()
+ q_lb = torch.cat([q_lt[..., :N], q_rb[..., N:]], dim=-1)
+ q_rt = torch.cat([q_rb[..., :N], q_lt[..., N:]], dim=-1)
+
+ # clip p
+ p = torch.cat([torch.clamp(p[..., :N], 0, x.size(2)-1), torch.clamp(p[..., N:], 0, x.size(3)-1)], dim=-1)
+
+ # bilinear kernel (b, h, w, N)
+ g_lt = (1 + (q_lt[..., :N].type_as(p) - p[..., :N])) * (1 + (q_lt[..., N:].type_as(p) - p[..., N:]))
+ g_rb = (1 - (q_rb[..., :N].type_as(p) - p[..., :N])) * (1 - (q_rb[..., N:].type_as(p) - p[..., N:]))
+ g_lb = (1 + (q_lb[..., :N].type_as(p) - p[..., :N])) * (1 - (q_lb[..., N:].type_as(p) - p[..., N:]))
+ g_rt = (1 - (q_rt[..., :N].type_as(p) - p[..., :N])) * (1 + (q_rt[..., N:].type_as(p) - p[..., N:]))
+
+ # (b, c, h, w, N)
+ x_q_lt = self._get_x_q(x, q_lt, N)
+ x_q_rb = self._get_x_q(x, q_rb, N)
+ x_q_lb = self._get_x_q(x, q_lb, N)
+ x_q_rt = self._get_x_q(x, q_rt, N)
+
+ # (b, c, h, w, N)
+ x_offset = g_lt.unsqueeze(dim=1) * x_q_lt + \
+ g_rb.unsqueeze(dim=1) * x_q_rb + \
+ g_lb.unsqueeze(dim=1) * x_q_lb + \
+ g_rt.unsqueeze(dim=1) * x_q_rt
+
+ # modulation
+ if self.modulation:
+ m = m.contiguous().permute(0, 2, 3, 1)
+ m = m.unsqueeze(dim=1)
+ m = torch.cat([m for _ in range(x_offset.size(1))], dim=1)
+ x_offset *= m
+
+ x_offset = self._reshape_x_offset(x_offset, ks)
+ out = self.conv(x_offset)
+
+ return out
+
+ def _get_p_n(self, N, dtype):
+ p_n_x, p_n_y = torch.meshgrid(
+ torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1),
+ torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1))
+ # (2N, 1)
+ p_n = torch.cat([torch.flatten(p_n_x), torch.flatten(p_n_y)], 0)
+ p_n = p_n.view(1, 2*N, 1, 1).type(dtype)
+
+ return p_n
+
+ def _get_p_0(self, h, w, N, dtype):
+ p_0_x, p_0_y = torch.meshgrid(
+ torch.arange(1, h*self.stride+1, self.stride),
+ torch.arange(1, w*self.stride+1, self.stride))
+ p_0_x = torch.flatten(p_0_x).view(1, 1, h, w).repeat(1, N, 1, 1)
+ p_0_y = torch.flatten(p_0_y).view(1, 1, h, w).repeat(1, N, 1, 1)
+ p_0 = torch.cat([p_0_x, p_0_y], 1).type(dtype)
+
+ return p_0
+
+ def _get_p(self, offset, dtype):
+ N, h, w = offset.size(1)//2, offset.size(2), offset.size(3)
+
+ # (1, 2N, 1, 1)
+ p_n = self._get_p_n(N, dtype)
+ # (1, 2N, h, w)
+ p_0 = self._get_p_0(h, w, N, dtype)
+ p = p_0 + p_n + offset
+ return p
+
+ def _get_x_q(self, x, q, N):
+ b, h, w, _ = q.size()
+ padded_w = x.size(3)
+ c = x.size(1)
+ # (b, c, h*w)
+ x = x.contiguous().view(b, c, -1)
+
+ # (b, h, w, N)
+ index = q[..., :N]*padded_w + q[..., N:] # offset_x*w + offset_y
+ # (b, c, h*w*N)
+ index = index.contiguous().unsqueeze(dim=1).expand(-1, c, -1, -1, -1).contiguous().view(b, c, -1)
+
+ x_offset = x.gather(dim=-1, index=index).contiguous().view(b, c, h, w, N)
+
+ return x_offset
+
+ @staticmethod
+ def _reshape_x_offset(x_offset, ks):
+ b, c, h, w, N = x_offset.size()
+ x_offset = torch.cat([x_offset[..., s:s+ks].contiguous().view(b, c, h, w*ks) for s in range(0, N, ks)], dim=-1)
+ x_offset = x_offset.contiguous().view(b, c, h*ks, w*ks)
+
+ return x_offset
\ No newline at end of file
diff --git a/utils/parse_config.py b/utils/parse_config.py
new file mode 100644
index 0000000..4208748
--- /dev/null
+++ b/utils/parse_config.py
@@ -0,0 +1,70 @@
+import os
+
+import numpy as np
+
+
+def parse_model_cfg(path):
+    # Parses the YOLO *.cfg file and returns module definitions; path may be 'cfg/yolov3.cfg', 'yolov3.cfg', or 'yolov3'
+ if not path.endswith('.cfg'): # add .cfg suffix if omitted
+ path += '.cfg'
+ if not os.path.exists(path) and os.path.exists('cfg' + os.sep + path): # add cfg/ prefix if omitted
+ path = 'cfg' + os.sep + path
+
+ with open(path, 'r') as f:
+ lines = f.read().split('\n')
+ lines = [x for x in lines if x and not x.startswith('#')]
+    lines = [x.strip() for x in lines]  # strip leading/trailing whitespace
+ mdefs = [] # module definitions
+ for line in lines:
+ if line.startswith('['): # This marks the start of a new block
+ mdefs.append({})
+ mdefs[-1]['type'] = line[1:-1].rstrip()
+ if mdefs[-1]['type'] == 'convolutional':
+ mdefs[-1]['batch_normalize'] = 0 # pre-populate with zeros (may be overwritten later)
+ else:
+ key, val = line.split("=")
+ key = key.rstrip()
+
+ if key == 'anchors': # return nparray
+ mdefs[-1][key] = np.array([float(x) for x in val.split(',')]).reshape((-1, 2)) # np anchors
+ elif (key in ['from', 'layers', 'mask']) or (key == 'size' and ',' in val): # return array
+ mdefs[-1][key] = [int(x) for x in val.split(',')]
+ else:
+ val = val.strip()
+ if val.isnumeric(): # return int or float
+ mdefs[-1][key] = int(val) if (int(val) - float(val)) == 0 else float(val)
+ else:
+ mdefs[-1][key] = val # return string
+
+ # Check all fields are supported
+ supported = ['type', 'batch_normalize', 'filters', 'size', 'stride', 'pad', 'activation', 'layers', 'groups',
+ 'from', 'mask', 'anchors', 'classes', 'num', 'jitter', 'ignore_thresh', 'truth_thresh', 'random',
+ 'stride_x', 'stride_y', 'weights_type', 'weights_normalization', 'scale_x_y', 'beta_nms', 'nms_kind',
+ 'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh']
+
+    f = []  # fields
+    for x in mdefs[1:]:
+        f.extend(k for k in x if k not in f)
+ u = [x for x in f if x not in supported] # unsupported fields
+ assert not any(u), "Unsupported fields %s in %s. See https://github.com/ultralytics/yolov3/issues/631" % (u, path)
+
+ return mdefs
+
+
+def parse_data_cfg(path):
+ # Parses the data configuration file
+ if not os.path.exists(path) and os.path.exists('data' + os.sep + path): # add data/ prefix if omitted
+ path = 'data' + os.sep + path
+
+ with open(path, 'r') as f:
+ lines = f.readlines()
+
+ options = dict()
+ for line in lines:
+ line = line.strip()
+ if line == '' or line.startswith('#'):
+ continue
+ key, val = line.split('=')
+ options[key.strip()] = val.strip()
+
+ return options
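`parse_model_cfg` above turns each `[block]` of a Darknet cfg into a dict: numeric values become int/float, comma-separated lists (anchors, layers, mask) become arrays, and everything else stays a string. A usage sketch, assuming the file is importable as `utils.parse_config` and that `models/yolov4-csp.cfg` exists on disk:

```python
from utils.parse_config import parse_model_cfg

mdefs = parse_model_cfg('models/yolov4-csp.cfg')
print(mdefs[0]['type'])  # 'net' -- the [net] header block comes first
conv = next(m for m in mdefs if m['type'] == 'convolutional')
print(conv['batch_normalize'], conv['filters'], conv['activation'])
```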
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
new file mode 100644
index 0000000..139c7f3
--- /dev/null
+++ b/utils/torch_utils.py
@@ -0,0 +1,226 @@
+import math
+import os
+import time
+from copy import deepcopy
+
+import torch
+import torch.backends.cudnn as cudnn
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.models as models
+
+
+def init_seeds(seed=0):
+ torch.manual_seed(seed)
+
+ # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
+ if seed == 0: # slower, more reproducible
+ cudnn.deterministic = True
+ cudnn.benchmark = False
+ else: # faster, less reproducible
+ cudnn.deterministic = False
+ cudnn.benchmark = True
+
+
+def select_device(device='', batch_size=None):
+ # device = 'cpu' or '0' or '0,1,2,3'
+ cpu_request = device.lower() == 'cpu'
+ if device and not cpu_request: # if device requested other than 'cpu'
+ os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
+        assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device  # check availability
+
+ cuda = False if cpu_request else torch.cuda.is_available()
+ if cuda:
+ c = 1024 ** 2 # bytes to MB
+ ng = torch.cuda.device_count()
+ if ng > 1 and batch_size: # check that batch_size is compatible with device_count
+ assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
+ x = [torch.cuda.get_device_properties(i) for i in range(ng)]
+ s = 'Using CUDA '
+ for i in range(0, ng):
+ if i == 1:
+ s = ' ' * len(s)
+ print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
+ (s, i, x[i].name, x[i].total_memory / c))
+ else:
+ print('Using CPU')
+
+ print('') # skip a line
+ return torch.device('cuda:0' if cuda else 'cpu')
+
+
+def time_synchronized():
+ torch.cuda.synchronize() if torch.cuda.is_available() else None
+ return time.time()
+
+
+def is_parallel(model):
+ return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
+
+
+def intersect_dicts(da, db, exclude=()):
+ # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
+ return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
+
+
+def initialize_weights(model):
+ for m in model.modules():
+ t = type(m)
+ if t is nn.Conv2d:
+ pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+ elif t is nn.BatchNorm2d:
+ m.eps = 1e-3
+ m.momentum = 0.03
+ elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
+ m.inplace = True
+
+
+def find_modules(model, mclass=nn.Conv2d):
+ # Finds layer indices matching module class 'mclass'
+ return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
+
+
+def sparsity(model):
+ # Return global model sparsity
+ a, b = 0., 0.
+ for p in model.parameters():
+ a += p.numel()
+ b += (p == 0).sum()
+ return b / a
+
+
+def prune(model, amount=0.3):
+ # Prune model to requested global sparsity
+ import torch.nn.utils.prune as prune
+ print('Pruning model... ', end='')
+ for name, m in model.named_modules():
+ if isinstance(m, nn.Conv2d):
+ prune.l1_unstructured(m, name='weight', amount=amount) # prune
+ prune.remove(m, 'weight') # make permanent
+ print(' %.3g global sparsity' % sparsity(model))
+
+
+def fuse_conv_and_bn(conv, bn):
+ # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
+ with torch.no_grad():
+ # init
+ fusedconv = nn.Conv2d(conv.in_channels,
+ conv.out_channels,
+ kernel_size=conv.kernel_size,
+ stride=conv.stride,
+ padding=conv.padding,
+ bias=True).to(conv.weight.device)
+
+ # prepare filters
+ w_conv = conv.weight.clone().view(conv.out_channels, -1)
+ w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
+ fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
+
+ # prepare spatial bias
+ b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
+ b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
+ fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
+
+ return fusedconv
+
+
+def model_info(model, verbose=False):
+    # Prints a line-by-line description of a PyTorch model
+ n_p = sum(x.numel() for x in model.parameters()) # number parameters
+ n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
+ if verbose:
+ print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+ for i, (name, p) in enumerate(model.named_parameters()):
+ name = name.replace('module_list.', '')
+ print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
+ (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
+
+ try: # FLOPS
+ from thop import profile
+ flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
+        fs = ', %.1f GFLOPS' % (flops * 100)  # 64x64 GFLOPS scaled to 640x640 (x(640/64)^2 = x100)
+ except:
+ fs = ''
+
+ print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
+
+
+def load_classifier(name='resnet101', n=2):
+ # Loads a pretrained model reshaped to n-class output
+ model = models.__dict__[name](pretrained=True)
+
+ # Display model properties
+ input_size = [3, 224, 224]
+ input_space = 'RGB'
+ input_range = [0, 1]
+ mean = [0.485, 0.456, 0.406]
+ std = [0.229, 0.224, 0.225]
+    for x in ['input_size', 'input_space', 'input_range', 'mean', 'std']:
+        print(x + ' =', eval(x))
+
+ # Reshape output to n classes
+ filters = model.fc.weight.shape[1]
+ model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
+ model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
+ model.fc.out_features = n
+ return model
+
+
+def scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio
+ # scales img(bs,3,y,x) by ratio
+ if ratio == 1.0:
+ return img
+ else:
+ h, w = img.shape[2:]
+ s = (int(h * ratio), int(w * ratio)) # new size
+ img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
+ if not same_shape: # pad/crop img
+ gs = 32 # (pixels) grid size
+ h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
+ return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
+
+
+def copy_attr(a, b, include=(), exclude=()):
+ # Copy attributes from b to a, options to only include [...] and to exclude [...]
+ for k, v in b.__dict__.items():
+ if (len(include) and k not in include) or k.startswith('_') or k in exclude:
+ continue
+ else:
+ setattr(a, k, v)
+
+
+class ModelEMA:
+ """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
+ Keep a moving average of everything in the model state_dict (parameters and buffers).
+ This is intended to allow functionality like
+ https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
+ A smoothed version of the weights is necessary for some training schemes to perform well.
+    This class is sensitive to where it is initialized in the sequence of model init,
+    GPU assignment and distributed training wrappers.
+ """
+
+ def __init__(self, model, decay=0.9999, updates=0):
+ # Create EMA
+ self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
+ # if next(model.parameters()).device.type != 'cpu':
+ # self.ema.half() # FP16 EMA
+ self.updates = updates # number of EMA updates
+ self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
+ for p in self.ema.parameters():
+ p.requires_grad_(False)
+
+ def update(self, model):
+ # Update EMA parameters
+ with torch.no_grad():
+ self.updates += 1
+ d = self.decay(self.updates)
+
+ msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
+ for k, v in self.ema.state_dict().items():
+ if v.dtype.is_floating_point:
+ v *= d
+ v += (1. - d) * msd[k].detach()
+
+ def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
+ # Update EMA attributes
+ copy_attr(self.ema, model, include, exclude)
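`fuse_conv_and_bn` above folds a BatchNorm layer's affine transform into the preceding convolution's weight and bias, saving one op per layer at inference. A quick numerical sanity check, assuming this file is importable as `utils.torch_utils`:

```python
import torch
import torch.nn as nn
from utils.torch_utils import fuse_conv_and_bn

conv = nn.Conv2d(8, 16, 3, padding=1, bias=False)
bn = nn.BatchNorm2d(16).eval()       # eval mode: uses running stats
bn.running_mean.uniform_(-1, 1)      # pretend these stats were learned
bn.running_var.uniform_(0.5, 1.5)

x = torch.randn(1, 8, 32, 32)
y_ref = bn(conv(x))
y_fused = fuse_conv_and_bn(conv, bn)(x)
print(torch.allclose(y_ref, y_fused, atol=1e-5))  # True
```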
From 855c32fdd1bf55913514c53cff67a1b4311e7083 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Mon, 16 Nov 2020 16:19:25 +0800
Subject: [PATCH 05/37] Create __init__.py
---
utils/__init__.py | 1 +
1 file changed, 1 insertion(+)
create mode 100644 utils/__init__.py
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/utils/__init__.py
@@ -0,0 +1 @@
+
From d2a65ea3c72d6972a1653b63465440c4d6fd97df Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Tue, 17 Nov 2020 11:30:13 +0800
Subject: [PATCH 06/37] Update README.md
---
README.md | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index e8ad71f..d1edafe 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,8 @@
# YOLOv4-CSP
-This is the implementation of "Scaled-YOLOv4: Scaling Cross Stage Partial Network" using PyTorch framwork.
+This is the implementation of "[Scaled-YOLOv4: Scaling Cross Stage Partial Network](https://arxiv.org/abs/2011.08036)" using PyTorch framework.
+
+* **2020.11.16** Now supported by [Darknet](https://github.com/AlexeyAB/darknet). `[yolo] new_coords=1`
## Installation
@@ -20,6 +22,8 @@ cd /yolo
## Testing
+[`yolov4-csp.weights`](https://drive.google.com/file/d/1NQwz47cW0NUgy7L3_xOKaNEfLoQuq3EL/view?usp=sharing)
+
```
# download yolov4-csp.weights and put it in /yolo/weights/ folder.
python test.py --img 640 --conf 0.001 --batch 8 --device 0 --data coco.yaml --cfg models/yolov4-csp.cfg --weights weights/yolov4-csp.weights
@@ -58,3 +62,14 @@ If you want to use multiple GPUs for training
```
python -m torch.distributed.launch --nproc_per_node 4 train.py --device 0,1,2,3 --batch-size 64 --data coco.yaml --cfg yolov4-csp.cfg --weights '' --name yolov4-csp --sync-bn
```
+
+## Citation
+
+```
+@article{bochkovskiy2020yolov4,
+ title={{Scaled-YOLOv4}: Scaling Cross Stage Partial Network},
+ author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
+ journal={arXiv preprint arXiv:2011.08036},
+ year={2020}
+}
+```
From f3a476f3e582ffc80025863f616ab2466e3b0743 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Tue, 17 Nov 2020 11:50:44 +0800
Subject: [PATCH 07/37] Update README.md
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index d1edafe..0077759 100644
--- a/README.md
+++ b/README.md
@@ -66,7 +66,7 @@ python -m torch.distributed.launch --nproc_per_node 4 train.py --device 0,1,2,3
## Citation
```
-@article{bochkovskiy2020yolov4,
+@article{wang2020scaled,
title={{Scaled-YOLOv4}: Scaling Cross Stage Partial Network},
author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
journal={arXiv preprint arXiv:2011.08036},
From eb17b4e651ffd2838d156b7ef202bded1c6b4c81 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Wed, 18 Nov 2020 17:04:29 +0800
Subject: [PATCH 08/37] Update detect.py
---
detect.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/detect.py b/detect.py
index 76d4bc3..d35059e 100644
--- a/detect.py
+++ b/detect.py
@@ -41,7 +41,11 @@ def detect(save_img=False):
# Load model
model = Darknet(cfg, imgsz).cuda()
- model.load_state_dict(torch.load(weights[0], map_location=device)['model'])
+ try:
+ model.load_state_dict(torch.load(weights[0], map_location=device)['model'])
+ except:
+ model = model.to(device)
+ load_darknet_weights(model, weights[0])
#model = attempt_load(weights, map_location=device) # load FP32 model
#imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
model.to(device).eval()
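The try/except added here first attempts to read a PyTorch checkpoint (a dict carrying a 'model' state_dict) and falls back to parsing Darknet `*.weights` if that fails. The same dispatch can be written explicitly on the file extension; a sketch reusing the names from this patch (`load_darknet_weights` is defined in `models/models.py`):

```python
if weights[0].endswith('.pt'):  # PyTorch checkpoint
    model.load_state_dict(torch.load(weights[0], map_location=device)['model'])
else:  # assume Darknet .weights format
    model = model.to(device)
    load_darknet_weights(model, weights[0])
```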
From 4dfcec67f8b7db4893ed66000dd1b317691373a4 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Wed, 2 Dec 2020 09:19:44 +0800
Subject: [PATCH 09/37] Update README.md
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 0077759..6e807c2 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 This is the implementation of "[Scaled-YOLOv4: Scaling Cross Stage Partial Network](https://arxiv.org/abs/2011.08036)" using PyTorch framework.
-* **2020.11.16** Now supported by [Darknet](https://github.com/AlexeyAB/darknet). `[yolo] new_coords=1`
+* **2020.11.16** Now supported by [Darknet](https://github.com/AlexeyAB/darknet). [`yolov4-csp.cfg`](https://github.com/AlexeyAB/darknet/blob/master/cfg/yolov4-csp.cfg) [`yolov4-csp.weights`](https://drive.google.com/file/d/1NQwz47cW0NUgy7L3_xOKaNEfLoQuq3EL/view?usp=sharing)
## Installation
From 3fbfa65b6f531ceca47cb91b269d0168189b829a Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:28:37 +0800
Subject: [PATCH 10/37] Update train.py
---
train.py | 427 ++++++++++++++++++++++++++++++++++---------------------
1 file changed, 266 insertions(+), 161 deletions(-)
diff --git a/train.py b/train.py
index 78b04b9..d7cbf1c 100644
--- a/train.py
+++ b/train.py
@@ -1,12 +1,15 @@
import argparse
+import logging
import math
import os
import random
import time
from pathlib import Path
+from warnings import warn
import numpy as np
import torch.distributed as dist
+import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
@@ -18,39 +21,52 @@
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
+#from models.yolo import Model
from models.models import *
+from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
-from utils.general import (
- check_img_size, torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors,
- labels_to_image_weights, compute_loss, plot_images, fitness, strip_optimizer, plot_results,
- get_latest_run, check_git_status, check_file, increment_dir, print_mutation, plot_evolution)
+from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
+ fitness, fitness_p, fitness_r, fitness_ap50, fitness_ap, fitness_f, strip_optimizer, get_latest_run,\
+ check_dataset, check_file, check_git_status, check_img_size, print_mutation, set_logging
from utils.google_utils import attempt_download
-from utils.torch_utils import init_seeds, ModelEMA, select_device, intersect_dicts
+from utils.loss import compute_loss
+from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
+from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
+
+logger = logging.getLogger(__name__)
+
+try:
+ import wandb
+except ImportError:
+ wandb = None
+ logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
+
+def train(hyp, opt, device, tb_writer=None, wandb=None):
+ logger.info(f'Hyperparameters {hyp}')
+ save_dir, epochs, batch_size, total_batch_size, weights, rank = \
+ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
+
+ # Directories
+ wdir = save_dir / 'weights'
+ wdir.mkdir(parents=True, exist_ok=True) # make dir
+ last = wdir / 'last.pt'
+ best = wdir / 'best.pt'
+ results_file = save_dir / 'results.txt'
-
-def train(hyp, opt, device, tb_writer=None):
- print(f'Hyperparameters {hyp}')
- log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve' # logging directory
- wdir = str(log_dir / 'weights') + os.sep # weights directory
- os.makedirs(wdir, exist_ok=True)
- last = wdir + 'last.pt'
- best = wdir + 'best.pt'
- results_file = str(log_dir / 'results.txt')
- epochs, batch_size, total_batch_size, weights, rank = \
- opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
-
- # TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
- with open(log_dir / 'hyp.yaml', 'w') as f:
+ with open(save_dir / 'hyp.yaml', 'w') as f:
yaml.dump(hyp, f, sort_keys=False)
- with open(log_dir / 'opt.yaml', 'w') as f:
+ with open(save_dir / 'opt.yaml', 'w') as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
+ plots = not opt.evolve # create plots
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
+ with torch_distributed_zero_first(rank):
+ check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names
@@ -80,6 +96,10 @@ def train(hyp, opt, device, tb_writer=None):
pg2.append(v) # biases
elif 'Conv2d.weight' in k:
pg1.append(v) # apply weight_decay
+ elif 'm.weight' in k:
+ pg1.append(v) # apply weight_decay
+ elif 'w.weight' in k:
+ pg1.append(v) # apply weight_decay
else:
pg0.append(v) # all else
@@ -90,22 +110,36 @@ def train(hyp, opt, device, tb_writer=None):
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
- print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
+ logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
- lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2 # cosine
+ lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
+ # Logging
+ if wandb and wandb.run is None:
+ opt.hyp = hyp # add hyperparameters
+ wandb_run = wandb.init(config=opt, resume="allow",
+ project='YOLOv4' if opt.project == 'runs/train' else Path(opt.project).stem,
+ name=save_dir.stem,
+ id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
+
# Resume
start_epoch, best_fitness = 0, 0.0
+ best_fitness_p, best_fitness_r, best_fitness_ap50, best_fitness_ap, best_fitness_f = 0.0, 0.0, 0.0, 0.0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
+ best_fitness_p = ckpt['best_fitness_p']
+ best_fitness_r = ckpt['best_fitness_r']
+ best_fitness_ap50 = ckpt['best_fitness_ap50']
+ best_fitness_ap = ckpt['best_fitness_ap']
+ best_fitness_f = ckpt['best_fitness_f']
# Results
if ckpt.get('training_results') is not None:
@@ -114,15 +148,17 @@ def train(hyp, opt, device, tb_writer=None):
# Epochs
start_epoch = ckpt['epoch'] + 1
+ if opt.resume:
+ assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
- print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
- (weights, ckpt['epoch'], epochs))
+ logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
+ (weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
-
+
# Image sizes
- gs = 32 # grid size (max stride)
+    gs = 64  # grid size (max stride); alternative: int(max(model.stride))
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
# DP mode
@@ -132,81 +168,81 @@ def train(hyp, opt, device, tb_writer=None):
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
- print('Using SyncBatchNorm()')
+ logger.info('Using SyncBatchNorm()')
- # Exponential moving average
+ # EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
- model = DDP(model, device_ids=[opt.local_rank], output_device=(opt.local_rank))
+ model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
# Trainloader
- dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True,
- cache=opt.cache_images, rect=opt.rect, local_rank=rank,
- world_size=opt.world_size)
+ dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
+ hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect,
+ rank=rank, world_size=opt.world_size, workers=opt.workers)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
- # Testloader
+ # Process 0
if rank in [-1, 0]:
- ema.updates = start_epoch * nb // accumulate # set EMA updates ***
- # local_rank is set to -1. Because only the first process is expected to do evaluation.
- testloader = create_dataloader(test_path, imgsz_test, batch_size, gs, opt, hyp=hyp, augment=False,
- cache=opt.cache_images, rect=True, local_rank=-1, world_size=opt.world_size)[0]
+ ema.updates = start_epoch * nb // accumulate # set EMA updates
+ testloader = create_dataloader(test_path, imgsz_test, batch_size*2, gs, opt,
+ hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
+ rank=-1, world_size=opt.world_size, workers=opt.workers)[0] # testloader
+
+ if not opt.resume:
+ labels = np.concatenate(dataset.labels, 0)
+ c = torch.tensor(labels[:, 0]) # classes
+ # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
+ # model._initialize_biases(cf.to(device))
+ if plots:
+ plot_labels(labels, save_dir=save_dir)
+ if tb_writer:
+ tb_writer.add_histogram('classes', c, 0)
+ if wandb:
+ wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.png')]})
+
+ # Anchors
+ # if not opt.noautoanchor:
+ # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
# Model parameters
hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
- model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
+ model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
model.names = names
- # Class frequency
- if rank in [-1, 0]:
- labels = np.concatenate(dataset.labels, 0)
- c = torch.tensor(labels[:, 0]) # classes
- # cf = torch.bincount(c.long(), minlength=nc) + 1.
- # model._initialize_biases(cf.to(device))
- plot_labels(labels, save_dir=log_dir)
- if tb_writer:
- tb_writer.add_histogram('classes', c, 0)
-
- # Check anchors
- #if not opt.noautoanchor:
- # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
-
# Start training
t0 = time.time()
- nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
+    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(warmup_epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
- results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
+ results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
- if rank in [0, -1]:
- print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
- print('Using %g dataloader workers' % dataloader.num_workers)
- print('Starting training for %g epochs...' % epochs)
- # torch.autograd.set_detect_anomaly(True)
+ logger.info('Image sizes %g train, %g test\n'
+ 'Using %g dataloader workers\nLogging results to %s\n'
+ 'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs))
+
+ torch.save(model, wdir / 'init.pt')
+
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
- if dataset.image_weights:
+ if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
- w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
- image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
- dataset.indices = random.choices(range(dataset.n), weights=image_weights,
- k=dataset.n) # rand weighted idx
+ cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
+ iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
+ dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Broadcast if DDP
if rank != -1:
- indices = torch.zeros([dataset.n], dtype=torch.int)
- if rank == 0:
- indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
+ indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
@@ -219,8 +255,8 @@ def train(hyp, opt, device, tb_writer=None):
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
+ logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
if rank in [-1, 0]:
- print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
@@ -230,13 +266,13 @@ def train(hyp, opt, device, tb_writer=None):
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
- # model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
+ # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
- x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
+ x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
- x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])
+ x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
# Multi-scale
if opt.multi_scale:
@@ -246,18 +282,12 @@ def train(hyp, opt, device, tb_writer=None):
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
- # Autocast
+ # Forward
with amp.autocast(enabled=cuda):
- # Forward
- pred = model(imgs)
-
- # Loss
- loss, loss_items = compute_loss(pred, targets.to(device), model) # scaled by batch_size
+ pred = model(imgs) # forward
+ loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
- # if not torch.isfinite(loss):
- # print('WARNING: non-finite loss, ending training ', loss_items)
- # return results
# Backward
scaler.scale(loss).backward()
@@ -267,7 +297,7 @@ def train(hyp, opt, device, tb_writer=None):
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
- if ema is not None:
+ if ema:
ema.update(model)
# Print
@@ -279,52 +309,79 @@ def train(hyp, opt, device, tb_writer=None):
pbar.set_description(s)
# Plot
- if ni < 3:
- f = str(log_dir / ('train_batch%g.jpg' % ni)) # filename
- result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
- if tb_writer and result is not None:
- tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
- # tb_writer.add_graph(model, imgs) # add model to tensorboard
+ if plots and ni < 3:
+ f = save_dir / f'train_batch{ni}.jpg' # filename
+ plot_images(images=imgs, targets=targets, paths=paths, fname=f)
+ # if tb_writer:
+ # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
+ # tb_writer.add_graph(model, imgs) # add model to tensorboard
+ elif plots and ni == 3 and wandb:
+ wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})
# end batch ------------------------------------------------------------------------------------------------
+ # end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
+ lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
- if ema is not None:
+ if ema:
ema.update_attr(model)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
- results, maps, times = test.test(opt.data,
- batch_size=batch_size,
+ if epoch >= 3:
+ results, maps, times = test.test(opt.data,
+ batch_size=batch_size*2,
imgsz=imgsz_test,
- save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
model=ema.ema.module if hasattr(ema.ema, 'module') else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
- save_dir=log_dir)
+ save_dir=save_dir,
+ plots=plots and final_epoch,
+ log_imgs=opt.log_imgs if wandb else 0)
# Write
with open(results_file, 'a') as f:
- f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
+ f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
if len(opt.name) and opt.bucket:
os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
- # Tensorboard
- if tb_writer:
- tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
- 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
- 'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
- for x, tag in zip(list(mloss[:-1]) + list(results), tags):
- tb_writer.add_scalar(tag, x, epoch)
+ # Log
+ tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
+ 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
+ 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
+ 'x/lr0', 'x/lr1', 'x/lr2'] # params
+ for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
+ if tb_writer:
+ tb_writer.add_scalar(tag, x, epoch) # tensorboard
+ if wandb:
+ wandb.log({tag: x}) # W&B
# Update best mAP
- fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1]
+ fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
+            fi_p = fitness_p(np.array(results).reshape(1, -1))  # precision-weighted fitness
+            fi_r = fitness_r(np.array(results).reshape(1, -1))  # recall-weighted fitness
+            fi_ap50 = fitness_ap50(np.array(results).reshape(1, -1))  # mAP@.5-weighted fitness
+            fi_ap = fitness_ap(np.array(results).reshape(1, -1))  # mAP@.5-.95-weighted fitness
+            if (fi_p > 0.0) or (fi_r > 0.0):
+                fi_f = fitness_f(np.array(results).reshape(1, -1))  # F-measure fitness from P and R
+ else:
+ fi_f = 0.0
if fi > best_fitness:
best_fitness = fi
+ if fi_p > best_fitness_p:
+ best_fitness_p = fi_p
+ if fi_r > best_fitness_r:
+ best_fitness_r = fi_r
+ if fi_ap50 > best_fitness_ap50:
+ best_fitness_ap50 = fi_ap50
+ if fi_ap > best_fitness_ap:
+ best_fitness_ap = fi_ap
+ if fi_f > best_fitness_f:
+ best_fitness_f = fi_f
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
@@ -332,124 +389,171 @@ def train(hyp, opt, device, tb_writer=None):
with open(results_file, 'r') as f: # create checkpoint
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
+ 'best_fitness_p': best_fitness_p,
+ 'best_fitness_r': best_fitness_r,
+ 'best_fitness_ap50': best_fitness_ap50,
+ 'best_fitness_ap': best_fitness_ap,
+ 'best_fitness_f': best_fitness_f,
'training_results': f.read(),
'model': ema.ema.module.state_dict() if hasattr(ema, 'module') else ema.ema.state_dict(),
- 'optimizer': None if final_epoch else optimizer.state_dict()}
+ 'optimizer': None if final_epoch else optimizer.state_dict(),
+ 'wandb_id': wandb_run.id if wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
- if epoch >= (epochs-5):
- torch.save(ckpt, last.replace('.pt','_{:03d}.pt'.format(epoch)))
- if (best_fitness == fi) and not final_epoch:
+ if best_fitness == fi:
torch.save(ckpt, best)
+ if (best_fitness == fi) and (epoch >= 200):
+ torch.save(ckpt, wdir / 'best_{:03d}.pt'.format(epoch))
+ if best_fitness == fi:
+ torch.save(ckpt, wdir / 'best_overall.pt')
+ if best_fitness_p == fi_p:
+ torch.save(ckpt, wdir / 'best_p.pt')
+ if best_fitness_r == fi_r:
+ torch.save(ckpt, wdir / 'best_r.pt')
+ if best_fitness_ap50 == fi_ap50:
+ torch.save(ckpt, wdir / 'best_ap50.pt')
+ if best_fitness_ap == fi_ap:
+ torch.save(ckpt, wdir / 'best_ap.pt')
+ if best_fitness_f == fi_f:
+ torch.save(ckpt, wdir / 'best_f.pt')
+ if epoch == 0:
+ torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
+ if ((epoch+1) % 25) == 0:
+ torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
+ if epoch >= (epochs-5):
+ torch.save(ckpt, wdir / 'last_{:03d}.pt'.format(epoch))
+ elif epoch >= 420:
+ torch.save(ckpt, wdir / 'last_{:03d}.pt'.format(epoch))
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
- n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name
- fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
- for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
- if os.path.exists(f1):
+ n = opt.name if opt.name.isnumeric() else ''
+ fresults, flast, fbest = save_dir / f'results{n}.txt', wdir / f'last{n}.pt', wdir / f'best{n}.pt'
+ for f1, f2 in zip([wdir / 'last.pt', wdir / 'best.pt', results_file], [flast, fbest, fresults]):
+ if f1.exists():
os.rename(f1, f2) # rename
- ispt = f2.endswith('.pt') # is *.pt
- strip_optimizer(f2) if ispt else None # strip optimizer
- os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None # upload
+ if str(f2).endswith('.pt'): # is *.pt
+ strip_optimizer(f2) # strip optimizer
+ os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None # upload
# Finish
- if not opt.evolve:
- plot_results(save_dir=log_dir) # save as results.png
- print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
+ if plots:
+ plot_results(save_dir=save_dir) # save as results.png
+ if wandb:
+ wandb.log({"Results": [wandb.Image(str(save_dir / x), caption=x) for x in
+ ['results.png', 'precision-recall_curve.png']]})
+ logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
+ else:
+ dist.destroy_process_group()
- dist.destroy_process_group() if rank not in [-1, 0] else None
+ wandb.run.finish() if wandb and wandb.run else None
torch.cuda.empty_cache()
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument('--weights', type=str, default='yolov4.pt', help='initial weights path')
+ parser.add_argument('--weights', type=str, default='yolov4-csp.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
- parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
- parser.add_argument('--hyp', type=str, default='', help='hyperparameters path, i.e. data/hyp.scratch.yaml')
+ parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path')
+ parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
- parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='train,test sizes')
+ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
parser.add_argument('--rect', action='store_true', help='rectangular training')
- parser.add_argument('--resume', nargs='?', const='get_last', default=False,
- help='resume from given path/last.pt, or most recent run if blank')
+ parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
- parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
+ parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
- parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')
+ parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
+ parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
+ parser.add_argument('--project', default='runs/train', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
- # Resume
- if opt.resume:
- last = get_latest_run() if opt.resume == 'get_last' else opt.resume # resume from most recent run
- if last and not opt.weights:
- print(f'Resuming training from {last}')
- opt.weights = last if opt.resume and not opt.weights else opt.weights
- if opt.local_rank == -1 or ("RANK" in os.environ and os.environ["RANK"] == "0"):
+ # Set DDP variables
+ opt.total_batch_size = opt.batch_size
+ opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
+ opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
+ set_logging(opt.global_rank)
+ if opt.global_rank in [-1, 0]:
check_git_status()
- opt.hyp = opt.hyp or ('data/hyp.scratch.yaml')
- opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
- assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
-
- opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
- device = select_device(opt.device, batch_size=opt.batch_size)
- opt.total_batch_size = opt.batch_size
- opt.world_size = 1
- opt.global_rank = -1
+ # Resume
+ if opt.resume: # resume an interrupted run
+ ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
+ assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
+ with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
+ opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
+ opt.cfg, opt.weights, opt.resume = '', ckpt, True
+ logger.info('Resuming training from %s' % ckpt)
+ else:
+ # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
+ opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
+ assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
+ opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
+ opt.name = 'evolve' if opt.evolve else opt.name
+ opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
# DDP mode
+ device = select_device(opt.device, batch_size=opt.batch_size)
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
- opt.world_size = dist.get_world_size()
- opt.global_rank = dist.get_rank()
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
opt.batch_size = opt.total_batch_size // opt.world_size
- print(opt)
+ # Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps
+ if 'box' not in hyp:
+ warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' %
+ (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120'))
+ hyp['box'] = hyp.pop('giou')
# Train
+ logger.info(opt)
if not opt.evolve:
- tb_writer = None
+ tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
- print('Start Tensorboard with "tensorboard --logdir %s", view at http://localhost:6006/' % opt.logdir)
- tb_writer = SummaryWriter(log_dir=increment_dir(Path(opt.logdir) / 'exp', opt.name)) # runs/exp
-
- train(hyp, opt, device, tb_writer)
+ logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
+ tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
+ train(hyp, opt, device, tb_writer, wandb)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
- 'momentum': (0.1, 0.6, 0.98), # SGD momentum/Adam beta1
+ 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
+ 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
- 'giou': (1, 0.02, 0.2), # GIoU loss gain
+ 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
+ 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
+ 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
+ 'box': (1, 0.02, 0.2), # box loss gain
'cls': (1, 0.2, 4.0), # cls loss gain
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
'iou_t': (0, 0.1, 0.7), # IoU training threshold
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
+ 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
@@ -458,20 +562,21 @@ def train(hyp, opt, device, tb_writer=None):
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
- 'perspective': (1, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
- 'flipud': (0, 0.0, 1.0), # image flip up-down (probability)
- 'fliplr': (1, 0.0, 1.0), # image flip left-right (probability)
+ 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
+ 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
+ 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
+                'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
'mixup': (1, 0.0, 1.0)} # image mixup (probability)
assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
opt.notest, opt.nosave = True, True # only test/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
- yaml_file = Path('runs/evolve/hyp_evolved.yaml') # save best result here
+ yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
- for _ in range(100): # generations to evolve
- if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate
+ for _ in range(300): # generations to evolve
+ if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
@@ -485,7 +590,7 @@ def train(hyp, opt, device, tb_writer=None):
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
- mp, s = 0.9, 0.2 # mutation probability, sigma
+ mp, s = 0.8, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
@@ -503,12 +608,12 @@ def train(hyp, opt, device, tb_writer=None):
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
- results = train(hyp.copy(), opt, device)
+ results = train(hyp.copy(), opt, device, wandb=wandb)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
- print('Hyperparameter evolution complete. Best results saved as: %s\nCommand to train a new model with these '
- 'hyperparameters: $ python train.py --hyp %s' % (yaml_file, yaml_file))
+ print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
+ f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
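One detail worth isolating from this train.py rewrite: the new scheduler lambda replaces the hard-coded `* 0.8 + 0.2` cosine with one parameterized by `hyp['lrf']`, so the learning rate decays from `lr0` at epoch 0 to `lr0 * lrf` at the final epoch. Evaluated standalone with illustrative values:

```python
import math

epochs, lrf = 300, 0.2  # illustrative stand-ins for opt.epochs and hyp['lrf']
lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf
print(lf(0), lf(epochs // 2), lf(epochs))  # 1.0, ~0.6, ~0.2
```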
From 7df8141933ac638ebf35e66b28839fa2e57b183f Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:31:19 +0800
Subject: [PATCH 11/37] Update test.py
---
test.py | 162 ++++++++++++++++++++++++++++++++++----------------------
1 file changed, 98 insertions(+), 64 deletions(-)
diff --git a/test.py b/test.py
index f5d2433..bd99b59 100644
--- a/test.py
+++ b/test.py
@@ -2,7 +2,6 @@
import glob
import json
import os
-import shutil
from pathlib import Path
import numpy as np
@@ -10,15 +9,16 @@
import yaml
from tqdm import tqdm
-from models.experimental import attempt_load
+from utils.google_utils import attempt_load
from utils.datasets import create_dataloader
-from utils.general import (
- coco80_to_coco91_class, check_file, check_img_size, compute_loss, non_max_suppression,
- scale_coords, xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class)
+from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \
+ non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, clip_coords, set_logging, increment_path
+from utils.loss import compute_loss
+from utils.metrics import ap_per_class
+from utils.plots import plot_images, output_to_target
from utils.torch_utils import select_device, time_synchronized
from models.models import *
-#from utils.datasets import *
def load_classes(path):
# Loads *.names file at 'path'
@@ -27,7 +27,6 @@ def load_classes(path):
return list(filter(None, names)) # filter removes empty strings (such as last line)
-
def test(data,
weights=None,
batch_size=16,
@@ -40,26 +39,25 @@ def test(data,
verbose=False,
model=None,
dataloader=None,
- save_dir='',
- merge=False,
- save_txt=False):
+ save_dir=Path(''), # for saving images
+ save_txt=False, # for auto-labelling
+ save_conf=False,
+ plots=True,
+ log_imgs=0): # number of logged images
+
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
+ set_logging()
device = select_device(opt.device, batch_size=batch_size)
- merge, save_txt = opt.merge, opt.save_txt # use Merge NMS, save *.txt labels
- if save_txt:
- out = Path('inference/output')
- if os.path.exists(out):
- shutil.rmtree(out) # delete output folder
- os.makedirs(out) # make new output folder
+ save_txt = opt.save_txt # save *.txt labels
- # Remove previous
- for f in glob.glob(str(Path(save_dir) / 'test_batch*.jpg')):
- os.remove(f)
+ # Directories
+ save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
+ (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = Darknet(opt.cfg).to(device)
@@ -71,7 +69,7 @@ def test(data,
model.load_state_dict(ckpt['model'], strict=False)
except:
load_darknet_weights(model, weights[0])
- imgsz = check_img_size(imgsz, s=32) # check img_size
+ imgsz = check_img_size(imgsz, s=64) # check img_size
# Half
half = device.type != 'cpu' # half precision only supported on CUDA
@@ -80,19 +78,27 @@ def test(data,
# Configure
model.eval()
+ is_coco = data.endswith('coco.yaml') # is COCO dataset
with open(data) as f:
data = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
+ # Logging
+    log_imgs, wandb = min(log_imgs, 100), None  # cap logged images at 100
+ try:
+ import wandb # Weights & Biases
+ except ImportError:
+ log_imgs = 0
+
# Dataloader
if not training:
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
- dataloader = create_dataloader(path, imgsz, batch_size, 32, opt,
- hyp=None, augment=False, cache=False, pad=0.5, rect=True)[0]
+ dataloader = create_dataloader(path, imgsz, batch_size, 64, opt, pad=0.5, rect=True)[0]
seen = 0
try:
@@ -103,7 +109,7 @@ def test(data,
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
- jdict, stats, ap, ap_class = [], [], [], []
+ jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
@@ -121,11 +127,11 @@ def test(data,
# Compute loss
if training: # if model has loss hyperparameters
- loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # GIoU, obj, cls
+ loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls
# Run NMS
t = time_synchronized()
- output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, merge=merge)
+ output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)
t1 += time_synchronized() - t
# Statistics per image
@@ -135,20 +141,32 @@ def test(data,
tcls = labels[:, 0].tolist() if nl else [] # target class
seen += 1
- if pred is None:
+ if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Append to text file
+ path = Path(paths[si])
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
- txt_path = str(out / Path(paths[si]).stem)
- pred[:, :4] = scale_coords(img[si].shape[1:], pred[:, :4], shapes[si][0], shapes[si][1]) # to original
- for *xyxy, conf, cls in pred:
+ x = pred.clone()
+ x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4], shapes[si][0], shapes[si][1]) # to original
+ for *xyxy, conf, cls in x:
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
- with open(txt_path + '.txt', 'a') as f:
- f.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format
+ line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
+ with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
+ f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+ # W&B logging
+ if plots and len(wandb_images) < log_imgs:
+ box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+ "class_id": int(cls),
+ "box_caption": "%s %.3f" % (names[cls], conf),
+ "scores": {"class_score": conf},
+ "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
+ boxes = {"predictions": {"box_data": box_data, "class_labels": names}}
+ wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
# Clip boxes to image bounds
clip_coords(pred, (height, width))
@@ -156,14 +174,14 @@ def test(data,
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
- image_id = Path(paths[si]).stem
+ image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = pred[:, :4].clone() # xyxy
scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape
box = xyxy2xywh(box) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
- jdict.append({'image_id': int(image_id) if image_id.isnumeric() else image_id,
- 'category_id': coco91class[int(p[5])],
+ jdict.append({'image_id': image_id,
+ 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
@@ -187,9 +205,11 @@ def test(data,
ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
+ detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
- if d not in detected:
+ if d.item() not in detected_set:
+ detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
@@ -199,22 +219,27 @@ def test(data,
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
- if batch_i < 1:
- f = Path(save_dir) / ('test_batch%g_gt.jpg' % batch_i) # filename
- plot_images(img, targets, paths, str(f), names) # ground truth
- f = Path(save_dir) / ('test_batch%g_pred.jpg' % batch_i)
- plot_images(img, output_to_target(output, width, height), paths, str(f), names) # predictions
+ if plots and batch_i < 3:
+ f = save_dir / f'test_batch{batch_i}_labels.jpg' # filename
+ plot_images(img, targets, paths, f, names) # labels
+ f = save_dir / f'test_batch{batch_i}_pred.jpg'
+ plot_images(img, output_to_target(output, width, height), paths, f, names) # predictions
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
- p, r, ap, f1, ap_class = ap_per_class(*stats)
+ p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, fname=save_dir / 'precision-recall_curve.png')
p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
+ # W&B logging
+ if plots and wandb:
+ wandb.log({"Images": wandb_images})
+ wandb.log({"Validation": [wandb.Image(str(x), caption=x.name) for x in sorted(save_dir.glob('test*.jpg'))]})
+
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
@@ -231,29 +256,32 @@ def test(data,
# Save JSON
if save_json and len(jdict):
- f = 'detections_val2017_%s_results.json' % \
- (weights.split(os.sep)[-1].replace('.pt', '') if isinstance(weights, str) else '') # filename
- print('\nCOCO mAP with pycocotools... saving %s...' % f)
- with open(f, 'w') as file:
- json.dump(jdict, file)
+ w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
+ anno_json = glob.glob('../coco/annotations/instances_val*.json')[0] # annotations json
+ pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
+ print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
+ with open(pred_json, 'w') as f:
+ json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
- imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]
- cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0]) # initialize COCO ground truth api
- cocoDt = cocoGt.loadRes(f) # initialize COCO pred api
- cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
- cocoEval.params.imgIds = imgIds # image IDs to evaluate
- cocoEval.evaluate()
- cocoEval.accumulate()
- cocoEval.summarize()
- map, map50 = cocoEval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
+ anno = COCO(anno_json) # init annotations api
+ pred = anno.loadRes(pred_json) # init predictions api
+ eval = COCOeval(anno, pred, 'bbox')
+ if is_coco:
+ eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
+ eval.evaluate()
+ eval.accumulate()
+ eval.summarize()
+ map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print('ERROR: pycocotools unable to run: %s' % e)
# Return results
+ if not training:
+ print('Results saved to %s' % save_dir)
model.float() # for training
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
@@ -263,21 +291,24 @@ def test(data,
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
- parser.add_argument('--weights', nargs='+', type=str, default='yolov4.pt', help='model.pt path(s)')
- parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov4-csp.pt', help='model.pt path(s)')
+ parser.add_argument('--data', type=str, default='data/coco.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
- parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
- parser.add_argument('--merge', action='store_true', help='use Merge NMS')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
- parser.add_argument('--cfg', type=str, default='cfg/yolov4.cfg', help='*.cfg path')
+ parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+ parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
+ parser.add_argument('--project', default='runs/test', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--cfg', type=str, default='models/yolov4-csp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
@@ -294,12 +325,15 @@ def test(data,
opt.save_json,
opt.single_cls,
opt.augment,
- opt.verbose)
+ opt.verbose,
+ save_txt=opt.save_txt,
+ save_conf=opt.save_conf,
+ )
elif opt.task == 'study': # run over a range of settings and save/plot
- for weights in ['']:
+ for weights in ['yolov4-csp.pt', 'yolov4-csp-x.pt']:
f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
- x = list(range(352, 832, 64)) # x axis
+ x = list(range(320, 800, 64)) # x axis
y = [] # y axis
for i in x: # img-size
print('\nRunning %s point %s...' % (f, i))
@@ -307,4 +341,4 @@ def test(data,
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
- # plot_study_txt(f, x) # plot
+ # utils.general.plot_study_txt(f, x) # plot
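For reference, the new `--save-txt`/`--save-conf` path in this patch writes one detection per line in normalized xywh format under `save_dir/labels/`. A minimal standalone sketch of that label writer (the `%g` formatting and file layout are taken from the hunk above; the sample values are made up):

```
from pathlib import Path

def write_label(save_dir, stem, cls, xywh, conf=None):
    # one detection per line: "cls x y w h [conf]", all '%g'-formatted
    line = (cls, *xywh, conf) if conf is not None else (cls, *xywh)
    out = Path(save_dir) / 'labels'
    out.mkdir(parents=True, exist_ok=True)
    with open(out / (stem + '.txt'), 'a') as f:
        f.write(('%g ' * len(line)).rstrip() % line + '\n')

write_label('runs/test/exp', '000000000139', 0, (0.5, 0.5, 0.2, 0.3), 0.92)
# appends: "0 0.5 0.5 0.2 0.3 0.92"
```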
From 372abc8a51a9157121722f3fd22faccb2011ca64 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:32:19 +0800
Subject: [PATCH 12/37] Update detect.py
---
detect.py | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/detect.py b/detect.py
index d35059e..d871194 100644
--- a/detect.py
+++ b/detect.py
@@ -10,14 +10,14 @@
import torch.backends.cudnn as cudnn
from numpy import random
-from models.experimental import attempt_load
+from utils.google_utils import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
- check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
+ check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer)
+from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
from models.models import *
-from models.experimental import *
from utils.datasets import *
from utils.general import *
@@ -41,11 +41,7 @@ def detect(save_img=False):
# Load model
model = Darknet(cfg, imgsz).cuda()
- try:
- model.load_state_dict(torch.load(weights[0], map_location=device)['model'])
- except:
- model = model.to(device)
- load_darknet_weights(model, weights[0])
+ model.load_state_dict(torch.load(weights[0], map_location=device)['model'])
#model = attempt_load(weights, map_location=device) # load FP32 model
#imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
model.to(device).eval()
@@ -67,7 +63,7 @@ def detect(save_img=False):
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
- dataset = LoadImages(source, img_size=imgsz)
+ dataset = LoadImages(source, img_size=imgsz, auto_size=64)
# Get names and colors
names = load_classes(names)
@@ -163,7 +159,7 @@ def detect(save_img=False):
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument('--weights', nargs='+', type=str, default='yolov4.pt', help='model.pt path(s)')
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov4-csp.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam
parser.add_argument('--output', type=str, default='inference/output', help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
@@ -176,7 +172,7 @@ def detect(save_img=False):
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
- parser.add_argument('--cfg', type=str, default='cfg/yolov4.cfg', help='*.cfg path')
+ parser.add_argument('--cfg', type=str, default='models/yolov4-csp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
opt = parser.parse_args()
print(opt)
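The `auto_size=64` argument now passed to `LoadImages` pads letterboxed shapes up to a multiple of 64, presumably so every feature map of the Darknet cfg divides evenly. A toy sketch of that rounding (the helper name is ours, not the repo's):

```
import math

def round_to_stride(x, auto_size=64):
    # round a dimension up to the nearest multiple of auto_size
    return int(math.ceil(x / auto_size) * auto_size)

print(round_to_stride(500))  # 512
print(round_to_stride(640))  # 640
```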
From 3369da75543ae102d21b0c942bd3b3a302177e50 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:33:45 +0800
Subject: [PATCH 13/37] Update hyp.scratch.yaml
---
data/hyp.scratch.yaml | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/data/hyp.scratch.yaml b/data/hyp.scratch.yaml
index fa8c9fd..00e458a 100644
--- a/data/hyp.scratch.yaml
+++ b/data/hyp.scratch.yaml
@@ -1,27 +1,28 @@
-# Hyperparameters for COCO training from scratch
-# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300
-# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
-
-
lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
+lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937 # SGD momentum/Adam beta1
weight_decay: 0.0005 # optimizer weight decay 5e-4
-giou: 0.05 # GIoU loss gain
-cls: 0.5 # cls loss gain
+warmup_epochs: 3.0 # warmup epochs (fractions ok)
+warmup_momentum: 0.8 # warmup initial momentum
+warmup_bias_lr: 0.1 # warmup initial bias lr
+box: 0.05 # box loss gain
+cls: 0.3 # cls loss gain
cls_pw: 1.0 # cls BCELoss positive_weight
-obj: 1.0 # obj loss gain (scale with pixels)
+obj: 0.7 # obj loss gain (scale with pixels)
obj_pw: 1.0 # obj BCELoss positive_weight
iou_t: 0.20 # IoU training threshold
anchor_t: 4.0 # anchor-multiple threshold
+# anchors: 3 # anchors per output layer (0 to ignore)
fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4 # image HSV-Value augmentation (fraction)
degrees: 0.0 # image rotation (+/- deg)
-translate: 0.0 # image translation (+/- fraction)
-scale: 0.5 # image scale (+/- gain)
+translate: 0.1 # image translation (+/- fraction)
+scale: 0.9 # image scale (+/- gain)
shear: 0.0 # image shear (+/- deg)
perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
+mosaic: 1.0 # image mosaic (probability)
mixup: 0.0 # image mixup (probability)
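The new `lrf` key defines the final learning rate as `lr0 * lrf`. In ultralytics-style trainers this is typically consumed by a cosine one-cycle lambda; a short sketch under that assumption:

```
import math

lr0, lrf, epochs = 0.01, 0.2, 300
lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf
print(lr0 * lf(0))       # 0.01  (initial lr)
print(lr0 * lf(epochs))  # 0.002 (lr0 * lrf at the end)
```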
From 1fd1c6d701babd5287aa079cef9325443f1c2c4b Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:34:36 +0800
Subject: [PATCH 14/37] Delete common.py
---
models/common.py | 188 -----------------------------------------------
1 file changed, 188 deletions(-)
delete mode 100644 models/common.py
diff --git a/models/common.py b/models/common.py
deleted file mode 100644
index a11240b..0000000
--- a/models/common.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# This file contains modules common to various models
-import math
-
-import torch
-import torch.nn as nn
-
-from mish_cuda import MishCuda as Mish
-
-
-def autopad(k, p=None): # kernel, padding
- # Pad to 'same'
- if p is None:
- p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
- return p
-
-
-def DWConv(c1, c2, k=1, s=1, act=True):
- # Depthwise convolution
- return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
-
-
-class Conv(nn.Module):
- # Standard convolution
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
- super(Conv, self).__init__()
- self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
- self.bn = nn.BatchNorm2d(c2)
- self.act = Mish() if act else nn.Identity()
-
- def forward(self, x):
- return self.act(self.bn(self.conv(x)))
-
- def fuseforward(self, x):
- return self.act(self.conv(x))
-
-
-class Bottleneck(nn.Module):
- # Standard bottleneck
- def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
- super(Bottleneck, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_, c2, 3, 1, g=g)
- self.add = shortcut and c1 == c2
-
- def forward(self, x):
- return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
-class BottleneckCSP(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(BottleneckCSP, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
- self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
- self.cv4 = Conv(2 * c_, c2, 1, 1)
- self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
- self.act = Mish()
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- y1 = self.cv3(self.m(self.cv1(x)))
- y2 = self.cv2(x)
- return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
-
-
-class BottleneckCSP2(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(BottleneckCSP2, self).__init__()
- c_ = int(c2) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)
- self.cv3 = Conv(2 * c_, c2, 1, 1)
- self.bn = nn.BatchNorm2d(2 * c_)
- self.act = Mish()
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- x1 = self.cv1(x)
- y1 = self.m(x1)
- y2 = self.cv2(x1)
- return self.cv3(self.act(self.bn(torch.cat((y1, y2), dim=1))))
-
-
-class VoVCSP(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(VoVCSP, self).__init__()
- c_ = int(c2) # hidden channels
- self.cv1 = Conv(c1//2, c_//2, 3, 1)
- self.cv2 = Conv(c_//2, c_//2, 3, 1)
- self.cv3 = Conv(c_, c2, 1, 1)
-
- def forward(self, x):
- _, x1 = x.chunk(2, dim=1)
- x1 = self.cv1(x1)
- x2 = self.cv2(x1)
- return self.cv3(torch.cat((x1,x2), dim=1))
-
-
-class SPP(nn.Module):
- # Spatial pyramid pooling layer used in YOLOv3-SPP
- def __init__(self, c1, c2, k=(5, 9, 13)):
- super(SPP, self).__init__()
- c_ = c1 // 2 # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
- self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
-
- def forward(self, x):
- x = self.cv1(x)
- return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
-
-
-class SPPCSP(nn.Module):
- # CSP SPP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
- super(SPPCSP, self).__init__()
- c_ = int(2 * c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
- self.cv3 = Conv(c_, c_, 3, 1)
- self.cv4 = Conv(c_, c_, 1, 1)
- self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
- self.cv5 = Conv(4 * c_, c_, 1, 1)
- self.cv6 = Conv(c_, c_, 3, 1)
- self.bn = nn.BatchNorm2d(2 * c_)
- self.act = Mish()
- self.cv7 = Conv(2 * c_, c2, 1, 1)
-
- def forward(self, x):
- x1 = self.cv4(self.cv3(self.cv1(x)))
- y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))
- y2 = self.cv2(x)
- return self.cv7(self.act(self.bn(torch.cat((y1, y2), dim=1))))
-
-
-class MP(nn.Module):
- # Spatial pyramid pooling layer used in YOLOv3-SPP
- def __init__(self, k=2):
- super(MP, self).__init__()
- self.m = nn.MaxPool2d(kernel_size=k, stride=k)
-
- def forward(self, x):
- return self.m(x)
-
-
-class Focus(nn.Module):
- # Focus wh information into c-space
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
- super(Focus, self).__init__()
- self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
-
- def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
- return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
-
-
-class Concat(nn.Module):
- # Concatenate a list of tensors along dimension
- def __init__(self, dimension=1):
- super(Concat, self).__init__()
- self.d = dimension
-
- def forward(self, x):
- return torch.cat(x, self.d)
-
-
-class Flatten(nn.Module):
- # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
- @staticmethod
- def forward(x):
- return x.view(x.size(0), -1)
-
-
-class Classify(nn.Module):
- # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
- super(Classify, self).__init__()
- self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
- self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) # to x(b,c2,1,1)
- self.flat = Flatten()
-
- def forward(self, x):
- z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
- return self.flat(self.conv(z)) # flatten to x(b,c2)
\ No newline at end of file
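The modules deleted above all imported Mish from the mish-cuda extension. For readers without that extension, the same activation can be written in plain PyTorch (a reference sketch, not the optimized CUDA kernel):

```
import torch
import torch.nn.functional as F

def mish(x):
    # Mish(x) = x * tanh(softplus(x))
    return x * torch.tanh(F.softplus(x))

print(mish(torch.tensor([-1.0, 0.0, 1.0])))
```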
From c369792da7395c9b45887246788e5fa6c0f4c314 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:34:45 +0800
Subject: [PATCH 15/37] Delete experimental.py
---
models/experimental.py | 145 -----------------------------------------
1 file changed, 145 deletions(-)
delete mode 100644 models/experimental.py
diff --git a/models/experimental.py b/models/experimental.py
deleted file mode 100644
index 1b99ce4..0000000
--- a/models/experimental.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# This file contains experimental modules
-
-import numpy as np
-import torch
-import torch.nn as nn
-
-from models.common import Conv, DWConv
-from utils.google_utils import attempt_download
-
-
-class CrossConv(nn.Module):
- # Cross Convolution Downsample
- def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
- # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
- super(CrossConv, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, (1, k), (1, s))
- self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
- self.add = shortcut and c1 == c2
-
- def forward(self, x):
- return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
-class C3(nn.Module):
- # Cross Convolution CSP
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(C3, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
- self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
- self.cv4 = Conv(2 * c_, c2, 1, 1)
- self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
- self.act = nn.LeakyReLU(0.1, inplace=True)
- self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
-
- def forward(self, x):
- y1 = self.cv3(self.m(self.cv1(x)))
- y2 = self.cv2(x)
- return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
-
-
-class Sum(nn.Module):
- # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
- def __init__(self, n, weight=False): # n: number of inputs
- super(Sum, self).__init__()
- self.weight = weight # apply weights boolean
- self.iter = range(n - 1) # iter object
- if weight:
- self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights
-
- def forward(self, x):
- y = x[0] # no weight
- if self.weight:
- w = torch.sigmoid(self.w) * 2
- for i in self.iter:
- y = y + x[i + 1] * w[i]
- else:
- for i in self.iter:
- y = y + x[i + 1]
- return y
-
-
-class GhostConv(nn.Module):
- # Ghost Convolution https://github.com/huawei-noah/ghostnet
- def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
- super(GhostConv, self).__init__()
- c_ = c2 // 2 # hidden channels
- self.cv1 = Conv(c1, c_, k, s, g, act)
- self.cv2 = Conv(c_, c_, 5, 1, c_, act)
-
- def forward(self, x):
- y = self.cv1(x)
- return torch.cat([y, self.cv2(y)], 1)
-
-
-class GhostBottleneck(nn.Module):
- # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
- def __init__(self, c1, c2, k, s):
- super(GhostBottleneck, self).__init__()
- c_ = c2 // 2
- self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
- DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
- GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
- self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
- Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
-
- def forward(self, x):
- return self.conv(x) + self.shortcut(x)
-
-
-class MixConv2d(nn.Module):
- # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
- def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
- super(MixConv2d, self).__init__()
- groups = len(k)
- if equal_ch: # equal c_ per group
- i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
- c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
- else: # equal weight.numel() per group
- b = [c2] + [0] * groups
- a = np.eye(groups + 1, groups, k=-1)
- a -= np.roll(a, 1, axis=1)
- a *= np.array(k) ** 2
- a[0] = 1
- c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
-
- self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
- self.bn = nn.BatchNorm2d(c2)
- self.act = nn.LeakyReLU(0.1, inplace=True)
-
- def forward(self, x):
- return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
-
-
-class Ensemble(nn.ModuleList):
- # Ensemble of models
- def __init__(self):
- super(Ensemble, self).__init__()
-
- def forward(self, x, augment=False):
- y = []
- for module in self:
- y.append(module(x, augment)[0])
- # y = torch.stack(y).max(0)[0] # max ensemble
- # y = torch.cat(y, 1) # nms ensemble
- y = torch.stack(y).mean(0) # mean ensemble
- return y, None # inference, train output
-
-
-def attempt_load(weights, map_location=None):
- # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
- model = Ensemble()
- for w in weights if isinstance(weights, list) else [weights]:
- attempt_download(w)
- model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model
-
- if len(model) == 1:
- return model[-1] # return model
- else:
- print('Ensemble created with %s\n' % weights)
- for k in ['names', 'stride']:
- setattr(model, k, getattr(model[-1], k))
- return model # return ensemble
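The `Ensemble.forward` being deleted here fused several models by stacking their outputs and averaging over the model dimension. A toy illustration of that mean-ensemble step with fake predictions:

```
import torch

preds = [torch.rand(1, 100, 85) for _ in range(3)]  # fake outputs from 3 models
fused = torch.stack(preds).mean(0)                   # mean ensemble
print(fused.shape)  # torch.Size([1, 100, 85])
```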
From d60b350451a37ede480c54b181f7658807c0e8b8 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:35:27 +0800
Subject: [PATCH 16/37] Update export.py
---
models/export.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/models/export.py b/models/export.py
index d91813a..947a7a8 100644
--- a/models/export.py
+++ b/models/export.py
@@ -6,7 +6,7 @@
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument('--weights', type=str, default='./yolov4.pt', help='weights path')
+ parser.add_argument('--weights', type=str, default='./yolov4-csp.pt', help='weights path')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
opt = parser.parse_args()
From 3ebdfcb4e311c7686ef743af8f116a6a21027e16 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:35:42 +0800
Subject: [PATCH 17/37] Delete yolo.py
---
models/yolo.py | 259 -------------------------------------------------
1 file changed, 259 deletions(-)
delete mode 100644 models/yolo.py
diff --git a/models/yolo.py b/models/yolo.py
deleted file mode 100644
index 4bde2c0..0000000
--- a/models/yolo.py
+++ /dev/null
@@ -1,259 +0,0 @@
-import argparse
-import math
-from copy import deepcopy
-from pathlib import Path
-
-import torch
-import torch.nn as nn
-
-from models.common import *
-from models.experimental import MixConv2d, CrossConv, C3
-from utils.general import check_anchor_order, make_divisible, check_file
-from utils.torch_utils import (
- time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, select_device)
-
-
-class Detect(nn.Module):
- def __init__(self, nc=80, anchors=(), ch=()): # detection layer
- super(Detect, self).__init__()
- self.stride = None # strides computed during build
- self.nc = nc # number of classes
- self.no = nc + 5 # number of outputs per anchor
- self.nl = len(anchors) # number of detection layers
- self.na = len(anchors[0]) // 2 # number of anchors
- self.grid = [torch.zeros(1)] * self.nl # init grid
- a = torch.tensor(anchors).float().view(self.nl, -1, 2)
- self.register_buffer('anchors', a) # shape(nl,na,2)
- self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
- self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
- self.export = False # onnx export
-
- def forward(self, x):
- # x = x.copy() # for profiling
- z = [] # inference output
- self.training |= self.export
- for i in range(self.nl):
- x[i] = self.m[i](x[i]) # conv
- bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
- x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
- if not self.training: # inference
- if self.grid[i].shape[2:4] != x[i].shape[2:4]:
- self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
-
- y = x[i].sigmoid()
- y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
- y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
- z.append(y.view(bs, -1, self.no))
-
- return x if self.training else (torch.cat(z, 1), x)
-
- @staticmethod
- def _make_grid(nx=20, ny=20):
- yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
- return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
-
-
-class Model(nn.Module):
- def __init__(self, cfg='yolov4.yaml', ch=3, nc=None): # model, input channels, number of classes
- super(Model, self).__init__()
- if isinstance(cfg, dict):
- self.yaml = cfg # model dict
- else: # is *.yaml
- import yaml # for torch hub
- self.yaml_file = Path(cfg).name
- with open(cfg) as f:
- self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
-
- # Define model
- if nc and nc != self.yaml['nc']:
- print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc))
- self.yaml['nc'] = nc # override yaml value
- self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist, ch_out
- # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
-
- # Build strides, anchors
- m = self.model[-1] # Detect()
- if isinstance(m, Detect):
- s = 128 # 2x min stride
- m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
- m.anchors /= m.stride.view(-1, 1, 1)
- check_anchor_order(m)
- self.stride = m.stride
- self._initialize_biases() # only run once
- # print('Strides: %s' % m.stride.tolist())
-
- # Init weights, biases
- initialize_weights(self)
- self.info()
- print('')
-
- def forward(self, x, augment=False, profile=False):
- if augment:
- img_size = x.shape[-2:] # height, width
- s = [1, 0.83, 0.67] # scales
- f = [None, 3, None] # flips (2-ud, 3-lr)
- y = [] # outputs
- for si, fi in zip(s, f):
- xi = scale_img(x.flip(fi) if fi else x, si)
- yi = self.forward_once(xi)[0] # forward
- # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
- yi[..., :4] /= si # de-scale
- if fi == 2:
- yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
- elif fi == 3:
- yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
- y.append(yi)
- return torch.cat(y, 1), None # augmented inference, train
- else:
- return self.forward_once(x, profile) # single-scale inference, train
-
- def forward_once(self, x, profile=False):
- y, dt = [], [] # outputs
- for m in self.model:
- if m.f != -1: # if not from previous layer
- x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
-
- if profile:
- try:
- import thop
- o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # FLOPS
- except:
- o = 0
- t = time_synchronized()
- for _ in range(10):
- _ = m(x)
- dt.append((time_synchronized() - t) * 100)
- print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
-
- x = m(x) # run
- y.append(x if m.i in self.save else None) # save output
-
- if profile:
- print('%.1fms total' % sum(dt))
- return x
-
- def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
- # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
- m = self.model[-1] # Detect() module
- for mi, s in zip(m.m, m.stride): # from
- b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
- b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
- b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
- mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
- def _print_biases(self):
- m = self.model[-1] # Detect() module
- for mi in m.m: # from
- b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
- print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
-
- # def _print_weights(self):
- # for m in self.model.modules():
- # if type(m) is Bottleneck:
- # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
-
- def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
- print('Fusing layers... ', end='')
- for m in self.model.modules():
- if type(m) is Conv:
- m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
- m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
- m.bn = None # remove batchnorm
- m.forward = m.fuseforward # update forward
- self.info()
- return self
-
- def info(self): # print model information
- model_info(self)
-
-
-def parse_model(d, ch): # model_dict, input_channels(3)
- print('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
- anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
- na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
- no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
-
- layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
- for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
- m = eval(m) if isinstance(m, str) else m # eval strings
- for j, a in enumerate(args):
- try:
- args[j] = eval(a) if isinstance(a, str) else a # eval strings
- except:
- pass
-
- n = max(round(n * gd), 1) if n > 1 else n # depth gain
- if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, BottleneckCSP2, SPPCSP, VoVCSP, C3]:
- c1, c2 = ch[f], args[0]
-
- # Normal
- # if i > 0 and args[0] != no: # channel expansion factor
- # ex = 1.75 # exponential (default 2.0)
- # e = math.log(c2 / ch[1]) / math.log(2)
- # c2 = int(ch[1] * ex ** e)
- # if m != Focus:
-
- c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
-
- # Experimental
- # if i > 0 and args[0] != no: # channel expansion factor
- # ex = 1 + gw # exponential (default 2.0)
- # ch1 = 32 # ch[1]
- # e = math.log(c2 / ch1) / math.log(2) # level 1-n
- # c2 = int(ch1 * ex ** e)
- # if m != Focus:
- # c2 = make_divisible(c2, 8) if c2 != no else c2
-
- args = [c1, c2, *args[1:]]
- if m in [BottleneckCSP, BottleneckCSP2, SPPCSP, VoVCSP, C3]:
- args.insert(2, n)
- n = 1
- elif m is nn.BatchNorm2d:
- args = [ch[f]]
- elif m is Concat:
- c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
- elif m is Detect:
- args.append([ch[x + 1] for x in f])
- if isinstance(args[1], int): # number of anchors
- args[1] = [list(range(args[1] * 2))] * len(f)
- else:
- c2 = ch[f]
-
- m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
- t = str(m)[8:-2].replace('__main__.', '') # module type
- np = sum([x.numel() for x in m_.parameters()]) # number params
- m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
- print('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
- save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
- layers.append(m_)
- ch.append(c2)
- return nn.Sequential(*layers), sorted(save)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--cfg', type=str, default='yolov4.yaml', help='model.yaml')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- opt = parser.parse_args()
- opt.cfg = check_file(opt.cfg) # check file
- device = select_device(opt.device)
-
- # Create model
- model = Model(opt.cfg).to(device)
- model.train()
-
- # Profile
- # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
- # y = model(img, profile=True)
-
- # ONNX export
- # model.model[-1].export = True
- # torch.onnx.export(model, img, opt.cfg.replace('.yaml', '.onnx'), verbose=True, opset_version=11)
-
- # Tensorboard
- # from torch.utils.tensorboard import SummaryWriter
- # tb_writer = SummaryWriter()
- # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
- # tb_writer.add_graph(model.model, img) # add model to tensorboard
- # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
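The deleted `Detect.forward` decoded sigmoid outputs into pixel-space boxes with the `2*s - 0.5` xy form and `(2*s)**2` wh form. A standalone sketch of that decode for one 20x20 level (the stride and anchor values here are made up):

```
import torch

ny, nx, stride = 20, 20, 8.0
anchor = torch.tensor([12., 16.])              # one anchor's (w, h), made up
yv, xv = torch.meshgrid(torch.arange(ny), torch.arange(nx))
grid = torch.stack((xv, yv), 2).float()        # (ny, nx, 2), as in _make_grid

s = torch.rand(1, 3, ny, nx, 4)                # sigmoid(x, y, w, h)
xy = (s[..., 0:2] * 2. - 0.5 + grid) * stride  # pixel-space centers
wh = (s[..., 2:4] * 2) ** 2 * anchor           # pixel-space sizes
print(xy.shape, wh.shape)
```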
From 8d6011ce0f8596fea75b9da98673008c775f085e Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:40:32 +0800
Subject: [PATCH 18/37] Update models.py
---
models/models.py | 178 +++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 174 insertions(+), 4 deletions(-)
diff --git a/models/models.py b/models/models.py
index 5dcb248..7fee5a1 100644
--- a/models/models.py
+++ b/models/models.py
@@ -50,6 +50,12 @@ def create_modules(module_defs, img_size, cfg):
modules.add_module('activation', Swish())
elif mdef['activation'] == 'mish':
modules.add_module('activation', Mish())
+ elif mdef['activation'] == 'emb':
+ modules.add_module('activation', F.normalize())  # caution: F.normalize is a function, not an nn.Module; reaching this 'emb' branch raises a TypeError
+ elif mdef['activation'] == 'logistic':
+ modules.add_module('activation', nn.Sigmoid())
+ elif mdef['activation'] == 'silu':
+ modules.add_module('activation', nn.SiLU())
elif mdef['type'] == 'deformableconvolutional':
bn = mdef['batch_normalize']
@@ -82,6 +88,25 @@ def create_modules(module_defs, img_size, cfg):
modules.add_module('activation', Swish())
elif mdef['activation'] == 'mish':
modules.add_module('activation', Mish())
+ elif mdef['activation'] == 'silu':
+ modules.add_module('activation', nn.SiLU())
+
+ elif mdef['type'] == 'dropout':
+ p = mdef['probability']
+ modules = nn.Dropout(p)
+
+ elif mdef['type'] == 'avgpool':
+ modules = GAP()
+
+ elif mdef['type'] == 'silence':
+ filters = output_filters[-1]
+ modules = Silence()
+
+ elif mdef['type'] == 'sam': # nn.Sequential() placeholder for 'shortcut' layer
+ layers = mdef['from']
+ filters = output_filters[-1]
+ routs.extend([i + l if l < 0 else l for l in layers])
+ modules = ScaleSpatial(layers=layers)
elif mdef['type'] == 'BatchNorm2d':
filters = output_filters[-1]
@@ -101,6 +126,16 @@ def create_modules(module_defs, img_size, cfg):
else:
modules = maxpool
+ elif mdef['type'] == 'local_avgpool':
+ k = mdef['size'] # kernel size
+ stride = mdef['stride']
+ avgpool = nn.AvgPool2d(kernel_size=k, stride=stride, padding=(k - 1) // 2)
+ if k == 2 and stride == 1: # yolov3-tiny
+ modules.add_module('ZeroPad2d', nn.ZeroPad2d((0, 1, 0, 1)))
+ modules.add_module('AvgPool2d', avgpool)
+ else:
+ modules = avgpool
+
elif mdef['type'] == 'upsample':
if ONNX_EXPORT: # explicitly state size, avoid scale_factor
g = (yolo_index + 1) * 2 / 32 # gain
@@ -141,6 +176,10 @@ def create_modules(module_defs, img_size, cfg):
elif mdef['type'] == 'reorg3d': # yolov3-spp-pan-scale
pass
+ elif mdef['type'] == 'reorg': # yolov3-spp-pan-scale
+ filters = 4 * output_filters[-1]
+ modules.add_module('Reorg', Reorg())
+
elif mdef['type'] == 'yolo':
yolo_index += 1
stride = [8, 16, 32, 64, 128] # P3, P4, P5, P6, P7 strides
@@ -160,8 +199,41 @@ def create_modules(module_defs, img_size, cfg):
bias_ = module_list[j][0].bias # shape(255,)
bias = bias_[:modules.no * modules.na].view(modules.na, -1) # shape(3,85)
#bias[:, 4] += -4.5 # obj
- bias[:, 4] += math.log(8 / (640 / stride[yolo_index]) ** 2) # obj (8 objects per 640 image)
- bias[:, 5:] += math.log(0.6 / (modules.nc - 0.99)) # cls (sigmoid(p) = 1/nc)
+ bias.data[:, 4] += math.log(8 / (640 / stride[yolo_index]) ** 2) # obj (8 objects per 640 image)
+ bias.data[:, 5:] += math.log(0.6 / (modules.nc - 0.99)) # cls (sigmoid(p) = 1/nc)
+ module_list[j][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad)
+
+ #j = [-2, -5, -8]
+ #for sj in j:
+ # bias_ = module_list[sj][0].bias
+ # bias = bias_[:modules.no * 1].view(1, -1)
+ # bias.data[:, 4] += math.log(8 / (640 / stride[yolo_index]) ** 2)
+ # bias.data[:, 5:] += math.log(0.6 / (modules.nc - 0.99))
+ # module_list[sj][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad)
+ except:
+ print('WARNING: smart bias initialization failure.')
+
+ elif mdef['type'] == 'jde':
+ yolo_index += 1
+ stride = [8, 16, 32, 64, 128] # P3, P4, P5, P6, P7 strides
+ if any(x in cfg for x in ['yolov4-tiny', 'fpn', 'yolov3']): # P5, P4, P3 strides
+ stride = [32, 16, 8]
+ layers = mdef['from'] if 'from' in mdef else []
+ modules = JDELayer(anchors=mdef['anchors'][mdef['mask']], # anchor list
+ nc=mdef['classes'], # number of classes
+ img_size=img_size, # (416, 416)
+ yolo_index=yolo_index, # 0, 1, 2...
+ layers=layers, # output layers
+ stride=stride[yolo_index])
+
+ # Initialize preceding Conv2d() bias (https://arxiv.org/pdf/1708.02002.pdf section 3.3)
+ try:
+ j = layers[yolo_index] if 'from' in mdef else -1
+ bias_ = module_list[j][0].bias # shape(255,)
+ bias = bias_[:modules.no * modules.na].view(modules.na, -1) # shape(3,85)
+ #bias[:, 4] += -4.5 # obj
+ bias.data[:, 4] += math.log(8 / (640 / stride[yolo_index]) ** 2) # obj (8 objects per 640 image)
+ bias.data[:, 5:] += math.log(0.6 / (modules.nc - 0.99)) # cls (sigmoid(p) = 1/nc)
module_list[j][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad)
except:
print('WARNING: smart bias initialization failure.')
@@ -271,6 +343,99 @@ def forward(self, p, out):
#torch.sigmoid_(io[..., 4:])
return io.view(bs, -1, self.no), p # view [1, 3, 13, 13, 85] as [1, 507, 85]
+
+class JDELayer(nn.Module):
+ def __init__(self, anchors, nc, img_size, yolo_index, layers, stride):
+ super(JDELayer, self).__init__()
+ self.anchors = torch.Tensor(anchors)
+ self.index = yolo_index # index of this layer in layers
+ self.layers = layers # model output layer indices
+ self.stride = stride # layer stride
+ self.nl = len(layers) # number of output layers (3)
+ self.na = len(anchors) # number of anchors (3)
+ self.nc = nc # number of classes (80)
+ self.no = nc + 5 # number of outputs (85)
+ self.nx, self.ny, self.ng = 0, 0, 0 # initialize number of x, y gridpoints
+ self.anchor_vec = self.anchors / self.stride
+ self.anchor_wh = self.anchor_vec.view(1, self.na, 1, 1, 2)
+
+ if ONNX_EXPORT:
+ self.training = False
+ self.create_grids((img_size[1] // stride, img_size[0] // stride)) # number x, y grid points
+
+ def create_grids(self, ng=(13, 13), device='cpu'):
+ self.nx, self.ny = ng # x and y grid size
+ self.ng = torch.tensor(ng, dtype=torch.float)
+
+ # build xy offsets
+ if not self.training:
+ yv, xv = torch.meshgrid([torch.arange(self.ny, device=device), torch.arange(self.nx, device=device)])
+ self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()
+
+ if self.anchor_vec.device != device:
+ self.anchor_vec = self.anchor_vec.to(device)
+ self.anchor_wh = self.anchor_wh.to(device)
+
+ def forward(self, p, out):
+ ASFF = False # https://arxiv.org/abs/1911.09516
+ if ASFF:
+ i, n = self.index, self.nl # index in layers, number of layers
+ p = out[self.layers[i]]
+ bs, _, ny, nx = p.shape # bs, 255, 13, 13
+ if (self.nx, self.ny) != (nx, ny):
+ self.create_grids((nx, ny), p.device)
+
+ # outputs and weights
+ # w = F.softmax(p[:, -n:], 1) # normalized weights
+ w = torch.sigmoid(p[:, -n:]) * (2 / n) # sigmoid weights (faster)
+ # w = w / w.sum(1).unsqueeze(1) # normalize across layer dimension
+
+ # weighted ASFF sum
+ p = out[self.layers[i]][:, :-n] * w[:, i:i + 1]
+ for j in range(n):
+ if j != i:
+ p += w[:, j:j + 1] * \
+ F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False)
+
+ elif ONNX_EXPORT:
+ bs = 1 # batch size
+ else:
+ bs, _, ny, nx = p.shape # bs, 255, 13, 13
+ if (self.nx, self.ny) != (nx, ny):
+ self.create_grids((nx, ny), p.device)
+
+ # p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85) # (bs, anchors, grid, grid, classes + xywh)
+ p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous() # prediction
+
+ if self.training:
+ return p
+
+ elif ONNX_EXPORT:
+ # Avoid broadcasting for ANE operations
+ m = self.na * self.nx * self.ny
+ ng = 1. / self.ng.repeat(m, 1)
+ grid = self.grid.repeat(1, self.na, 1, 1, 1).view(m, 2)
+ anchor_wh = self.anchor_wh.repeat(1, 1, self.nx, self.ny, 1).view(m, 2) * ng
+
+ p = p.view(m, self.no)
+ xy = torch.sigmoid(p[:, 0:2]) + grid # x, y
+ wh = torch.exp(p[:, 2:4]) * anchor_wh # width, height
+ p_cls = torch.sigmoid(p[:, 4:5]) if self.nc == 1 else \
+ torch.sigmoid(p[:, 5:self.no]) * torch.sigmoid(p[:, 4:5]) # conf
+ return p_cls, xy * ng, wh
+
+ else: # inference
+ #io = p.sigmoid()
+ #io[..., :2] = (io[..., :2] * 2. - 0.5 + self.grid)
+ #io[..., 2:4] = (io[..., 2:4] * 2) ** 2 * self.anchor_wh
+ #io[..., :4] *= self.stride
+ io = p.clone() # inference output
+ io[..., :2] = torch.sigmoid(io[..., :2]) * 2. - 0.5 + self.grid # xy
+ io[..., 2:4] = (torch.sigmoid(io[..., 2:4]) * 2) ** 2 * self.anchor_wh # wh yolo method
+ io[..., :4] *= self.stride
+ io[..., 4:] = F.softmax(io[..., 4:])
+ return io.view(bs, -1, self.no), p # view [1, 3, 13, 13, 85] as [1, 507, 85]
+
class Darknet(nn.Module):
# YOLOv3 object detection model
@@ -335,7 +500,8 @@ def forward_once(self, x, augment=False, verbose=False):
for i, module in enumerate(self.module_list):
name = module.__class__.__name__
- if name in ['WeightedFeatureFusion', 'FeatureConcat', 'FeatureConcat2', 'FeatureConcat3', 'FeatureConcat_l']: # sum, concat
+ #print(name)
+ if name in ['WeightedFeatureFusion', 'FeatureConcat', 'FeatureConcat2', 'FeatureConcat3', 'FeatureConcat_l', 'ScaleSpatial']: # sum, concat
if verbose:
l = [i - 1] + module.layers # layers
sh = [list(x.shape)] + [list(out[i].shape) for i in module.layers] # shapes
@@ -343,7 +509,11 @@ def forward_once(self, x, augment=False, verbose=False):
x = module(x, out) # WeightedFeatureFusion(), FeatureConcat()
elif name == 'YOLOLayer':
yolo_out.append(module(x, out))
+ elif name == 'JDELayer':
+ yolo_out.append(module(x, out))
else: # run module directly, i.e. mtype = 'convolutional', 'upsample', 'maxpool', 'batchnorm2d' etc.
+ #print(module)
+ #print(x.shape)
x = module(x)
out.append(x if self.routs[i] else [])
@@ -389,7 +559,7 @@ def info(self, verbose=False):
def get_yolo_layers(model):
- return [i for i, m in enumerate(model.module_list) if m.__class__.__name__ == 'YOLOLayer'] # [89, 101, 113]
+ return [i for i, m in enumerate(model.module_list) if m.__class__.__name__ in ['YOLOLayer', 'JDELayer']] # [89, 101, 113]
def load_darknet_weights(self, weights, cutoff=-1):
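The new `reorg` branch sets `filters = 4 * output_filters[-1]`, i.e. a space-to-depth op. The `Reorg` module it registers is assumed here to be the usual 2x2 pixel-unshuffle; a sketch of that definition:

```
import torch
import torch.nn as nn

class Reorg(nn.Module):
    # (b, c, h, w) -> (b, 4c, h/2, w/2): gather every 2x2 neighborhood into channels
    def forward(self, x):
        return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
                          x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)

print(Reorg()(torch.zeros(1, 32, 64, 64)).shape)  # torch.Size([1, 128, 32, 32])
```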
From 21488494b75ce6b7b8ef728624a63c8d69c92da9 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:41:43 +0800
Subject: [PATCH 19/37] Update activations.py
---
utils/activations.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/utils/activations.py b/utils/activations.py
index f00b3b9..ba6b854 100644
--- a/utils/activations.py
+++ b/utils/activations.py
@@ -1,3 +1,5 @@
+# Activation functions
+
import torch
import torch.nn as nn
import torch.nn.functional as F
@@ -10,10 +12,11 @@ def forward(x):
return x * torch.sigmoid(x)
-class HardSwish(nn.Module):
+class Hardswish(nn.Module): # export-friendly version of nn.Hardswish()
@staticmethod
def forward(x):
- return x * F.hardtanh(x + 3, 0., 6., True) / 6.
+ # return x * F.hardsigmoid(x) # for torchscript and CoreML
+ return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX
class MemoryEfficientSwish(nn.Module):
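The export-friendly `Hardswish` above rewrites the activation with `hardtanh` so torchscript, CoreML and ONNX tracing all accept it. A quick numerical check that the two forms agree:

```
import torch
import torch.nn.functional as F

x = torch.linspace(-4, 4, steps=9)
export_friendly = x * F.hardtanh(x + 3, 0., 6.) / 6.
print(torch.allclose(export_friendly, F.hardswish(x)))  # True
```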
From 28b7cd2d6685d76e69375842c3aa1d3fe192e9ac Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:42:42 +0800
Subject: [PATCH 20/37] Update datasets.py
---
utils/datasets.py | 648 ++++++++++++++++++++++++++++++++++++----------
1 file changed, 517 insertions(+), 131 deletions(-)
diff --git a/utils/datasets.py b/utils/datasets.py
index af06d7b..d104af1 100644
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -1,9 +1,13 @@
+# Dataset utils and dataloaders
+
import glob
import math
import os
import random
import shutil
import time
+from itertools import repeat
+from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
@@ -14,11 +18,18 @@
from torch.utils.data import Dataset
from tqdm import tqdm
-from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
+import pickle
+from copy import deepcopy
+from pycocotools import mask as maskUtils
+from torchvision.utils import save_image
+
+from utils.general import xyxy2xywh, xywh2xyxy
+from utils.torch_utils import torch_distributed_zero_first
-help_url = ''
-img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
-vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
+# Parameters
+help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
+img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
+vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
@@ -47,9 +58,9 @@ def exif_size(img):
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
- local_rank=-1, world_size=1):
- # Make sure only the first process in DDP process the dataset first, and the following others can use the cache.
- with torch_distributed_zero_first(local_rank):
+ rank=-1, world_size=1, workers=8):
+ # Make sure only the first process in DDP processes the dataset first; the others can then use the cache
+ with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
@@ -57,26 +68,85 @@ def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=Fa
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
- pad=pad)
+ pad=pad,
+ rank=rank)
batch_size = min(batch_size, len(dataset))
- nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, 8]) # number of workers
- train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) if local_rank != -1 else None
- dataloader = torch.utils.data.DataLoader(dataset,
- batch_size=batch_size,
- num_workers=nw,
- sampler=train_sampler,
- pin_memory=True,
- collate_fn=LoadImagesAndLabels.collate_fn)
+ nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
+ sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
+ dataloader = InfiniteDataLoader(dataset,
+ batch_size=batch_size,
+ num_workers=nw,
+ sampler=sampler,
+ pin_memory=True,
+ collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader()
return dataloader, dataset
+def create_dataloader9(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
+ rank=-1, world_size=1, workers=8):
+ # Make sure only the first process in DDP processes the dataset first; the others can then use the cache
+ with torch_distributed_zero_first(rank):
+ dataset = LoadImagesAndLabels9(path, imgsz, batch_size,
+ augment=augment, # augment images
+ hyp=hyp, # augmentation hyperparameters
+ rect=rect, # rectangular training
+ cache_images=cache,
+ single_cls=opt.single_cls,
+ stride=int(stride),
+ pad=pad,
+ rank=rank)
+
+ batch_size = min(batch_size, len(dataset))
+ nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
+ sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
+ dataloader = InfiniteDataLoader(dataset,
+ batch_size=batch_size,
+ num_workers=nw,
+ sampler=sampler,
+ pin_memory=True,
+ collate_fn=LoadImagesAndLabels9.collate_fn) # torch.utils.data.DataLoader()
+ return dataloader, dataset
+
+
+class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
+ """ Dataloader that reuses workers
+ Uses same syntax as vanilla DataLoader
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
+ self.iterator = super().__iter__()
+
+ def __len__(self):
+ return len(self.batch_sampler.sampler)
+
+ def __iter__(self):
+ for i in range(len(self)):
+ yield next(self.iterator)
+
+
+class _RepeatSampler(object):
+ """ Sampler that repeats forever
+ Args:
+ sampler (Sampler)
+ """
+
+ def __init__(self, sampler):
+ self.sampler = sampler
+
+ def __iter__(self):
+ while True:
+ yield from iter(self.sampler)
+
+
class LoadImages: # for inference
- def __init__(self, path, img_size=640):
+ def __init__(self, path, img_size=640, auto_size=32):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
- files = sorted(glob.glob(p)) # glob
+ files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
@@ -84,11 +154,12 @@ def __init__(self, path, img_size=640):
else:
raise Exception('ERROR: %s does not exist' % p)
- images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
- videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
+ images = [x for x in files if x.split('.')[-1].lower() in img_formats]
+ videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
+ self.auto_size = auto_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
@@ -134,13 +205,12 @@ def __next__(self):
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
- img = letterbox(img0, new_shape=self.img_size)[0]
+ img = letterbox(img0, new_shape=self.img_size, auto_size=self.auto_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
- # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
@@ -153,23 +223,15 @@ def __len__(self):
class LoadWebcam: # for inference
- def __init__(self, pipe=0, img_size=640):
+ def __init__(self, pipe='0', img_size=640):
self.img_size = img_size
- if pipe == '0':
- pipe = 0 # local camera
+ if pipe.isnumeric():
+ pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
- # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
- # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
- # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
-
- # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
- # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
- # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
-
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
@@ -234,7 +296,7 @@ def __init__(self, sources='streams.txt', img_size=640):
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
- cap = cv2.VideoCapture(0 if s == '0' else s)
+ cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
@@ -292,32 +354,290 @@ def __len__(self):
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
- cache_images=False, single_cls=False, stride=32, pad=0.0):
+ cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
+ self.img_size = img_size
+ self.augment = augment
+ self.hyp = hyp
+ self.image_weights = image_weights
+ self.rect = False if image_weights else rect
+ self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
+ self.mosaic_border = [-img_size // 2, -img_size // 2]
+ self.stride = stride
+
+ def img2label_paths(img_paths):
+ # Define label paths as a function of image paths
+ sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
+ return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths]
+
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
- p = str(Path(p)) # os-agnostic
- parent = str(Path(p).parent) + os.sep
- if os.path.isfile(p): # file
+ p = Path(p) # os-agnostic
+ if p.is_dir(): # dir
+ f += glob.glob(str(p / '**' / '*.*'), recursive=True)
+ elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().splitlines()
+ parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
- elif os.path.isdir(p): # folder
- f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
- self.img_files = sorted(
- [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
+ self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
+ assert self.img_files, 'No images found'
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
- n = len(self.img_files)
- assert n > 0, 'No images found in %s. See %s' % (path, help_url)
+ # Check cache
+ self.label_files = img2label_paths(self.img_files) # labels
+ cache_path = str(Path(self.label_files[0]).parent) + '.cache3' # cached labels
+ if os.path.isfile(cache_path):
+ cache = torch.load(cache_path) # load
+ if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
+ cache = self.cache_labels(cache_path) # re-cache
+ else:
+ cache = self.cache_labels(cache_path) # cache
+
+ # Read cache
+ cache.pop('hash') # remove hash
+ labels, shapes = zip(*cache.values())
+ self.labels = list(labels)
+ self.shapes = np.array(shapes, dtype=np.float64)
+ self.img_files = list(cache.keys()) # update
+ self.label_files = img2label_paths(cache.keys()) # update
+
+ n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
-
- self.n = n # number of images
self.batch = bi # batch index of image
+ self.n = n
+
+ # Rectangular Training
+ if self.rect:
+ # Sort by aspect ratio
+ s = self.shapes # wh
+ ar = s[:, 1] / s[:, 0] # aspect ratio
+ irect = ar.argsort()
+ self.img_files = [self.img_files[i] for i in irect]
+ self.label_files = [self.label_files[i] for i in irect]
+ self.labels = [self.labels[i] for i in irect]
+ self.shapes = s[irect] # wh
+ ar = ar[irect]
+
+ # Set training image shapes
+ shapes = [[1, 1]] * nb
+ for i in range(nb):
+ ari = ar[bi == i]
+ mini, maxi = ari.min(), ari.max()
+ if maxi < 1:
+ shapes[i] = [maxi, 1]
+ elif mini > 1:
+ shapes[i] = [1, 1 / mini]
+
+ self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
+
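
As a sanity check on the rectangular-training math above, here is a standalone sketch (hypothetical image shapes, stride 32, pad 0) showing how a batch of wide images gets a reduced-height letterbox shape:

```python
import numpy as np

img_size, stride, pad, batch_size = 640, 32, 0.0, 2
shapes = np.array([[640, 480], [640, 480], [480, 640], [480, 640]], dtype=np.float64)  # wh
ar = shapes[:, 1] / shapes[:, 0]                 # h / w, assumed already sorted ascending
bi = np.floor(np.arange(len(ar)) / batch_size).astype(int)  # batch index

shape = [1, 1]                                   # default: square img_size x img_size
ari = ar[bi == 0]                                # aspect ratios of batch 0 (wide images)
mini, maxi = ari.min(), ari.max()
if maxi < 1:                                     # all wide: shrink target height
    shape = [maxi, 1]
elif mini > 1:                                   # all tall: shrink target width
    shape = [1, 1 / mini]
print(np.ceil(np.array(shape) * img_size / stride + pad).astype(int) * stride)  # [480 640]
```
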
+ # Check labels
+ create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
+ nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
+ pbar = enumerate(self.label_files)
+ if rank in [-1, 0]:
+ pbar = tqdm(pbar)
+ for i, file in pbar:
+ l = self.labels[i] # label
+ if l is not None and l.shape[0]:
+ assert l.shape[1] == 5, '> 5 label columns: %s' % file
+ assert (l >= 0).all(), 'negative labels: %s' % file
+ assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
+ if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
+ nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
+ if single_cls:
+ l[:, 0] = 0 # force dataset into single-class mode
+ self.labels[i] = l
+ nf += 1 # file found
+
+ # Create subdataset (a smaller dataset)
+ if create_datasubset and ns < 1E4:
+ if ns == 0:
+ create_folder(path='./datasubset')
+ os.makedirs('./datasubset/images')
+ exclude_classes = 43
+ if exclude_classes not in l[:, 0]:
+ ns += 1
+ # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
+ with open('./datasubset/images.txt', 'a') as f:
+ f.write(self.img_files[i] + '\n')
+
+ # Extract object detection boxes for a second stage classifier
+ if extract_bounding_boxes:
+ p = Path(self.img_files[i])
+ img = cv2.imread(str(p))
+ h, w = img.shape[:2]
+ for j, x in enumerate(l):
+ f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
+ if not os.path.exists(Path(f).parent):
+ os.makedirs(Path(f).parent) # make new output folder
+
+ b = x[1:] * [w, h, w, h] # box
+ b[2:] = b[2:].max() # rectangle to square
+ b[2:] = b[2:] * 1.3 + 30 # pad
+ b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
+
+ b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
+ b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
+ assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
+ else:
+ ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
+ # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
+
+ if rank in [-1, 0]:
+ pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
+ cache_path, nf, nm, ne, nd, n)
+ if nf == 0:
+ s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
+ print(s)
+ assert not augment, '%s. Cannot train without labels.' % s
+
+ # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
+ self.imgs = [None] * n
+ if cache_images:
+ gb = 0 # Gigabytes of cached images
+ self.img_hw0, self.img_hw = [None] * n, [None] * n
+ results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
+ pbar = tqdm(enumerate(results), total=n)
+ for i, x in pbar:
+ self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
+ gb += self.imgs[i].nbytes
+ pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
+
+ def cache_labels(self, path='labels.cache3'):
+ # Cache dataset labels, check images and read shapes
+ x = {} # dict
+ pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
+ for (img, label) in pbar:
+ try:
+ l = []
+ im = Image.open(img)
+ im.verify() # PIL verify
+ shape = exif_size(im) # image size
+ assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
+ if os.path.isfile(label):
+ with open(label, 'r') as f:
+ l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
+ if len(l) == 0:
+ l = np.zeros((0, 5), dtype=np.float32)
+ x[img] = [l, shape]
+ except Exception as e:
+ print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))
+
+ x['hash'] = get_hash(self.label_files + self.img_files)
+ torch.save(x, path) # save for next time
+ return x
+
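
The label cache written by cache_labels() is just a torch-serialized dict keyed by image path, plus a 'hash' entry used for invalidation. A hedged sketch of reading one back (the cache path shown is hypothetical):

```python
import torch

cache = torch.load('coco/labels/train2017.cache3')
cache.pop('hash')                       # dataset fingerprint, compared on each load
for img_path, (labels, shape) in list(cache.items())[:3]:
    # labels: float32 array of [class, x, y, w, h] rows; shape: (w, h) from exif_size
    print(img_path, labels.shape, shape)
```
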
+ def __len__(self):
+ return len(self.img_files)
+
+ # def __iter__(self):
+ # self.count = -1
+ # print('ran dataset iter')
+ # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
+ # return self
+
+ def __getitem__(self, index):
+ if self.image_weights:
+ index = self.indices[index]
+
+ hyp = self.hyp
+ mosaic = self.mosaic and random.random() < hyp['mosaic']
+ if mosaic:
+ # Load mosaic
+ img, labels = load_mosaic(self, index)
+ #img, labels = load_mosaic9(self, index)
+ shapes = None
+
+ # MixUp https://arxiv.org/pdf/1710.09412.pdf
+ if random.random() < hyp['mixup']:
+ img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
+ #img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
+ r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
+ img = (img * r + img2 * (1 - r)).astype(np.uint8)
+ labels = np.concatenate((labels, labels2), 0)
+
+ else:
+ # Load image
+ img, (h0, w0), (h, w) = load_image(self, index)
+
+ # Letterbox
+ shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
+ img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
+ shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
+
+ # Load labels
+ labels = []
+ x = self.labels[index]
+ if x.size > 0:
+ # Normalized xywh to pixel xyxy format
+ labels = x.copy()
+ labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
+ labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
+ labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
+ labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
+
+ if self.augment:
+ # Augment imagespace
+ if not mosaic:
+ img, labels = random_perspective(img, labels,
+ degrees=hyp['degrees'],
+ translate=hyp['translate'],
+ scale=hyp['scale'],
+ shear=hyp['shear'],
+ perspective=hyp['perspective'])
+
+ # Augment colorspace
+ augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
+
+ # Apply cutouts
+ # if random.random() < 0.9:
+ # labels = cutout(img, labels)
+
+ nL = len(labels) # number of labels
+ if nL:
+ labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
+ labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
+ labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
+
+ if self.augment:
+ # flip up-down
+ if random.random() < hyp['flipud']:
+ img = np.flipud(img)
+ if nL:
+ labels[:, 2] = 1 - labels[:, 2]
+
+ # flip left-right
+ if random.random() < hyp['fliplr']:
+ img = np.fliplr(img)
+ if nL:
+ labels[:, 1] = 1 - labels[:, 1]
+
+ labels_out = torch.zeros((nL, 6))
+ if nL:
+ labels_out[:, 1:] = torch.from_numpy(labels)
+
+ # Convert
+ img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
+ img = np.ascontiguousarray(img)
+
+ return torch.from_numpy(img), labels_out, self.img_files[index], shapes
+
+ @staticmethod
+ def collate_fn(batch):
+ img, label, path, shapes = zip(*batch) # transposed
+ for i, l in enumerate(label):
+ l[:, 0] = i # add target image index for build_targets()
+ return torch.stack(img, 0), torch.cat(label, 0), path, shapes
+
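
collate_fn stacks the images and concatenates the label tensors, writing each sample's batch index into column 0 so build_targets() can map every row back to its image. A minimal sketch with dummy tensors:

```python
import torch

# two images with 2 and 1 labels; rows are [img_idx, class, x, y, w, h]
label = (torch.zeros(2, 6), torch.zeros(1, 6))
for i, l in enumerate(label):
    l[:, 0] = i
targets = torch.cat(label, 0)
print(targets[:, 0])  # tensor([0., 0., 1.])
```
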
+
+class LoadImagesAndLabels9(Dataset): # for training/testing
+ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
+ cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
@@ -327,12 +647,32 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
- # Define labels
- self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
- self.img_files]
+ def img2label_paths(img_paths):
+ # Define label paths as a function of image paths
+ sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
+ return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths]
+
+ try:
+ f = [] # image files
+ for p in path if isinstance(path, list) else [path]:
+ p = Path(p) # os-agnostic
+ if p.is_dir(): # dir
+ f += glob.glob(str(p / '**' / '*.*'), recursive=True)
+ elif p.is_file(): # file
+ with open(p, 'r') as t:
+ t = t.read().splitlines()
+ parent = str(p.parent) + os.sep
+ f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
+ else:
+ raise Exception('%s does not exist' % p)
+ self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
+ assert self.img_files, 'No images found'
+ except Exception as e:
+ raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
# Check cache
- cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
+ self.label_files = img2label_paths(self.img_files) # labels
+ cache_path = str(Path(self.label_files[0]).parent) + '.cache3' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
@@ -340,12 +680,21 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
else:
cache = self.cache_labels(cache_path) # cache
- # Get labels
- labels, shapes = zip(*[cache[x] for x in self.img_files])
- self.shapes = np.array(shapes, dtype=np.float64)
+ # Read cache
+ cache.pop('hash') # remove hash
+ labels, shapes = zip(*cache.values())
self.labels = list(labels)
+ self.shapes = np.array(shapes, dtype=np.float64)
+ self.img_files = list(cache.keys()) # update
+ self.label_files = img2label_paths(cache.keys()) # update
+
+ n = len(shapes) # number of images
+ bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
+ nb = bi[-1] + 1 # number of batches
+ self.batch = bi # batch index of image
+ self.n = n
- # Rectangular Training https://github.com/ultralytics/yolov3/issues/232
+ # Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
@@ -369,13 +718,15 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
- # Cache labels
+ # Check labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
- pbar = tqdm(self.label_files)
- for i, file in enumerate(pbar):
+ pbar = enumerate(self.label_files)
+ if rank in [-1, 0]:
+ pbar = tqdm(pbar)
+ for i, file in pbar:
l = self.labels[i] # label
- if l.shape[0]:
+ if l is not None and l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
@@ -420,8 +771,9 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
- pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
- cache_path, nf, nm, ne, nd, n)
+ if rank in [-1, 0]:
+ pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
+ cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
@@ -431,24 +783,24 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
- pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
- for i in pbar: # max 10k images
- self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
+ results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
+ pbar = tqdm(enumerate(results), total=n)
+ for i, x in pbar:
+ self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
- def cache_labels(self, path='labels.cache'):
+ def cache_labels(self, path='labels.cache3'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
- image = Image.open(img)
- image.verify() # PIL verify
- # _ = io.imread(img) # skimage verify (from skimage import io)
- shape = exif_size(image) # image size
+ im = Image.open(img)
+ im.verify() # PIL verify
+ shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
@@ -457,8 +809,7 @@ def cache_labels(self, path='labels.cache'):
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
- x[img] = None
- print('WARNING: %s: %s' % (img, e))
+ print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
@@ -478,14 +829,17 @@ def __getitem__(self, index):
index = self.indices[index]
hyp = self.hyp
- if self.mosaic:
+ mosaic = self.mosaic and random.random() < hyp['mosaic']
+ if mosaic:
# Load mosaic
- img, labels = load_mosaic(self, index)
+ #img, labels = load_mosaic(self, index)
+ img, labels = load_mosaic9(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
- img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
+ #img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
+ img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
@@ -512,7 +866,7 @@ def __getitem__(self, index):
if self.augment:
# Augment imagespace
- if not self.mosaic:
+ if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
@@ -606,7 +960,7 @@ def load_mosaic(self, index):
labels4 = []
s = self.img_size
- yc, xc = s, s # mosaic center x, y
+ yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
@@ -622,7 +976,7 @@ def load_mosaic(self, index):
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
- x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
@@ -644,14 +998,10 @@ def load_mosaic(self, index):
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
- # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
- np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
-
- # Replicate
- # img4, labels4 = replicate(img4, labels4)
+ np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
+ # img4, labels4 = replicate(img4, labels4) # replicate
# Augment
- # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
@@ -663,6 +1013,80 @@ def load_mosaic(self, index):
return img4, labels4
+def load_mosaic9(self, index):
+ # loads images in a 9-mosaic
+
+ labels9 = []
+ s = self.img_size
+ indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(8)] # 8 additional image indices
+ for i, index in enumerate(indices):
+ # Load image
+ img, _, (h, w) = load_image(self, index)
+
+ # place img in img9
+ if i == 0: # center
+ img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
+ h0, w0 = h, w
+ c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
+ elif i == 1: # top
+ c = s, s - h, s + w, s
+ elif i == 2: # top right
+ c = s + wp, s - h, s + wp + w, s
+ elif i == 3: # right
+ c = s + w0, s, s + w0 + w, s + h
+ elif i == 4: # bottom right
+ c = s + w0, s + hp, s + w0 + w, s + hp + h
+ elif i == 5: # bottom
+ c = s + w0 - w, s + h0, s + w0, s + h0 + h
+ elif i == 6: # bottom left
+ c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
+ elif i == 7: # left
+ c = s - w, s + h0 - h, s, s + h0
+ elif i == 8: # top left
+ c = s - w, s + h0 - hp - h, s, s + h0 - hp
+
+ padx, pady = c[:2]
+ x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
+
+ # Labels
+ x = self.labels[index]
+ labels = x.copy()
+ if x.size > 0: # Normalized xywh to pixel xyxy format
+ labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx
+ labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady
+ labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx
+ labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady
+ labels9.append(labels)
+
+ # Image
+ img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
+ hp, wp = h, w # height, width previous
+
+ # Offset
+ yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic offset y, x
+ img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
+
+ # Concat/clip labels
+ if len(labels9):
+ labels9 = np.concatenate(labels9, 0)
+ labels9[:, [1, 3]] -= xc
+ labels9[:, [2, 4]] -= yc
+
+ np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
+ # img9, labels9 = replicate(img9, labels9) # replicate
+
+ # Augment
+ img9, labels9 = random_perspective(img9, labels9,
+ degrees=self.hyp['degrees'],
+ translate=self.hyp['translate'],
+ scale=self.hyp['scale'],
+ shear=self.hyp['shear'],
+ perspective=self.hyp['perspective'],
+ border=self.mosaic_border) # border to remove
+
+ return img9, labels9
+
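
load_mosaic9 assembles nine images on a 3s x 3s canvas, then crops a random 2s x 2s window and shifts the labels by the same (xc, yc) offset. A sketch of just the crop step on a dummy canvas:

```python
import random
import numpy as np

s = 640
img9 = np.full((3 * s, 3 * s, 3), 114, dtype=np.uint8)  # assembled mosaic canvas
yc, xc = (int(random.uniform(0, s)) for _ in range(2))  # crop offset in [0, s)
crop = img9[yc:yc + 2 * s, xc:xc + 2 * s]
assert crop.shape[:2] == (2 * s, 2 * s)                 # final mosaic is 2s x 2s
```
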
+
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
@@ -680,7 +1104,7 @@ def replicate(img, labels):
return img, labels
-def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
+def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, auto_size=32):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
@@ -696,7 +1120,7 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
- dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
+ dw, dh = np.mod(dw, auto_size), np.mod(dh, auto_size) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
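
The letterbox arithmetic reduces to: scale by r = min(new / h, new / w), then pad each side so width and height reach the next multiple of auto_size. A standalone sketch of just the shape computation (helper name hypothetical):

```python
import numpy as np

def letterbox_dims(h, w, new=640, auto_size=32):
    r = min(new / h, new / w)                              # scale ratio
    new_unpad = int(round(w * r)), int(round(h * r))       # unpadded (w, h)
    dw, dh = new - new_unpad[0], new - new_unpad[1]        # raw wh padding
    dw, dh = np.mod(dw, auto_size), np.mod(dh, auto_size)  # minimum rectangle
    return new_unpad, dw / 2, dh / 2                       # size, per-side padding

print(letterbox_dims(500, 640))  # ((640, 500), 0.0, 6.0) -- dh = 140 mod 32 = 12, split per side
```
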
@@ -800,7 +1224,7 @@ def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shea
return img, targets
-def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2): # box1(4,n), box2(4,n)
+def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
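
Only the threshold change and the first body lines of box_candidates appear in this hunk; for context, here is a hedged reconstruction of the full predicate consistent with them (the aspect-ratio term follows the usual upstream pattern): a box survives augmentation if both sides exceed wh_thr pixels, it keeps more than area_thr (now 10%) of its original area, and its aspect ratio stays below ar_thr.

```python
import numpy as np

def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1):
    # box1: boxes before augment (4, n); box2: after augment (4, n), both xyxy
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & \
           (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr)
```
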
@@ -854,54 +1278,16 @@ def bbox_ioa(box1, box2):
return labels
-def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
- # creates a new ./images_reduced folder with reduced size images of maximum size img_size
- path_new = path + '_reduced' # reduced images path
- create_folder(path_new)
- for f in tqdm(glob.glob('%s/*.*' % path)):
- try:
- img = cv2.imread(f)
- h, w = img.shape[:2]
- r = img_size / max(h, w) # size ratio
- if r < 1.0:
- img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
- fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
- cv2.imwrite(fnew, img)
- except:
- print('WARNING: image failure %s' % f)
-
-
-def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
- # Converts dataset to bmp (for faster training)
- formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
- for a, b, files in os.walk(dataset):
- for file in tqdm(files, desc=a):
- p = a + '/' + file
- s = Path(file).suffix
- if s == '.txt': # replace text
- with open(p, 'r') as f:
- lines = f.read()
- for f in formats:
- lines = lines.replace(f, '.bmp')
- with open(p, 'w') as f:
- f.write(lines)
- elif s in formats: # replace image
- cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
- if s != '.bmp':
- os.system("rm '%s'" % p)
-
-
-def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
- # Copies all the images in a text file (list of images) into a folder
- create_folder(path[:-4])
- with open(path, 'r') as f:
- for line in f.read().splitlines():
- os.system('cp "%s" %s' % (line, path[:-4]))
- print(line)
-
-
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
+
+
+def flatten_recursive(path='../coco128'):
+ # Flatten a recursive directory by bringing all files to top level
+ new_path = Path(path + '_flat')
+ create_folder(new_path)
+ for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
+ shutil.copyfile(file, new_path / Path(file).name)
From 536312b0bd596ed2019c2478fedfae79e356ae7e Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:43:57 +0800
Subject: [PATCH 21/37] Update general.py
---
utils/general.py | 989 +++++------------------------------------------
1 file changed, 100 insertions(+), 889 deletions(-)
diff --git a/utils/general.py b/utils/general.py
index f326acc..0585f28 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -1,28 +1,25 @@
+# General utils
+
import glob
+import logging
import math
import os
+import platform
import random
-import shutil
+import re
import subprocess
import time
-from contextlib import contextmanager
-from copy import copy
from pathlib import Path
-from sys import platform
import cv2
import matplotlib
-import matplotlib.pyplot as plt
import numpy as np
import torch
-import torch.nn as nn
-import torchvision
import yaml
-from scipy.cluster.vq import kmeans
-from scipy.signal import butter, filtfilt
-from tqdm import tqdm
-from utils.torch_utils import init_seeds, is_parallel
+from utils.google_utils import gsutil_getsize
+from utils.metrics import fitness, fitness_p, fitness_r, fitness_ap50, fitness_ap, fitness_f
+from utils.torch_utils import init_torch_seeds
# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
@@ -33,33 +30,27 @@
cv2.setNumThreads(0)
-@contextmanager
-def torch_distributed_zero_first(local_rank: int):
- """
- Decorator to make all processes in distributed training wait for each local_master to do something.
- """
- if local_rank not in [-1, 0]:
- torch.distributed.barrier()
- yield
- if local_rank == 0:
- torch.distributed.barrier()
+def set_logging(rank=-1):
+ logging.basicConfig(
+ format="%(message)s",
+ level=logging.INFO if rank in [-1, 0] else logging.WARN)
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
- init_seeds(seed=seed)
+ init_torch_seeds(seed)
-def get_latest_run(search_dir='./runs'):
+def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
- return max(last_list, key=os.path.getctime)
+ return max(last_list, key=os.path.getctime) if last_list else ''
def check_git_status():
# Suggest 'git pull' if repo is out of date
- if platform in ['linux', 'darwin'] and not os.path.isfile('/.dockerenv'):
+ if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
if 'Your branch is behind' in s:
print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
@@ -73,63 +64,39 @@ def check_img_size(img_size, s=32):
return new_size
-def check_anchors(dataset, model, thr=4.0, imgsz=640):
- # Check anchor fit to data, recompute if necessary
- print('\nAnalyzing anchors... ', end='')
- m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
- shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
- scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
- wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
-
- def metric(k): # compute metric
- r = wh[:, None] / k[None]
- x = torch.min(r, 1. / r).min(2)[0] # ratio metric
- best = x.max(1)[0] # best_x
- aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold
- bpr = (best > 1. / thr).float().mean() # best possible recall
- return bpr, aat
-
- bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
- print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
- if bpr < 0.98: # threshold to recompute
- print('. Attempting to generate improved anchors, please wait...' % bpr)
- na = m.anchor_grid.numel() // 2 # number of anchors
- new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
- new_bpr = metric(new_anchors.reshape(-1, 2))[0]
- if new_bpr > bpr: # replace anchors
- new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
- m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
- m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
- check_anchor_order(m)
- print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
- else:
- print('Original anchors better than new anchors. Proceeding with original anchors.')
- print('') # newline
-
-
-def check_anchor_order(m):
- # Check anchor order against stride order for YOLO Detect() module m, and correct if necessary
- a = m.anchor_grid.prod(-1).view(-1) # anchor area
- da = a[-1] - a[0] # delta a
- ds = m.stride[-1] - m.stride[0] # delta s
- if da.sign() != ds.sign(): # same order
- print('Reversing anchor order')
- m.anchors[:] = m.anchors.flip(0)
- m.anchor_grid[:] = m.anchor_grid.flip(0)
-
-
def check_file(file):
- # Searches for file if not found locally
+ # Search for file if not found
if os.path.isfile(file) or file == '':
return file
else:
files = glob.glob('./**/' + file, recursive=True) # find file
assert len(files), 'File Not Found: %s' % file # assert file was found
- return files[0] # return first file if multiple found
+ assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique
+ return files[0] # return file
+
+
+def check_dataset(dict):
+ # Download dataset if not found locally
+ val, s = dict.get('val'), dict.get('download')
+ if val and len(val):
+ val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
+ if not all(x.exists() for x in val):
+ print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
+ if s and len(s): # download script
+ print('Downloading %s ...' % s)
+ if s.startswith('http') and s.endswith('.zip'): # URL
+ f = Path(s).name # filename
+ torch.hub.download_url_to_file(s, f)
+ r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
+ else: # bash script
+ r = os.system(s)
+ print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
+ else:
+ raise Exception('Dataset not found.')
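
check_dataset() takes the parsed data yaml as a dict; when any 'val' path is missing it falls back to the 'download' entry, which may be a zip URL or a bash command. A hypothetical usage sketch (the 'download' value shown is illustrative, not part of this patch):

```python
import yaml

with open('data/coco.yaml') as f:
    data_dict = yaml.load(f, Loader=yaml.FullLoader)

# e.g. data_dict = {'train': ..., 'val': ..., 'download': 'bash scripts/get_coco.sh'}
check_dataset(data_dict)  # no-op if the val paths exist, otherwise runs the download
```
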
def make_divisible(x, divisor):
- # Returns x evenly divisble by divisor
+ # Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
@@ -140,9 +107,9 @@ def labels_to_class_weights(labels, nc=80):
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(np.int) # labels = [class xywh]
- weights = np.bincount(classes, minlength=nc) # occurences per class
+ weights = np.bincount(classes, minlength=nc) # occurrences per class
- # Prepend gridpoint count (for uCE trianing)
+ # Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
@@ -175,7 +142,7 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
- y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
@@ -185,7 +152,7 @@ def xyxy2xywh(x):
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
- y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
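
A quick roundtrip check of the two converters as patched above (with clone/copy instead of zeros_like, any columns beyond the first four in a wider input are carried over rather than zeroed):

```python
import numpy as np

boxes_xyxy = np.array([[10., 20., 110., 220.]])
xywh = xyxy2xywh(boxes_xyxy)            # [[60., 120., 100., 200.]]
assert np.allclose(xywh2xyxy(xywh), boxes_xyxy)
```
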
@@ -217,99 +184,7 @@ def clip_coords(boxes, img_shape):
boxes[:, 3].clamp_(0, img_shape[0]) # y2
-def ap_per_class(tp, conf, pred_cls, target_cls):
- """ Compute the average precision, given the recall and precision curves.
- Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
- # Arguments
- tp: True positives (nparray, nx1 or nx10).
- conf: Objectness value from 0-1 (nparray).
- pred_cls: Predicted object classes (nparray).
- target_cls: True object classes (nparray).
- # Returns
- The average precision as computed in py-faster-rcnn.
- """
-
- # Sort by objectness
- i = np.argsort(-conf)
- tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
-
- # Find unique classes
- unique_classes = np.unique(target_cls)
-
- # Create Precision-Recall curve and compute AP for each class
- pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
- s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
- ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
- for ci, c in enumerate(unique_classes):
- i = pred_cls == c
- n_gt = (target_cls == c).sum() # Number of ground truth objects
- n_p = i.sum() # Number of predicted objects
-
- if n_p == 0 or n_gt == 0:
- continue
- else:
- # Accumulate FPs and TPs
- fpc = (1 - tp[i]).cumsum(0)
- tpc = tp[i].cumsum(0)
-
- # Recall
- recall = tpc / (n_gt + 1e-16) # recall curve
- r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
-
- # Precision
- precision = tpc / (tpc + fpc) # precision curve
- p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
-
- # AP from recall-precision curve
- for j in range(tp.shape[1]):
- ap[ci, j] = compute_ap(recall[:, j], precision[:, j])
-
- # Plot
- # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
- # ax.plot(recall, precision)
- # ax.set_xlabel('Recall')
- # ax.set_ylabel('Precision')
- # ax.set_xlim(0, 1.01)
- # ax.set_ylim(0, 1.01)
- # fig.tight_layout()
- # fig.savefig('PR_curve.png', dpi=300)
-
- # Compute F1 score (harmonic mean of precision and recall)
- f1 = 2 * p * r / (p + r + 1e-16)
-
- return p, r, ap, f1, unique_classes.astype('int32')
-
-
-def compute_ap(recall, precision):
- """ Compute the average precision, given the recall and precision curves.
- Source: https://github.com/rbgirshick/py-faster-rcnn.
- # Arguments
- recall: The recall curve (list).
- precision: The precision curve (list).
- # Returns
- The average precision as computed in py-faster-rcnn.
- """
-
- # Append sentinel values to beginning and end
- mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
- mpre = np.concatenate(([0.], precision, [0.]))
-
- # Compute the precision envelope
- mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
-
- # Integrate area under curve
- method = 'interp' # methods: 'continuous', 'interp'
- if method == 'interp':
- x = np.linspace(0, 1, 101) # 101-point interp (COCO)
- ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
- else: # 'continuous'
- i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
- ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
-
- return ap
-
-
-def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
+def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, EIoU=False, ECIoU=False, eps=1e-9):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
@@ -328,31 +203,45 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
- w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
- w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
- union = (w1 * h1 + 1e-16) + w2 * h2 - inter
+ w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
+ w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
+ union = w1 * h1 + w2 * h2 - inter + eps
- iou = inter / union # iou
- if GIoU or DIoU or CIoU:
+ iou = inter / union
+ if GIoU or DIoU or CIoU or EIoU or ECIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
- if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
- c_area = cw * ch + 1e-16 # convex area
- return iou - (c_area - union) / c_area # GIoU
- if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
- # convex diagonal squared
- c2 = cw ** 2 + ch ** 2 + 1e-16
- # centerpoint distance squared
- rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
+ if CIoU or DIoU or EIoU or ECIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
+ c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
+ rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
+ (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
- alpha = v / (1 - iou + v + 1e-16)
+ alpha = v / ((1 + eps) - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
-
- return iou
+ elif EIoU: # Efficient IoU https://arxiv.org/abs/2101.08158
+ rho3 = (w1 - w2) ** 2
+ c3 = cw ** 2 + eps
+ rho4 = (h1 - h2) ** 2
+ c4 = ch ** 2 + eps
+ return iou - rho2 / c2 - rho3 / c3 - rho4 / c4 # EIoU
+ elif ECIoU:
+ v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
+ with torch.no_grad():
+ alpha = v / ((1 + eps) - iou + v)
+ rho3 = (w1 - w2) ** 2
+ c3 = cw ** 2 + eps
+ rho4 = (h1 - h2) ** 2
+ c4 = ch ** 2 + eps
+ return iou - v * alpha - rho2 / c2 - rho3 / c3 - rho4 / c4 # ECIoU
+ else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
+ c_area = cw * ch + eps # convex area
+ return iou - (c_area - union) / c_area # GIoU
+ else:
+ return iou # IoU
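
A small usage sketch comparing the patched IoU variants on one overlapping pair (boxes in xywh form since x1y1x2y2=False; printed values are illustrative):

```python
import torch

box1 = torch.tensor([50., 50., 40., 40.])       # x_center, y_center, w, h
box2 = torch.tensor([[55., 55., 40., 40.]])     # nx4
for kw in ({}, {'GIoU': True}, {'DIoU': True}, {'CIoU': True}, {'EIoU': True}):
    print(kw or 'IoU', bbox_iou(box1, box2, x1y1x2y2=False, **kw))
```
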
def box_iou(box1, box2):
@@ -388,179 +277,11 @@ def wh_iou(wh1, wh2):
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
-class FocalLoss(nn.Module):
- # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
- def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
- super(FocalLoss, self).__init__()
- self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
- self.gamma = gamma
- self.alpha = alpha
- self.reduction = loss_fcn.reduction
- self.loss_fcn.reduction = 'none' # required to apply FL to each element
-
- def forward(self, pred, true):
- loss = self.loss_fcn(pred, true)
- # p_t = torch.exp(-loss)
- # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
-
- # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
- pred_prob = torch.sigmoid(pred) # prob from logits
- p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
- alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
- modulating_factor = (1.0 - p_t) ** self.gamma
- loss *= alpha_factor * modulating_factor
-
- if self.reduction == 'mean':
- return loss.mean()
- elif self.reduction == 'sum':
- return loss.sum()
- else: # 'none'
- return loss
-
-
-def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
- # return positive, negative label smoothing BCE targets
- return 1.0 - 0.5 * eps, 0.5 * eps
-
-
-class BCEBlurWithLogitsLoss(nn.Module):
- # BCEwithLogitLoss() with reduced missing label effects.
- def __init__(self, alpha=0.05):
- super(BCEBlurWithLogitsLoss, self).__init__()
- self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
- self.alpha = alpha
-
- def forward(self, pred, true):
- loss = self.loss_fcn(pred, true)
- pred = torch.sigmoid(pred) # prob from logits
- dx = pred - true # reduce only missing label effects
- # dx = (pred - true).abs() # reduce missing label and false label effects
- alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
- loss *= alpha_factor
- return loss.mean()
-
-
-def compute_loss(p, targets, model): # predictions, targets, model
- device = targets.device
- lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
- tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets
- h = model.hyp # hyperparameters
-
- # Define criteria
- BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['cls_pw']])).to(device)
- BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['obj_pw']])).to(device)
-
- # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
- cp, cn = smooth_BCE(eps=0.0)
-
- # Focal loss
- g = h['fl_gamma'] # focal loss gamma
- if g > 0:
- BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
-
- # Losses
- nt = 0 # number of targets
- np = len(p) # number of outputs
- balance = [4.0, 1.0, 0.4] if np == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6
- for i, pi in enumerate(p): # layer index, layer predictions
- b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
- tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
-
- n = b.shape[0] # number of targets
- if n:
- nt += n # cumulative targets
- ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
-
- # Regression
- pxy = ps[:, :2].sigmoid() * 2. - 0.5
- pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
- #pxy = torch.sigmoid(ps[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
- #pwh = torch.exp(ps[:, 2:4]).clamp(max=1E3) * anchors[i]
- pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box
- giou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # giou(prediction, target)
- lbox += (1.0 - giou).mean() # giou loss
-
- # Objectness
- tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype) # giou ratio
-
- # Classification
- if model.nc > 1: # cls loss (only if multiple classes)
- t = torch.full_like(ps[:, 5:], cn, device=device) # targets
- t[range(n), tcls[i]] = cp
- lcls += BCEcls(ps[:, 5:], t) # BCE
-
- # Append targets to text file
- # with open('targets.txt', 'a') as file:
- # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
-
- lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss
-
- s = 3 / np # output count scaling
- lbox *= h['giou'] * s
- lobj *= h['obj'] * s * (1.4 if np == 4 else 1.)
- lcls *= h['cls'] * s
- bs = tobj.shape[0] # batch size
-
- loss = lbox + lobj + lcls
- return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
-
-
-def build_targets(p, targets, model):
- nt = targets.shape[0] # number of anchors, targets
- tcls, tbox, indices, anch = [], [], [], []
- gain = torch.ones(6, device=targets.device) # normalized to gridspace gain
- off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float() # overlap offsets
-
- g = 0.5 # offset
- multi_gpu = is_parallel(model)
- for i, jj in enumerate(model.module.yolo_layers if multi_gpu else model.yolo_layers):
- # get number of grid points and anchor vec for this yolo layer
- anchors = model.module.module_list[jj].anchor_vec if multi_gpu else model.module_list[jj].anchor_vec
- gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
-
- # Match targets to anchors
- a, t, offsets = [], targets * gain, 0
- if nt:
- na = anchors.shape[0] # number of anchors
- at = torch.arange(na).view(na, 1).repeat(1, nt) # anchor tensor, same as .repeat_interleave(nt)
- r = t[None, :, 4:6] / anchors[:, None] # wh ratio
- j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
- # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
- a, t = at[j], t.repeat(na, 1, 1)[j] # filter
-
- # overlaps
- gxy = t[:, 2:4] # grid xy
- z = torch.zeros_like(gxy)
- j, k = ((gxy % 1. < g) & (gxy > 1.)).T
- l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
- a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
- offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g
-
- # Define
- b, c = t[:, :2].long().T # image, class
- gxy = t[:, 2:4] # grid xy
- gwh = t[:, 4:6] # grid wh
- gij = (gxy - offsets).long()
- gi, gj = gij.T # grid xy indices
-
- # Append
- #indices.append((b, a, gj, gi)) # image, anchor, grid indices
- indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
- tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
- anch.append(anchors[a]) # anchors
- tcls.append(c) # class
-
- return tcls, tbox, indices, anch
-
-
def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
"""Performs Non-Maximum Suppression (NMS) on inference results
-
Returns:
detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
"""
- if prediction.dtype is torch.float16:
- prediction = prediction.float() # to FP32
nc = prediction[0].shape[1] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
@@ -573,7 +294,7 @@ def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False,
multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
t = time.time()
- output = [None] * prediction.shape[0]
+ output = [torch.zeros(0, 6)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
@@ -616,19 +337,16 @@ def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False,
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
- i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
+ i = torch.ops.torchvision.nms(boxes, scores, iou_thres)
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
- try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
- iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
- weights = iou * scores[None] # box weights
- x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
- if redundant:
- i = i[iou.sum(1) > 1] # require redundancy
- except: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
- print(x, i, x.shape, i.shape)
- pass
+ # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
+ iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
+ weights = iou * scores[None] # box weights
+ x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
+ if redundant:
+ i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
@@ -637,7 +355,7 @@ def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False,
return output
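
torch.ops.torchvision.nms is the registered kernel that torchvision.ops.nms ultimately calls, so the swap above drops a Python wrapper without changing results. A minimal sketch of the batched-NMS trick used here, where boxes are offset by class so different classes never suppress each other:

```python
import torch
import torchvision  # the import registers torch.ops.torchvision

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]])  # heavy overlap
scores = torch.tensor([0.9, 0.8])
classes = torch.tensor([[0.], [1.]])
offset_boxes = boxes + classes * 4096           # per-class coordinate offset
keep = torch.ops.torchvision.nms(offset_boxes, scores, 0.5)
print(keep)  # tensor([0, 1]) -- different classes, both survive
```
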
-def strip_optimizer(f='weights/best.pt', s=''): # from utils.utils import *; strip_optimizer()
+def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
x['optimizer'] = None
@@ -651,170 +369,6 @@ def strip_optimizer(f='weights/best.pt', s=''): # from utils.utils import *; st
print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))
-def coco_class_count(path='../coco/labels/train2014/'):
- # Histogram of occurrences per class
- nc = 80 # number classes
- x = np.zeros(nc, dtype='int32')
- files = sorted(glob.glob('%s/*.*' % path))
- for i, file in enumerate(files):
- labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
- x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
- print(i, len(files))
-
-
-def coco_only_people(path='../coco/labels/train2017/'): # from utils.utils import *; coco_only_people()
- # Find images with only people
- files = sorted(glob.glob('%s/*.*' % path))
- for i, file in enumerate(files):
- labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
- if all(labels[:, 0] == 0):
- print(labels.shape[0], file)
-
-
-def crop_images_random(path='../images/', scale=0.50): # from utils.utils import *; crop_images_random()
- # crops images into random squares up to scale fraction
- # WARNING: overwrites images!
- for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
- img = cv2.imread(file) # BGR
- if img is not None:
- h, w = img.shape[:2]
-
- # create random mask
- a = 30 # minimum size (pixels)
- mask_h = random.randint(a, int(max(a, h * scale))) # mask height
- mask_w = mask_h # mask width
-
- # box
- xmin = max(0, random.randint(0, w) - mask_w // 2)
- ymin = max(0, random.randint(0, h) - mask_h // 2)
- xmax = min(w, xmin + mask_w)
- ymax = min(h, ymin + mask_h)
-
- # apply random color mask
- cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
-
-
-def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
- # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
- if os.path.exists('new/'):
- shutil.rmtree('new/') # delete output folder
- os.makedirs('new/') # make new output folder
- os.makedirs('new/labels/')
- os.makedirs('new/images/')
- for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
- with open(file, 'r') as f:
- labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
- i = labels[:, 0] == label_class
- if any(i):
- img_file = file.replace('labels', 'images').replace('txt', 'jpg')
- labels[:, 0] = 0 # reset class to 0
- with open('new/images.txt', 'a') as f: # add image to dataset list
- f.write(img_file + '\n')
- with open('new/labels/' + Path(file).name, 'a') as f: # write label
- for l in labels[i]:
- f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
- shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
-
-
-def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
- """ Creates kmeans-evolved anchors from training dataset
-
- Arguments:
- path: path to dataset *.yaml, or a loaded dataset
- n: number of anchors
- img_size: image size used for training
- thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
- gen: generations to evolve anchors using genetic algorithm
-
- Return:
- k: kmeans evolved anchors
-
- Usage:
- from utils.utils import *; _ = kmean_anchors()
- """
- thr = 1. / thr
-
- def metric(k, wh): # compute metrics
- r = wh[:, None] / k[None]
- x = torch.min(r, 1. / r).min(2)[0] # ratio metric
- # x = wh_iou(wh, torch.tensor(k)) # iou metric
- return x, x.max(1)[0] # x, best_x
-
- def fitness(k): # mutation fitness
- _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
- return (best * (best > thr).float()).mean() # fitness
-
- def print_results(k):
- k = k[np.argsort(k.prod(1))] # sort small to large
- x, best = metric(k, wh0)
- bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
- print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
- print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
- (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
- for i, x in enumerate(k):
- print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
- return k
-
- if isinstance(path, str): # *.yaml file
- with open(path) as f:
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
- from utils.datasets import LoadImagesAndLabels
- dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
- else:
- dataset = path # dataset
-
- # Get label wh
- shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
- wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
-
- # Filter
- i = (wh0 < 3.0).any(1).sum()
- if i:
- print('WARNING: Extremely small objects found. '
- '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
- wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
-
- # Kmeans calculation
- print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
- s = wh.std(0) # sigmas for whitening
- k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
- k *= s
- wh = torch.tensor(wh, dtype=torch.float32) # filtered
- wh0 = torch.tensor(wh0, dtype=torch.float32) # unflitered
- k = print_results(k)
-
- # Plot
- # k, d = [None] * 20, [None] * 20
- # for i in tqdm(range(1, 21)):
- # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
- # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
- # ax = ax.ravel()
- # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
- # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
- # ax[0].hist(wh[wh[:, 0]<100, 0],400)
- # ax[1].hist(wh[wh[:, 1]<100, 1],400)
- # fig.tight_layout()
- # fig.savefig('wh.png', dpi=200)
-
- # Evolve
- npr = np.random
- f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma
- pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
- for _ in pbar:
- v = np.ones(sh)
- while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
- v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
- kg = (k.copy() * v).clip(min=2.0)
- fg = fitness(kg)
- if fg > f:
- f, k = fg, kg.copy()
- pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
- if verbose:
- print_results(k)
-
- return print_results(k)
-
-
def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
@@ -823,7 +377,9 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
if bucket:
- os.system('gsutil cp gs://%s/evolve.txt .' % bucket) # download evolve.txt
+ url = 'gs://%s/evolve.txt' % bucket
+ if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
+ os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local
with open('evolve.txt', 'a') as f: # append result
f.write(c + b + '\n')
@@ -831,9 +387,6 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
x = x[np.argsort(-fitness(x))] # sort
np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness
- if bucket:
- os.system('gsutil cp evolve.txt gs://%s' % bucket) # upload evolve.txt
-
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
@@ -843,6 +396,9 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
yaml.dump(hyp, f, sort_keys=False)
+ if bucket:
+ os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
+
def apply_classifier(x, model, img, im0):
# applies a second stage classifier to yolo outputs
@@ -879,359 +435,14 @@ def apply_classifier(x, model, img, im0):
return x
-def fitness(x):
- # Returns fitness (for use with results.txt or evolve.txt)
- w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
- return (x[:, :4] * w).sum(1)
-
-
-def output_to_target(output, width, height):
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
- if isinstance(output, torch.Tensor):
- output = output.cpu().numpy()
-
- targets = []
- for i, o in enumerate(output):
- if o is not None:
- for pred in o:
- box = pred[:4]
- w = (box[2] - box[0]) / width
- h = (box[3] - box[1]) / height
- x = box[0] / width + w / 2
- y = box[1] / height + h / 2
- conf = pred[4]
- cls = int(pred[5])
-
- targets.append([i, cls, x, y, w, h, conf])
-
- return np.array(targets)
-
-
-def increment_dir(dir, comment=''):
- # Increments a directory runs/exp1 --> runs/exp2_comment
- n = 0 # number
- dir = str(Path(dir)) # os-agnostic
- d = sorted(glob.glob(dir + '*')) # directories
- if len(d):
- n = max([int(x[len(dir):x.find('_') if '_' in x else None]) for x in d]) + 1 # increment
- return dir + str(n) + ('_' + comment if comment else '')
-
-
-# Plotting functions ---------------------------------------------------------------------------------------------------
-def hist2d(x, y, n=100):
- # 2d histogram used in labels.png and evolve.png
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
- return np.log(hist[xidx, yidx])
-
-
-def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
- def butter_lowpass(cutoff, fs, order):
- nyq = 0.5 * fs
- normal_cutoff = cutoff / nyq
- b, a = butter(order, normal_cutoff, btype='low', analog=False)
- return b, a
-
- b, a = butter_lowpass(cutoff, fs, order=order)
- return filtfilt(b, a, data) # forward-backward filter
-
-
-def plot_one_box(x, img, color=None, label=None, line_thickness=None):
- # Plots one bounding box on image img
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
- color = color or [random.randint(0, 255) for _ in range(3)]
- c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
- if label:
- tf = max(tl - 1, 1) # font thickness
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
- c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
-
-
-def plot_wh_methods(): # from utils.utils import *; plot_wh_methods()
- # Compares the two methods for width-height anchor multiplication
- # https://github.com/ultralytics/yolov3/issues/168
- x = np.arange(-4.0, 4.0, .1)
- ya = np.exp(x)
- yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
-
- fig = plt.figure(figsize=(6, 3), dpi=150)
- plt.plot(x, ya, '.-', label='YOLO')
- plt.plot(x, yb ** 2, '.-', label='YOLO ^2')
- plt.plot(x, yb ** 1.6, '.-', label='YOLO ^1.6')
- plt.xlim(left=-4, right=4)
- plt.ylim(bottom=0, top=6)
- plt.xlabel('input')
- plt.ylabel('output')
- plt.grid()
- plt.legend()
- fig.tight_layout()
- fig.savefig('comparison.png', dpi=200)
-
-
-def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
- tl = 3 # line thickness
- tf = max(tl - 1, 1) # font thickness
- if os.path.isfile(fname): # do not overwrite
- return None
-
- if isinstance(images, torch.Tensor):
- images = images.cpu().float().numpy()
-
- if isinstance(targets, torch.Tensor):
- targets = targets.cpu().numpy()
-
- # un-normalise
- if np.max(images[0]) <= 1:
- images *= 255
-
- bs, _, h, w = images.shape # batch size, _, height, width
- bs = min(bs, max_subplots) # limit plot images
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
-
- # Check if we should resize
- scale_factor = max_size / max(h, w)
- if scale_factor < 1:
- h = math.ceil(scale_factor * h)
- w = math.ceil(scale_factor * w)
-
- # Empty array for output
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)
-
- # Fix class - colour map
- prop_cycle = plt.rcParams['axes.prop_cycle']
- # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
- hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
- color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]
-
- for i, img in enumerate(images):
- if i == max_subplots: # if last batch has fewer images than we expect
- break
-
- block_x = int(w * (i // ns))
- block_y = int(h * (i % ns))
-
- img = img.transpose(1, 2, 0)
- if scale_factor < 1:
- img = cv2.resize(img, (w, h))
-
- mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
- if len(targets) > 0:
- image_targets = targets[targets[:, 0] == i]
- boxes = xywh2xyxy(image_targets[:, 2:6]).T
- classes = image_targets[:, 1].astype('int')
- gt = image_targets.shape[1] == 6 # ground truth if no conf column
- conf = None if gt else image_targets[:, 6] # check for confidence presence (gt vs pred)
-
- boxes[[0, 2]] *= w
- boxes[[0, 2]] += block_x
- boxes[[1, 3]] *= h
- boxes[[1, 3]] += block_y
- for j, box in enumerate(boxes.T):
- cls = int(classes[j])
- color = color_lut[cls % len(color_lut)]
- cls = names[cls] if names else cls
- if gt or conf[j] > 0.3: # 0.3 conf thresh
- label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
- plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
-
- # Draw image filename labels
- if paths is not None:
- label = os.path.basename(paths[i])[:40] # trim to 40 char
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
- cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
- lineType=cv2.LINE_AA)
-
- # Image border
- cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
-
- if fname is not None:
- mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
- cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))
-
- return mosaic
-
-
-def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
- # Plot LR simulating training for full epochs
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
- y = []
- for _ in range(epochs):
- scheduler.step()
- y.append(optimizer.param_groups[0]['lr'])
- plt.plot(y, '.-', label='LR')
- plt.xlabel('epoch')
- plt.ylabel('LR')
- plt.grid()
- plt.xlim(0, epochs)
- plt.ylim(0)
- plt.tight_layout()
- plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
-
-
-def plot_test_txt(): # from utils.utils import *; plot_test()
- # Plot test.txt histograms
- x = np.loadtxt('test.txt', dtype=np.float32)
- box = xyxy2xywh(x[:, :4])
- cx, cy = box[:, 0], box[:, 1]
-
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
- ax.set_aspect('equal')
- plt.savefig('hist2d.png', dpi=300)
-
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
- ax[0].hist(cx, bins=600)
- ax[1].hist(cy, bins=600)
- plt.savefig('hist1d.png', dpi=200)
-
-
-def plot_targets_txt(): # from utils.utils import *; plot_targets_txt()
- # Plot targets.txt histograms
- x = np.loadtxt('targets.txt', dtype=np.float32).T
- s = ['x targets', 'y targets', 'width targets', 'height targets']
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
- ax = ax.ravel()
- for i in range(4):
- ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
- ax[i].legend()
- ax[i].set_title(s[i])
- plt.savefig('targets.jpg', dpi=200)
-
-
-def plot_study_txt(f='study.txt', x=None): # from utils.utils import *; plot_study_txt()
- # Plot study.txt generated by test.py
- fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
- ax = ax.ravel()
-
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
- for f in ['coco_study/study_coco_yolov4%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
- x = np.arange(y.shape[1]) if x is None else np.array(x)
- s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
- for i in range(7):
- ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
- ax[i].set_title(s[i])
-
- j = y[3].argmax() + 1
- ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
- label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
-
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.8, 39.6, 43.0, 47.5, 49.4, 50.7],
- 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
-
- ax2.grid()
- ax2.set_xlim(0, 30)
- ax2.set_ylim(28, 50)
- ax2.set_yticks(np.arange(30, 55, 5))
- ax2.set_xlabel('GPU Speed (ms/img)')
- ax2.set_ylabel('COCO AP val')
- ax2.legend(loc='lower right')
- plt.savefig('study_mAP_latency.png', dpi=300)
- plt.savefig(f.replace('.txt', '.png'), dpi=200)
-
-
-def plot_labels(labels, save_dir=''):
- # plot dataset labels
- c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
- nc = int(c.max() + 1) # number of classes
-
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
- ax = ax.ravel()
- ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
- ax[0].set_xlabel('classes')
- ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
- ax[1].set_xlabel('x')
- ax[1].set_ylabel('y')
- ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
- ax[2].set_xlabel('width')
- ax[2].set_ylabel('height')
- plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
- plt.close()
-
-
-def plot_evolution(yaml_file='runs/evolve/hyp_evolved.yaml'): # from utils.utils import *; plot_evolution()
- # Plot hyperparameter evolution results in evolve.txt
- with open(yaml_file) as f:
- hyp = yaml.load(f, Loader=yaml.FullLoader)
- x = np.loadtxt('evolve.txt', ndmin=2)
- f = fitness(x)
- # weights = (f - f.min()) ** 2 # for weighted results
- plt.figure(figsize=(10, 10), tight_layout=True)
- matplotlib.rc('font', **{'size': 8})
- for i, (k, v) in enumerate(hyp.items()):
- y = x[:, i + 7]
- # mu = (y * weights).sum() / weights.sum() # best weighted result
- mu = y[f.argmax()] # best single result
- plt.subplot(5, 5, i + 1)
- plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
- plt.plot(mu, f.max(), 'k+', markersize=15)
- plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
- if i % 5 != 0:
- plt.yticks([])
- print('%15s: %.3g' % (k, mu))
- plt.savefig('evolve.png', dpi=200)
- print('\nPlot saved as evolve.png')
-
-
-def plot_results_overlay(start=0, stop=0): # from utils.utils import *; plot_results_overlay()
- # Plot training 'results*.txt', overlaying train and val losses
- s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
- t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
- for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
- n = results.shape[1] # number of rows
- x = range(start, min(stop, n) if stop else n)
- fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
- ax = ax.ravel()
- for i in range(5):
- for j in [i, i + 5]:
- y = results[j, x]
- ax[i].plot(x, y, marker='.', label=s[j])
- # y_smooth = butter_lowpass_filtfilt(y)
- # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
-
- ax[i].set_title(t[i])
- ax[i].legend()
- ax[i].set_ylabel(f) if i == 0 else None # add filename
- fig.savefig(f.replace('.txt', '.png'), dpi=200)
-
-
-def plot_results(start=0, stop=0, bucket='', id=(), labels=(),
- save_dir=''): # from utils.utils import *; plot_results()
- # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov3
- fig, ax = plt.subplots(2, 5, figsize=(12, 6))
- ax = ax.ravel()
- s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
- 'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
- if bucket:
- os.system('rm -rf storage.googleapis.com')
- files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
+def increment_path(path, exist_ok=True, sep=''):
+    # Increment path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3 etc. (first increment starts at 2)
+ path = Path(path) # os-agnostic
+ if (path.exists() and exist_ok) or (not path.exists()):
+ return str(path)
else:
- files = glob.glob(str(Path(save_dir) / 'results*.txt')) + glob.glob('../../Downloads/results*.txt')
- for fi, f in enumerate(files):
- try:
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
- n = results.shape[1] # number of rows
- x = range(start, min(stop, n) if stop else n)
- for i in range(10):
- y = results[i, x]
- if i in [0, 1, 2, 5, 6, 7]:
- y[y == 0] = np.nan # dont show zero loss values
- # y /= y[0] # normalize
- label = labels[fi] if len(labels) else Path(f).stem
- ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
- ax[i].set_title(s[i])
- # if i in [5, 6, 7]: # share train and val loss y axes
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
- except:
- print('Warning: Plotting error for %s, skipping file' % f)
-
- fig.tight_layout()
- ax[1].legend()
- fig.savefig(Path(save_dir) / 'results.png', dpi=200)
+ dirs = glob.glob(f"{path}{sep}*") # similar paths
+        matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]  # extract existing run indices
+ i = [int(m.groups()[0]) for m in matches if m] # indices
+ n = max(i) + 1 if i else 2 # increment number
+ return f"{path}{sep}{n}" # update path
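
A quick usage sketch of the new helper (a sketch, assuming it lands in utils/general.py as this patch indicates; paths are illustrative):

```
from utils.general import increment_path

# With exist_ok=True (the default) an existing path is returned unchanged:
print(increment_path('runs/exp'))  # 'runs/exp'

# With exist_ok=False and 'runs/exp' already on disk, the next free index is
# used, starting at 2 (or max existing index + 1):
# print(increment_path('runs/exp', exist_ok=False))  # e.g. 'runs/exp2'
```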
From f29646c9460540e15e557129eea2cf7a6a19b31a Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:45:05 +0800
Subject: [PATCH 22/37] Update google_utils.py
---
utils/google_utils.py | 96 +++++++++++++++++++++++++++++++------------
1 file changed, 70 insertions(+), 26 deletions(-)
diff --git a/utils/google_utils.py b/utils/google_utils.py
index 453e953..ac1f75a 100644
--- a/utils/google_utils.py
+++ b/utils/google_utils.py
@@ -1,41 +1,58 @@
-# This file contains google utils: https://cloud.google.com/storage/docs/reference/libraries
-# pip install --upgrade google-cloud-storage
-# from google.cloud import storage
+# Google utils: https://cloud.google.com/storage/docs/reference/libraries
import os
import platform
+import subprocess
import time
from pathlib import Path
+import torch
-def attempt_download(weights):
- # Attempt to download pretrained weights if not found locally
- weights = weights.strip().replace("'", '')
- msg = weights + ' missing'
-
- r = 1 # return
- if len(weights) > 0 and not os.path.isfile(weights):
- d = {'': '',
- }
- file = Path(weights).name
- if file in d:
- r = gdrive_download(id=d[file], name=weights)
+def gsutil_getsize(url=''):
+ # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
+ s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8')
+    return int(s.split(' ')[0]) if len(s) else 0  # bytes (parse with int, avoid eval on shell output)
- if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
- os.remove(weights) if os.path.exists(weights) else None # remove partial downloads
- s = 'curl -L -o %s "storage.googleapis.com/%s"' % (weights, file)
- r = os.system(s) # execute, capture return values
- # Error check
- if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
- os.remove(weights) if os.path.exists(weights) else None # remove partial downloads
- raise Exception(msg)
+def attempt_download(weights):
+ # Attempt to download pretrained weights if not found locally
+ weights = weights.strip().replace("'", '')
+ file = Path(weights).name
+
+ msg = weights + ' missing, try downloading from https://github.com/WongKinYiu/ScaledYOLOv4/releases/'
+ models = ['yolov4-csp.pt', 'yolov4-csp-x.pt'] # available models
+
+ if file in models and not os.path.isfile(weights):
+
+ try: # GitHub
+ url = 'https://github.com/WongKinYiu/ScaledYOLOv4/releases/download/v1.0/' + file
+ print('Downloading %s to %s...' % (url, weights))
+ torch.hub.download_url_to_file(url, weights)
+ assert os.path.exists(weights) and os.path.getsize(weights) > 1E6 # check
+        except Exception as e:
+            print('ERROR: Download failure: %s' % e)
+            print('')
+
+
+def attempt_load(weights, map_location=None):
+ # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+    model = Ensemble()  # Ensemble module list is defined in models/experimental.py
+ for w in weights if isinstance(weights, list) else [weights]:
+ attempt_download(w)
+ model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model
+
+ if len(model) == 1:
+ return model[-1] # return model
+ else:
+ print('Ensemble created with %s\n' % weights)
+ for k in ['names', 'stride']:
+ setattr(model, k, getattr(model[-1], k))
+ return model # return ensemble
def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'):
- # Downloads a file from Google Drive, accepting presented query
- # from utils.google_utils import *; gdrive_download()
+ # Downloads a file from Google Drive. from utils.google_utils import *; gdrive_download()
t = time.time()
print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')
@@ -49,7 +66,7 @@ def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'):
s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name)
else: # small file
s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id)
- r = os.system(s) # execute, capture return values
+ r = os.system(s) # execute, capture return
os.remove('cookie') if os.path.exists('cookie') else None
# Error check
@@ -74,3 +91,30 @@ def get_token(cookie="./cookie"):
if "download" in line:
return line.split()[-1]
return ""
+
+# def upload_blob(bucket_name, source_file_name, destination_blob_name):
+# # Uploads a file to a bucket
+# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
+#
+# storage_client = storage.Client()
+# bucket = storage_client.get_bucket(bucket_name)
+# blob = bucket.blob(destination_blob_name)
+#
+# blob.upload_from_filename(source_file_name)
+#
+# print('File {} uploaded to {}.'.format(
+# source_file_name,
+# destination_blob_name))
+#
+#
+# def download_blob(bucket_name, source_blob_name, destination_file_name):
+# # Uploads a blob from a bucket
+# storage_client = storage.Client()
+# bucket = storage_client.get_bucket(bucket_name)
+# blob = bucket.blob(source_blob_name)
+#
+# blob.download_to_filename(destination_file_name)
+#
+# print('Blob {} downloaded to {}.'.format(
+# source_blob_name,
+# destination_file_name))
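
A minimal sketch of how the reworked download helpers are meant to be called (file names come from the models list above; attempt_load additionally assumes Ensemble from models/experimental.py is in scope):

```
from utils.google_utils import attempt_download

attempt_download('weights/yolov4-csp.pt')  # no-op if present, otherwise pulled from the GitHub release
# model = attempt_load('weights/yolov4-csp.pt', map_location='cpu')  # single model, fused, in eval mode
```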
From b1bd227e67cf24772c1c5a11bf9fe4a4fbc43e1c Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:48:01 +0800
Subject: [PATCH 23/37] Update layers.py
---
utils/layers.py | 55 +++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 49 insertions(+), 6 deletions(-)
diff --git a/utils/layers.py b/utils/layers.py
index edb0a69..1d46a18 100644
--- a/utils/layers.py
+++ b/utils/layers.py
@@ -5,7 +5,18 @@
import torch
from torch import nn
-from mish_cuda import MishCuda as Mish
+try:
+ from mish_cuda import MishCuda as Mish
+
+except ImportError:  # fall back to a pure-PyTorch Mish if mish-cuda is unavailable
+ class Mish(nn.Module): # https://github.com/digantamisra98/Mish
+ def forward(self, x):
+ return x * F.softplus(x).tanh()
+
+
+class Reorg(nn.Module):
+ def forward(self, x):
+ return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
def make_divisible(v, divisor):
@@ -178,10 +189,6 @@ def forward(self, x):
return x * F.hardtanh(x + 3, 0., 6., True) / 6.
-#class Mish(nn.Module): # https://github.com/digantamisra98/Mish
-# def forward(self, x):
-# return x * F.softplus(x).tanh()
-
class DeformConv2d(nn.Module):
def __init__(self, inc, outc, kernel_size=3, padding=1, stride=1, bias=None, modulation=False):
"""
@@ -320,4 +327,40 @@ def _reshape_x_offset(x_offset, ks):
x_offset = torch.cat([x_offset[..., s:s+ks].contiguous().view(b, c, h, w*ks) for s in range(0, N, ks)], dim=-1)
x_offset = x_offset.contiguous().view(b, c, h*ks, w*ks)
- return x_offset
\ No newline at end of file
+ return x_offset
+
+
+class GAP(nn.Module):
+ def __init__(self):
+ super(GAP, self).__init__()
+ self.avg_pool = nn.AdaptiveAvgPool2d(1)
+ def forward(self, x):
+ #b, c, _, _ = x.size()
+ return self.avg_pool(x)#.view(b, c)
+
+
+class Silence(nn.Module):
+ def __init__(self):
+ super(Silence, self).__init__()
+ def forward(self, x):
+ return x
+
+
+class ScaleChannel(nn.Module):  # channel-wise scaling of an earlier layer's features https://arxiv.org/abs/1911.09070
+ def __init__(self, layers):
+ super(ScaleChannel, self).__init__()
+ self.layers = layers # layer indices
+
+ def forward(self, x, outputs):
+ a = outputs[self.layers[0]]
+ return x.expand_as(a) * a
+
+
+class ScaleSpatial(nn.Module):  # spatial attention scaling of an earlier layer's features https://arxiv.org/abs/1911.09070
+ def __init__(self, layers):
+ super(ScaleSpatial, self).__init__()
+ self.layers = layers # layer indices
+
+ def forward(self, x, outputs):
+ a = outputs[self.layers[0]]
+ return x * a
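
The new Reorg module is a 2x2 space-to-depth rearrangement; a small shape check (a sketch, assuming utils.layers is importable):

```
import torch
from utils.layers import Reorg

x = torch.randn(1, 64, 160, 160)
y = Reorg()(x)
print(y.shape)  # torch.Size([1, 256, 80, 80]): 4x the channels, half the height and width
```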
From 8ead5ee970b3553605da9b23c1e8c9482183b73a Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:48:45 +0800
Subject: [PATCH 24/37] Update parse_config.py
---
utils/parse_config.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/utils/parse_config.py b/utils/parse_config.py
index 4208748..d6cbfdd 100644
--- a/utils/parse_config.py
+++ b/utils/parse_config.py
@@ -21,6 +21,7 @@ def parse_model_cfg(path):
mdefs[-1]['type'] = line[1:-1].rstrip()
if mdefs[-1]['type'] == 'convolutional':
mdefs[-1]['batch_normalize'] = 0 # pre-populate with zeros (may be overwritten later)
+
else:
key, val = line.split("=")
key = key.rstrip()
@@ -40,7 +41,7 @@ def parse_model_cfg(path):
supported = ['type', 'batch_normalize', 'filters', 'size', 'stride', 'pad', 'activation', 'layers', 'groups',
'from', 'mask', 'anchors', 'classes', 'num', 'jitter', 'ignore_thresh', 'truth_thresh', 'random',
'stride_x', 'stride_y', 'weights_type', 'weights_normalization', 'scale_x_y', 'beta_nms', 'nms_kind',
- 'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh']
+ 'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh', 'atoms', 'na', 'nc']
f = [] # fields
for x in mdefs[1:]:
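
For reference, a sketch of how the parser is typically driven (the cfg path is the one shipped in this repo):

```
from utils.parse_config import parse_model_cfg

mdefs = parse_model_cfg('models/yolov4-csp.cfg')
print(mdefs[0]['type'])  # 'net' hyperparameter block, followed by one dict per layer
# Keys such as 'atoms', 'na' and 'nc' now pass the supported-fields check above.
```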
From 53e275641a178de51727788e7cf6349a426af130 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:49:18 +0800
Subject: [PATCH 25/37] Update torch_utils.py
---
utils/torch_utils.py | 110 ++++++++++++++++++++++++-------------------
1 file changed, 62 insertions(+), 48 deletions(-)
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 139c7f3..4d07baa 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -1,19 +1,36 @@
+# PyTorch utils
+
+import logging
import math
import os
import time
+from contextlib import contextmanager
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
-import torchvision.models as models
+import torchvision
+logger = logging.getLogger(__name__)
-def init_seeds(seed=0):
- torch.manual_seed(seed)
+@contextmanager
+def torch_distributed_zero_first(local_rank: int):
+ """
+    Context manager making all processes in distributed training wait for each local master to do something.
+ """
+ if local_rank not in [-1, 0]:
+ torch.distributed.barrier()
+ yield
+ if local_rank == 0:
+ torch.distributed.barrier()
+
+
+def init_torch_seeds(seed=0):
# Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
+ torch.manual_seed(seed)
if seed == 0: # slower, more reproducible
cudnn.deterministic = True
cudnn.benchmark = False
@@ -36,16 +53,15 @@ def select_device(device='', batch_size=None):
if ng > 1 and batch_size: # check that batch_size is compatible with device_count
assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
x = [torch.cuda.get_device_properties(i) for i in range(ng)]
- s = 'Using CUDA '
+ s = f'Using torch {torch.__version__} '
for i in range(0, ng):
if i == 1:
s = ' ' * len(s)
- print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
- (s, i, x[i].name, x[i].total_memory / c))
+ logger.info("%sCUDA:%g (%s, %dMB)" % (s, i, x[i].name, x[i].total_memory / c))
else:
- print('Using CPU')
+ logger.info(f'Using torch {torch.__version__} CPU')
- print('') # skip a line
+ logger.info('') # skip a line
return torch.device('cuda:0' if cuda else 'cpu')
@@ -71,7 +87,7 @@ def initialize_weights(model):
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
- elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
+ elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
@@ -101,31 +117,30 @@ def prune(model, amount=0.3):
def fuse_conv_and_bn(conv, bn):
- # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
- with torch.no_grad():
- # init
- fusedconv = nn.Conv2d(conv.in_channels,
- conv.out_channels,
- kernel_size=conv.kernel_size,
- stride=conv.stride,
- padding=conv.padding,
- bias=True).to(conv.weight.device)
-
- # prepare filters
- w_conv = conv.weight.clone().view(conv.out_channels, -1)
- w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
- fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
-
- # prepare spatial bias
- b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
- b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
- fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
-
- return fusedconv
-
-
-def model_info(model, verbose=False):
- # Plots a line-by-line description of a PyTorch model
+ # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
+ fusedconv = nn.Conv2d(conv.in_channels,
+ conv.out_channels,
+ kernel_size=conv.kernel_size,
+ stride=conv.stride,
+ padding=conv.padding,
+ groups=conv.groups,
+ bias=True).requires_grad_(False).to(conv.weight.device)
+
+ # prepare filters
+ w_conv = conv.weight.clone().view(conv.out_channels, -1)
+ w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
+ fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
+
+ # prepare spatial bias
+ b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
+ b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
+ fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
+
+ return fusedconv
+
+
+def model_info(model, verbose=False, img_size=640):
+ # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
@@ -137,26 +152,25 @@ def model_info(model, verbose=False):
try: # FLOPS
from thop import profile
- flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
- fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS
- except:
+        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand int to [h, w]
+        flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, img_size[0], img_size[1]),), verbose=False)[0] / 1E9 * 2
+        fs = ', %.1f GFLOPS' % flops  # GFLOPS at the given img_size
+    except Exception:  # thop missing or profiling failed
fs = ''
- print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
+ logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
# Loads a pretrained model reshaped to n-class output
- model = models.__dict__[name](pretrained=True)
-
- # Display model properties
- input_size = [3, 224, 224]
- input_space = 'RGB'
- input_range = [0, 1]
- mean = [0.485, 0.456, 0.406]
- std = [0.229, 0.224, 0.225]
- for x in [input_size, input_space, input_range, mean, std]:
- print(x + ' =', eval(x))
+ model = torchvision.models.__dict__[name](pretrained=True)
+
+ # ResNet model properties
+ # input_size = [3, 224, 224]
+ # input_space = 'RGB'
+ # input_range = [0, 1]
+ # mean = [0.485, 0.456, 0.406]
+ # std = [0.229, 0.224, 0.225]
# Reshape output to n classes
filters = model.fc.weight.shape[1]
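
A quick numerical check that the rewritten fuse_conv_and_bn reproduces Conv2d followed by BatchNorm2d in eval mode (a sketch; the layer sizes are arbitrary):

```
import torch
import torch.nn as nn
from utils.torch_utils import fuse_conv_and_bn

conv = nn.Conv2d(8, 16, 3, padding=1, bias=False).eval()
bn = nn.BatchNorm2d(16).eval()
bn.weight.data.uniform_(0.5, 1.5)  # randomize BN parameters so the check is non-trivial
bn.bias.data.uniform_(-1, 1)
bn.running_mean.uniform_(-1, 1)
bn.running_var.uniform_(0.5, 1.5)

x = torch.randn(1, 8, 32, 32)
with torch.no_grad():
    err = (fuse_conv_and_bn(conv, bn)(x) - bn(conv(x))).abs().max()
print(err)  # should be on the order of 1e-6
```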
From 8b3cc06ce54f39f1d7301652409beed25b4b1d85 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:49:48 +0800
Subject: [PATCH 26/37] Create autoanchor.py
---
utils/autoanchor.py | 149 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 149 insertions(+)
create mode 100644 utils/autoanchor.py
diff --git a/utils/autoanchor.py b/utils/autoanchor.py
new file mode 100644
index 0000000..cf803f8
--- /dev/null
+++ b/utils/autoanchor.py
@@ -0,0 +1,149 @@
+# Auto-anchor utils
+
+import numpy as np
+import torch
+import yaml
+from scipy.cluster.vq import kmeans
+from tqdm import tqdm
+
+
+def check_anchor_order(m):
+ # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
+ a = m.anchor_grid.prod(-1).view(-1) # anchor area
+ da = a[-1] - a[0] # delta a
+ ds = m.stride[-1] - m.stride[0] # delta s
+    if da.sign() != ds.sign():  # anchor order does not match stride order
+ print('Reversing anchor order')
+ m.anchors[:] = m.anchors.flip(0)
+ m.anchor_grid[:] = m.anchor_grid.flip(0)
+
+
+def check_anchors(dataset, model, thr=4.0, imgsz=640):
+ # Check anchor fit to data, recompute if necessary
+ print('\nAnalyzing anchors... ', end='')
+ m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
+ shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
+ scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
+ wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
+
+ def metric(k): # compute metric
+ r = wh[:, None] / k[None]
+ x = torch.min(r, 1. / r).min(2)[0] # ratio metric
+ best = x.max(1)[0] # best_x
+ aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold
+ bpr = (best > 1. / thr).float().mean() # best possible recall
+ return bpr, aat
+
+ bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
+ print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
+ if bpr < 0.98: # threshold to recompute
+ print('. Attempting to improve anchors, please wait...')
+ na = m.anchor_grid.numel() // 2 # number of anchors
+ new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
+ new_bpr = metric(new_anchors.reshape(-1, 2))[0]
+ if new_bpr > bpr: # replace anchors
+ new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
+ m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
+ m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
+ check_anchor_order(m)
+ print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
+ else:
+ print('Original anchors better than new anchors. Proceeding with original anchors.')
+ print('') # newline
+
+
+def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
+ """ Creates kmeans-evolved anchors from training dataset
+ Arguments:
+ path: path to dataset *.yaml, or a loaded dataset
+ n: number of anchors
+ img_size: image size used for training
+ thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
+ gen: generations to evolve anchors using genetic algorithm
+ verbose: print all results
+ Return:
+ k: kmeans evolved anchors
+ Usage:
+ from utils.general import *; _ = kmean_anchors()
+ """
+ thr = 1. / thr
+
+ def metric(k, wh): # compute metrics
+ r = wh[:, None] / k[None]
+ x = torch.min(r, 1. / r).min(2)[0] # ratio metric
+ # x = wh_iou(wh, torch.tensor(k)) # iou metric
+ return x, x.max(1)[0] # x, best_x
+
+ def anchor_fitness(k): # mutation fitness
+ _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
+ return (best * (best > thr).float()).mean() # fitness
+
+ def print_results(k):
+ k = k[np.argsort(k.prod(1))] # sort small to large
+ x, best = metric(k, wh0)
+ bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
+ print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
+ print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
+ (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
+ for i, x in enumerate(k):
+ print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
+ return k
+
+ if isinstance(path, str): # *.yaml file
+ with open(path) as f:
+ data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ from utils.datasets import LoadImagesAndLabels
+ dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
+ else:
+ dataset = path # dataset
+
+ # Get label wh
+ shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
+ wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
+
+ # Filter
+ i = (wh0 < 3.0).any(1).sum()
+ if i:
+ print('WARNING: Extremely small objects found. '
+ '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
+    wh = wh0[(wh0 >= 2.0).any(1)]  # keep labels with any side >= 2 pixels
+
+ # Kmeans calculation
+ print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
+ s = wh.std(0) # sigmas for whitening
+ k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
+ k *= s
+ wh = torch.tensor(wh, dtype=torch.float32) # filtered
+ wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
+ k = print_results(k)
+
+ # Plot
+ # k, d = [None] * 20, [None] * 20
+ # for i in tqdm(range(1, 21)):
+ # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
+ # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
+ # ax = ax.ravel()
+ # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
+ # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
+ # ax[0].hist(wh[wh[:, 0]<100, 0],400)
+ # ax[1].hist(wh[wh[:, 1]<100, 1],400)
+ # fig.tight_layout()
+ # fig.savefig('wh.png', dpi=200)
+
+ # Evolve
+ npr = np.random
+    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, shape, mutation prob, sigma
+ pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
+ for _ in pbar:
+ v = np.ones(sh)
+ while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
+ v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
+ kg = (k.copy() * v).clip(min=2.0)
+ fg = anchor_fitness(kg)
+ if fg > f:
+ f, k = fg, kg.copy()
+ pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
+ if verbose:
+ print_results(k)
+
+ return print_results(k)
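
To make the ratio metric above concrete, here is a small worked example of the best-possible-recall test (values are illustrative):

```
import torch

wh = torch.tensor([[30., 60.]])                         # one label (w, h) in pixels
k = torch.tensor([[10., 13.], [33., 23.], [62., 45.]])  # three candidate anchors
r = wh[:, None] / k[None]                               # per-dimension ratios
x = torch.min(r, 1. / r).min(2)[0]                      # worst-dimension ratio per anchor
best = x.max(1)[0]                                      # best anchor for each label
print(best > 1. / 4.0)                                  # counted as recallable under thr=4.0
```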
From 99df736e71c5e6788744bd7d7c791ceda8a3b6d2 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:50:30 +0800
Subject: [PATCH 27/37] Create loss.py
---
utils/loss.py | 172 ++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 172 insertions(+)
create mode 100644 utils/loss.py
diff --git a/utils/loss.py b/utils/loss.py
new file mode 100644
index 0000000..288b388
--- /dev/null
+++ b/utils/loss.py
@@ -0,0 +1,172 @@
+# Loss functions
+
+import torch
+import torch.nn as nn
+
+from utils.general import bbox_iou
+from utils.torch_utils import is_parallel
+
+
+def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
+ # return positive, negative label smoothing BCE targets
+ return 1.0 - 0.5 * eps, 0.5 * eps
+
+
+class BCEBlurWithLogitsLoss(nn.Module):
+ # BCEwithLogitLoss() with reduced missing label effects.
+ def __init__(self, alpha=0.05):
+ super(BCEBlurWithLogitsLoss, self).__init__()
+ self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
+ self.alpha = alpha
+
+ def forward(self, pred, true):
+ loss = self.loss_fcn(pred, true)
+ pred = torch.sigmoid(pred) # prob from logits
+ dx = pred - true # reduce only missing label effects
+ # dx = (pred - true).abs() # reduce missing label and false label effects
+ alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
+ loss *= alpha_factor
+ return loss.mean()
+
+
+class FocalLoss(nn.Module):
+ # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
+ def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
+ super(FocalLoss, self).__init__()
+ self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
+ self.gamma = gamma
+ self.alpha = alpha
+ self.reduction = loss_fcn.reduction
+ self.loss_fcn.reduction = 'none' # required to apply FL to each element
+
+ def forward(self, pred, true):
+ loss = self.loss_fcn(pred, true)
+ # p_t = torch.exp(-loss)
+ # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
+
+ # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
+ pred_prob = torch.sigmoid(pred) # prob from logits
+ p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
+ alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
+ modulating_factor = (1.0 - p_t) ** self.gamma
+ loss *= alpha_factor * modulating_factor
+
+ if self.reduction == 'mean':
+ return loss.mean()
+ elif self.reduction == 'sum':
+ return loss.sum()
+ else: # 'none'
+ return loss
+
+
+def compute_loss(p, targets, model): # predictions, targets, model
+ device = targets.device
+ #print(device)
+ lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
+ tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets
+ h = model.hyp # hyperparameters
+
+ # Define criteria
+ BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['cls_pw']])).to(device)
+ BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['obj_pw']])).to(device)
+
+ # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+ cp, cn = smooth_BCE(eps=0.0)
+
+ # Focal loss
+ g = h['fl_gamma'] # focal loss gamma
+ if g > 0:
+ BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+ # Losses
+ nt = 0 # number of targets
+ no = len(p) # number of outputs
+ balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6
+ balance = [4.0, 1.0, 0.5, 0.4, 0.1] if no == 5 else balance
+ for i, pi in enumerate(p): # layer index, layer predictions
+ b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
+ tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
+
+ n = b.shape[0] # number of targets
+ if n:
+ nt += n # cumulative targets
+ ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
+
+ # Regression
+ pxy = ps[:, :2].sigmoid() * 2. - 0.5
+ pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
+ pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box
+ iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
+ lbox += (1.0 - iou).mean() # iou loss
+
+ # Objectness
+ tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
+
+ # Classification
+ if model.nc > 1: # cls loss (only if multiple classes)
+ t = torch.full_like(ps[:, 5:], cn, device=device) # targets
+ t[range(n), tcls[i]] = cp
+ lcls += BCEcls(ps[:, 5:], t) # BCE
+
+ # Append targets to text file
+ # with open('targets.txt', 'a') as file:
+ # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
+
+ lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss
+
+ s = 3 / no # output count scaling
+ lbox *= h['box'] * s
+ lobj *= h['obj'] * s * (1.4 if no >= 4 else 1.)
+ lcls *= h['cls'] * s
+ bs = tobj.shape[0] # batch size
+
+ loss = lbox + lobj + lcls
+ return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
+
+
+def build_targets(p, targets, model):
+ nt = targets.shape[0] # number of anchors, targets
+ tcls, tbox, indices, anch = [], [], [], []
+ gain = torch.ones(6, device=targets.device) # normalized to gridspace gain
+ off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float() # overlap offsets
+
+ g = 0.5 # offset
+ multi_gpu = is_parallel(model)
+ for i, jj in enumerate(model.module.yolo_layers if multi_gpu else model.yolo_layers):
+ # get number of grid points and anchor vec for this yolo layer
+ anchors = model.module.module_list[jj].anchor_vec if multi_gpu else model.module_list[jj].anchor_vec
+ gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
+
+ # Match targets to anchors
+ a, t, offsets = [], targets * gain, 0
+ if nt:
+ na = anchors.shape[0] # number of anchors
+ at = torch.arange(na).view(na, 1).repeat(1, nt) # anchor tensor, same as .repeat_interleave(nt)
+ r = t[None, :, 4:6] / anchors[:, None] # wh ratio
+ j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
+ # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
+ a, t = at[j], t.repeat(na, 1, 1)[j] # filter
+
+ # overlaps
+ gxy = t[:, 2:4] # grid xy
+ z = torch.zeros_like(gxy)
+ j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+ l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
+ a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
+ offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g
+
+ # Define
+ b, c = t[:, :2].long().T # image, class
+ gxy = t[:, 2:4] # grid xy
+ gwh = t[:, 4:6] # grid wh
+ gij = (gxy - offsets).long()
+ gi, gj = gij.T # grid xy indices
+
+ # Append
+ #indices.append((b, a, gj, gi)) # image, anchor, grid indices
+ indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
+ tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
+ anch.append(anchors[a]) # anchors
+ tcls.append(c) # class
+
+ return tcls, tbox, indices, anch
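
The regression branch in compute_loss decodes raw outputs into constrained ranges; a tiny sketch of those ranges (anchor values are illustrative):

```
import torch

anchor = torch.tensor([10., 13.])
raw = torch.zeros(1, 4)                          # zero logits sit at the centre of each range
pxy = raw[:, :2].sigmoid() * 2. - 0.5            # xy in (-0.5, 1.5) around the grid cell -> [[0.5, 0.5]]
pwh = (raw[:, 2:4].sigmoid() * 2) ** 2 * anchor  # wh in (0, 4 * anchor) -> [[10., 13.]]
print(pxy, pwh)
```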
From 6b73e1ecf74f4bba459355fd4847a3d1b2129d10 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:50:58 +0800
Subject: [PATCH 28/37] Create metric.py
---
utils/metric.py | 140 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 140 insertions(+)
create mode 100644 utils/metric.py
diff --git a/utils/metric.py b/utils/metric.py
new file mode 100644
index 0000000..04c578a
--- /dev/null
+++ b/utils/metric.py
@@ -0,0 +1,140 @@
+# Model validation metrics
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+def fitness(x):
+ # Model fitness as a weighted combination of metrics
+ w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+ return (x[:, :4] * w).sum(1)
+
+
+def fitness_p(x):
+ # Model fitness as a weighted combination of metrics
+ w = [1.0, 0.0, 0.0, 0.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+ return (x[:, :4] * w).sum(1)
+
+
+def fitness_r(x):
+ # Model fitness as a weighted combination of metrics
+ w = [0.0, 1.0, 0.0, 0.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+ return (x[:, :4] * w).sum(1)
+
+
+def fitness_ap50(x):
+ # Model fitness as a weighted combination of metrics
+ w = [0.0, 0.0, 1.0, 0.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+ return (x[:, :4] * w).sum(1)
+
+
+def fitness_ap(x):
+ # Model fitness as a weighted combination of metrics
+ w = [0.0, 0.0, 0.0, 1.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+ return (x[:, :4] * w).sum(1)
+
+
+def fitness_f(x):
+    # Model fitness as the harmonic mean of P and R (F1 up to the constant factor 2)
+    return (x[:, 0] * x[:, 1]) / (x[:, 0] + x[:, 1] + 1e-16)  # guard against P + R == 0
+
+
+def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, fname='precision-recall_curve.png'):
+ """ Compute the average precision, given the recall and precision curves.
+ Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
+ # Arguments
+ tp: True positives (nparray, nx1 or nx10).
+ conf: Objectness value from 0-1 (nparray).
+ pred_cls: Predicted object classes (nparray).
+ target_cls: True object classes (nparray).
+ plot: Plot precision-recall curve at mAP@0.5
+ fname: Plot filename
+ # Returns
+ The average precision as computed in py-faster-rcnn.
+ """
+
+ # Sort by objectness
+ i = np.argsort(-conf)
+ tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
+
+ # Find unique classes
+ unique_classes = np.unique(target_cls)
+
+ # Create Precision-Recall curve and compute AP for each class
+ px, py = np.linspace(0, 1, 1000), [] # for plotting
+ pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
+ s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
+ ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
+ for ci, c in enumerate(unique_classes):
+ i = pred_cls == c
+ n_l = (target_cls == c).sum() # number of labels
+ n_p = i.sum() # number of predictions
+
+ if n_p == 0 or n_l == 0:
+ continue
+ else:
+ # Accumulate FPs and TPs
+ fpc = (1 - tp[i]).cumsum(0)
+ tpc = tp[i].cumsum(0)
+
+ # Recall
+ recall = tpc / (n_l + 1e-16) # recall curve
+ r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
+
+ # Precision
+ precision = tpc / (tpc + fpc) # precision curve
+ p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
+
+ # AP from recall-precision curve
+ for j in range(tp.shape[1]):
+ ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
+ if j == 0:
+ py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
+
+ # Compute F1 score (harmonic mean of precision and recall)
+ f1 = 2 * p * r / (p + r + 1e-16)
+
+ if plot:
+ py = np.stack(py, axis=1)
+ fig, ax = plt.subplots(1, 1, figsize=(5, 5))
+ ax.plot(px, py, linewidth=0.5, color='grey') # plot(recall, precision)
+ ax.plot(px, py.mean(1), linewidth=2, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
+ ax.set_xlabel('Recall')
+ ax.set_ylabel('Precision')
+ ax.set_xlim(0, 1)
+ ax.set_ylim(0, 1)
+ plt.legend()
+ fig.tight_layout()
+ fig.savefig(fname, dpi=200)
+
+ return p, r, ap, f1, unique_classes.astype('int32')
+
+
+def compute_ap(recall, precision):
+ """ Compute the average precision, given the recall and precision curves.
+ Source: https://github.com/rbgirshick/py-faster-rcnn.
+ # Arguments
+ recall: The recall curve (list).
+ precision: The precision curve (list).
+ # Returns
+ The average precision as computed in py-faster-rcnn.
+ """
+
+    # Sentinel appending is disabled here; the recall/precision curves are used as-is
+ mrec = recall # np.concatenate(([0.], recall, [recall[-1] + 1E-3]))
+ mpre = precision # np.concatenate(([0.], precision, [0.]))
+
+ # Compute the precision envelope
+ mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
+
+ # Integrate area under curve
+ method = 'interp' # methods: 'continuous', 'interp'
+ if method == 'interp':
+ x = np.linspace(0, 1, 101) # 101-point interp (COCO)
+ ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
+ else: # 'continuous'
+ i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
+
+ return ap, mpre, mrec
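
A toy run of compute_ap on a hand-made precision-recall curve (a sketch, assuming this module is importable as utils.metric):

```
import numpy as np
from utils.metric import compute_ap

recall = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
precision = np.array([1.0, 0.9, 0.8, 0.6, 0.5])
ap, mpre, mrec = compute_ap(recall, precision)
print('AP = %.3f' % ap)  # area under the precision envelope, 101-point interpolation
```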
From 6e0b56de461502b8cb1833a3ce777949defa932f Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:51:26 +0800
Subject: [PATCH 29/37] Create plots.py
---
utils/plots.py | 380 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 380 insertions(+)
create mode 100644 utils/plots.py
diff --git a/utils/plots.py b/utils/plots.py
new file mode 100644
index 0000000..c90a96b
--- /dev/null
+++ b/utils/plots.py
@@ -0,0 +1,380 @@
+# Plotting utils
+
+import glob
+import math
+import os
+import random
+from copy import copy
+from pathlib import Path
+
+import cv2
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+import yaml
+from PIL import Image
+from scipy.signal import butter, filtfilt
+
+from utils.general import xywh2xyxy, xyxy2xywh
+from utils.metric import fitness  # module created in the previous patch
+
+# Settings
+matplotlib.use('Agg') # for writing to files only
+
+
+def color_list():
+ # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
+ def hex2rgb(h):
+ return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
+
+ return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']]
+
+
+def hist2d(x, y, n=100):
+ # 2d histogram used in labels.png and evolve.png
+ xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
+ hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
+ xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
+ yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
+ return np.log(hist[xidx, yidx])
+
+
+def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
+ # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
+ def butter_lowpass(cutoff, fs, order):
+ nyq = 0.5 * fs
+ normal_cutoff = cutoff / nyq
+ return butter(order, normal_cutoff, btype='low', analog=False)
+
+ b, a = butter_lowpass(cutoff, fs, order=order)
+ return filtfilt(b, a, data) # forward-backward filter
+
+
+def plot_one_box(x, img, color=None, label=None, line_thickness=None):
+ # Plots one bounding box on image img
+ tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
+ color = color or [random.randint(0, 255) for _ in range(3)]
+ c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+ cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+ if label:
+ tf = max(tl - 1, 1) # font thickness
+ t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+ c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+ cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
+ cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+
+
+def plot_wh_methods(): # from utils.general import *; plot_wh_methods()
+ # Compares the two methods for width-height anchor multiplication
+ # https://github.com/ultralytics/yolov3/issues/168
+ x = np.arange(-4.0, 4.0, .1)
+ ya = np.exp(x)
+ yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
+
+ fig = plt.figure(figsize=(6, 3), dpi=150)
+ plt.plot(x, ya, '.-', label='YOLO')
+ plt.plot(x, yb ** 2, '.-', label='YOLO ^2')
+ plt.plot(x, yb ** 1.6, '.-', label='YOLO ^1.6')
+ plt.xlim(left=-4, right=4)
+ plt.ylim(bottom=0, top=6)
+ plt.xlabel('input')
+ plt.ylabel('output')
+ plt.grid()
+ plt.legend()
+ fig.tight_layout()
+ fig.savefig('comparison.png', dpi=200)
+
+
+def output_to_target(output, width, height):
+ # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
+ if isinstance(output, torch.Tensor):
+ output = output.cpu().numpy()
+
+ targets = []
+ for i, o in enumerate(output):
+ if o is not None:
+ for pred in o:
+ box = pred[:4]
+ w = (box[2] - box[0]) / width
+ h = (box[3] - box[1]) / height
+ x = box[0] / width + w / 2
+ y = box[1] / height + h / 2
+ conf = pred[4]
+ cls = int(pred[5])
+
+ targets.append([i, cls, x, y, w, h, conf])
+
+ return np.array(targets)
+
+
+def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
+ # Plot image grid with labels
+
+ if isinstance(images, torch.Tensor):
+ images = images.cpu().float().numpy()
+ if isinstance(targets, torch.Tensor):
+ targets = targets.cpu().numpy()
+
+ # un-normalise
+ if np.max(images[0]) <= 1:
+ images *= 255
+
+ tl = 3 # line thickness
+ tf = max(tl - 1, 1) # font thickness
+ bs, _, h, w = images.shape # batch size, _, height, width
+ bs = min(bs, max_subplots) # limit plot images
+ ns = np.ceil(bs ** 0.5) # number of subplots (square)
+
+ # Check if we should resize
+ scale_factor = max_size / max(h, w)
+ if scale_factor < 1:
+ h = math.ceil(scale_factor * h)
+ w = math.ceil(scale_factor * w)
+
+ colors = color_list() # list of colors
+ mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
+ for i, img in enumerate(images):
+ if i == max_subplots: # if last batch has fewer images than we expect
+ break
+
+ block_x = int(w * (i // ns))
+ block_y = int(h * (i % ns))
+
+ img = img.transpose(1, 2, 0)
+ if scale_factor < 1:
+ img = cv2.resize(img, (w, h))
+
+ mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
+ if len(targets) > 0:
+ image_targets = targets[targets[:, 0] == i]
+ boxes = xywh2xyxy(image_targets[:, 2:6]).T
+ classes = image_targets[:, 1].astype('int')
+ labels = image_targets.shape[1] == 6 # labels if no conf column
+ conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
+
+ boxes[[0, 2]] *= w
+ boxes[[0, 2]] += block_x
+ boxes[[1, 3]] *= h
+ boxes[[1, 3]] += block_y
+ for j, box in enumerate(boxes.T):
+ cls = int(classes[j])
+ color = colors[cls % len(colors)]
+ cls = names[cls] if names else cls
+ if labels or conf[j] > 0.25: # 0.25 conf thresh
+ label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
+ plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
+
+ # Draw image filename labels
+ if paths:
+ label = Path(paths[i]).name[:40] # trim to 40 char
+ t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+ cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
+ lineType=cv2.LINE_AA)
+
+ # Image border
+ cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
+
+ if fname:
+ r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size
+ mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
+ # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
+ Image.fromarray(mosaic).save(fname) # PIL save
+ return mosaic
+
+
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
+ # Plot LR simulating training for full epochs
+ optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
+ y = []
+ for _ in range(epochs):
+ scheduler.step()
+ y.append(optimizer.param_groups[0]['lr'])
+ plt.plot(y, '.-', label='LR')
+ plt.xlabel('epoch')
+ plt.ylabel('LR')
+ plt.grid()
+ plt.xlim(0, epochs)
+ plt.ylim(0)
+ plt.tight_layout()
+ plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
+
+
+def plot_test_txt(): # from utils.general import *; plot_test()
+ # Plot test.txt histograms
+ x = np.loadtxt('test.txt', dtype=np.float32)
+ box = xyxy2xywh(x[:, :4])
+ cx, cy = box[:, 0], box[:, 1]
+
+ fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
+ ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
+ ax.set_aspect('equal')
+ plt.savefig('hist2d.png', dpi=300)
+
+ fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
+ ax[0].hist(cx, bins=600)
+ ax[1].hist(cy, bins=600)
+ plt.savefig('hist1d.png', dpi=200)
+
+
+def plot_targets_txt(): # from utils.general import *; plot_targets_txt()
+ # Plot targets.txt histograms
+ x = np.loadtxt('targets.txt', dtype=np.float32).T
+ s = ['x targets', 'y targets', 'width targets', 'height targets']
+ fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
+ ax = ax.ravel()
+ for i in range(4):
+ ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
+ ax[i].legend()
+ ax[i].set_title(s[i])
+ plt.savefig('targets.jpg', dpi=200)
+
+
+def plot_study_txt(f='study.txt', x=None): # from utils.general import *; plot_study_txt()
+ # Plot study.txt generated by test.py
+ fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
+ ax = ax.ravel()
+
+ fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
+ for f in ['study/study_coco_yolo%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
+ y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
+ x = np.arange(y.shape[1]) if x is None else np.array(x)
+ s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
+ for i in range(7):
+ ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
+ ax[i].set_title(s[i])
+
+ j = y[3].argmax() + 1
+ ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
+ label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
+
+ ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
+ 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
+
+ ax2.grid()
+ ax2.set_xlim(0, 30)
+ ax2.set_ylim(28, 50)
+ ax2.set_yticks(np.arange(30, 55, 5))
+ ax2.set_xlabel('GPU Speed (ms/img)')
+ ax2.set_ylabel('COCO AP val')
+ ax2.legend(loc='lower right')
+ plt.savefig('study_mAP_latency.png', dpi=300)
+ plt.savefig(f.replace('.txt', '.png'), dpi=300)
+
+
+def plot_labels(labels, save_dir=''):
+ # plot dataset labels
+ c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
+ nc = int(c.max() + 1) # number of classes
+
+ fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
+ ax = ax.ravel()
+ ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
+ ax[0].set_xlabel('classes')
+ ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
+ ax[1].set_xlabel('x')
+ ax[1].set_ylabel('y')
+ ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
+ ax[2].set_xlabel('width')
+ ax[2].set_ylabel('height')
+ plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
+ plt.close()
+
+ # seaborn correlogram
+ try:
+ import seaborn as sns
+ import pandas as pd
+ x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
+ sns.pairplot(x, corner=True, diag_kind='hist', kind='scatter', markers='o',
+ plot_kws=dict(s=3, edgecolor=None, linewidth=1, alpha=0.02),
+ diag_kws=dict(bins=50))
+ plt.savefig(Path(save_dir) / 'labels_correlogram.png', dpi=200)
+ plt.close()
+    except Exception:  # seaborn/pandas are optional; skip the correlogram if unavailable
+        pass
+
+
+def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.general import *; plot_evolution()
+ # Plot hyperparameter evolution results in evolve.txt
+ with open(yaml_file) as f:
+ hyp = yaml.load(f, Loader=yaml.FullLoader)
+ x = np.loadtxt('evolve.txt', ndmin=2)
+ f = fitness(x)
+ # weights = (f - f.min()) ** 2 # for weighted results
+ plt.figure(figsize=(10, 12), tight_layout=True)
+ matplotlib.rc('font', **{'size': 8})
+ for i, (k, v) in enumerate(hyp.items()):
+ y = x[:, i + 7]
+ # mu = (y * weights).sum() / weights.sum() # best weighted result
+ mu = y[f.argmax()] # best single result
+ plt.subplot(6, 5, i + 1)
+ plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
+ plt.plot(mu, f.max(), 'k+', markersize=15)
+ plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
+ if i % 5 != 0:
+ plt.yticks([])
+ print('%15s: %.3g' % (k, mu))
+ plt.savefig('evolve.png', dpi=200)
+ print('\nPlot saved as evolve.png')
+
+
+def plot_results_overlay(start=0, stop=0): # from utils.general import *; plot_results_overlay()
+ # Plot training 'results*.txt', overlaying train and val losses
+ s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
+ t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
+ for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
+ results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
+ n = results.shape[1] # number of epochs logged
+ x = range(start, min(stop, n) if stop else n)
+ fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
+ ax = ax.ravel()
+ for i in range(5):
+ for j in [i, i + 5]:
+ y = results[j, x]
+ ax[i].plot(x, y, marker='.', label=s[j])
+ # y_smooth = butter_lowpass_filtfilt(y)
+ # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
+
+ ax[i].set_title(t[i])
+ ax[i].legend()
+ if i == 0: ax[i].set_ylabel(f) # add the source filename as y label
+ fig.savefig(f.replace('.txt', '.png'), dpi=200)
+
+
+def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
+ # from utils.general import *; plot_results(save_dir='runs/train/exp0')
+ # Plot training 'results*.txt'
+ fig, ax = plt.subplots(2, 5, figsize=(12, 6))
+ ax = ax.ravel()
+ s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
+ 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
+ if bucket:
+ # os.system('rm -rf storage.googleapis.com')
+ # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
+ files = ['%g.txt' % x for x in id]
+ c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/%g.txt' % (bucket, x) for x in id)
+ os.system(c)
+ else:
+ files = glob.glob(str(Path(save_dir) / '*.txt')) + glob.glob('../../Downloads/results*.txt')
+ assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
+ for fi, f in enumerate(files):
+ try:
+ results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
+ n = results.shape[1] # number of epochs logged
+ x = range(start, min(stop, n) if stop else n)
+ for i in range(10):
+ y = results[i, x]
+ if i in [0, 1, 2, 5, 6, 7]:
+ y[y == 0] = np.nan # don't show zero loss values
+ # y /= y[0] # normalize
+ label = labels[fi] if len(labels) else Path(f).stem
+ ax[i].plot(x, y, marker='.', label=label, linewidth=1, markersize=6)
+ ax[i].set_title(s[i])
+ # if i in [5, 6, 7]: # share train and val loss y axes
+ # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+ except Exception as e:
+ print('Warning: Plotting error for %s; %s' % (f, e))
+
+ fig.tight_layout()
+ ax[1].legend()
+ fig.savefig(Path(save_dir) / 'results.png', dpi=200)
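For reference, a typical invocation of these helpers (a minimal sketch; the module path follows the inline comments in this patch, though a later patch in this series relocates the plotting code under yolo/utils):

```
from utils.general import plot_results, plot_results_overlay

plot_results(save_dir='runs/train/exp0')  # writes runs/train/exp0/results.png
plot_results_overlay()                    # one overlay PNG per results*.txt found
```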
From 4999157c08e07cad7cd70d0782b4a6d02557b837 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:52:49 +0800
Subject: [PATCH 30/37] Update models.py
---
models/models.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/models/models.py b/models/models.py
index 7fee5a1..8b3bd2c 100644
--- a/models/models.py
+++ b/models/models.py
@@ -102,6 +102,12 @@ def create_modules(module_defs, img_size, cfg):
filters = output_filters[-1]
modules = Silence()
+ elif mdef['type'] == 'scale_channels': # nn.Sequential() placeholder for 'scale_channels' layer
+ layers = mdef['from']
+ filters = output_filters[-1]
+ routs.extend([i + l if l < 0 else l for l in layers])
+ modules = ScaleChannel(layers=layers)
+
elif mdef['type'] == 'sam': # nn.Sequential() placeholder for 'sam' layer
layers = mdef['from']
filters = output_filters[-1]
@@ -501,7 +507,7 @@ def forward_once(self, x, augment=False, verbose=False):
for i, module in enumerate(self.module_list):
name = module.__class__.__name__
#print(name)
- if name in ['WeightedFeatureFusion', 'FeatureConcat', 'FeatureConcat2', 'FeatureConcat3', 'FeatureConcat_l', 'ScaleSpatial']: # sum, concat
+ if name in ['WeightedFeatureFusion', 'FeatureConcat', 'FeatureConcat2', 'FeatureConcat3', 'FeatureConcat_l', 'ScaleChannel', 'ScaleSpatial']: # sum, concat
if verbose:
l = [i - 1] + module.layers # layers
sh = [list(x.shape)] + [list(out[i].shape) for i in module.layers] # shapes
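For context, `ScaleChannel` itself is defined in utils/layers.py, which this patch does not touch. A minimal sketch of what such a layer does, assuming it broadcasts the current activation channel-wise over a routed feature map (the repo's actual implementation may differ):

```
import torch.nn as nn

class ScaleChannel(nn.Module):
    # Sketch only: scale the feature map from a routed layer by the current
    # activation x (typically an (N, C, 1, 1) attention vector), broadcasting
    # over the spatial dimensions.
    def __init__(self, layers):
        super().__init__()
        self.layers = layers  # source layer indices parsed from 'from=' in the cfg

    def forward(self, x, outputs):
        a = outputs[self.layers[0]]  # (N, C, H, W) feature map to be scaled
        return x.expand_as(a) * a
```

This matches the calling convention in forward_once above, where modules in the named list receive both the current tensor and the cached layer outputs.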
From cd21c8f27755601f418737e559d1eb0f192fb3e9 Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 08:58:07 +0800
Subject: [PATCH 31/37] Update README.md
---
README.md | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/README.md b/README.md
index 6e807c2..a784b7b 100644
--- a/README.md
+++ b/README.md
@@ -8,11 +8,20 @@ This is the implementation of "[Scaled-YOLOv4: Scaling Cross Stage Partial Netwo
```
# create the docker container, you can change the share memory size if you have more.
-nvidia-docker run --name yolov4_csp -it -v your_coco_path/:/coco/ -v your_code_path/:/yolo --shm-size=64g nvcr.io/nvidia/pytorch:20.06-py3
+nvidia-docker run --name yolov4_csp -it -v your_coco_path/:/coco/ -v your_code_path/:/yolo --shm-size=64g nvcr.io/nvidia/pytorch:20.11-py3
-# install mish-cuda, if you use different pytorch version, you could try https://github.com/JunnYu/mish-cuda
+# apt install required packages
+apt update
+apt install -y zip htop screen libgl1-mesa-glx
+
+# pip install required packages
+pip install seaborn thop
+
+# install mish-cuda if you want to use mish activation
+# https://github.com/thomasbrandon/mish-cuda
+# https://github.com/JunnYu/mish-cuda
cd /
-git clone https://github.com/thomasbrandon/mish-cuda
+git clone https://github.com/JunnYu/mish-cuda
cd mish-cuda
python setup.py build install
@@ -26,7 +35,7 @@ cd /yolo
```
# download yolov4-csp.weights and put it in /yolo/weights/ folder.
-python test.py --img 640 --conf 0.001 --batch 8 --device 0 --data coco.yaml --cfg models/yolov4-csp.cfg --weights weights/yolov4-csp.weights
+python test.py --img 640 --conf 0.001 --iou 0.65 --batch 8 --device 0 --data coco.yaml --cfg models/yolov4-csp.cfg --weights weights/yolov4-csp.weights
```
You will get the results:
@@ -54,8 +63,8 @@ python train.py --device 0 --batch-size 16 --data coco.yaml --cfg yolov4-csp.cfg
For resume training:
```
-# assume the checkpoint is stored in runs/exp0_yolov4-csp/weights/.
-python train.py --device 0 --batch-size 16 --data coco.yaml --cfg yolov4-csp.cfg --weights 'runs/exp0_yolov4-csp/weights/last.pt' --name yolov4-csp --resume
+# assume the checkpoint is stored in runs/train/yolov4-csp/weights/.
+python train.py --device 0 --batch-size 16 --data coco.yaml --cfg yolov4-csp.cfg --weights 'runs/train/yolov4-csp/weights/last.pt' --name yolov4-csp --resume
```
If you want to use multiple GPUs for training
From 8aaf0ed6fa01f453d70a5ab0f0e6eee183b1816f Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 13:35:25 +0800
Subject: [PATCH 32/37] Rename metric.py to metrics.py
---
utils/{metric.py => metrics.py} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename utils/{metric.py => metrics.py} (100%)
diff --git a/utils/metric.py b/utils/metrics.py
similarity index 100%
rename from utils/metric.py
rename to utils/metrics.py
From 0c13b23d8cfcbfcd42f7f6731084c4037475a4be Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Fri, 21 May 2021 14:29:47 +0800
Subject: [PATCH 33/37] Update README.md
---
README.md | 28 +++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index a784b7b..9533fc2 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,9 @@
This is the implementation of "[Scaled-YOLOv4: Scaling Cross Stage Partial Network](https://arxiv.org/abs/2011.08036)" using the PyTorch framework.
-* **2020.11.16** Now supported by [Darknet](https://github.com/AlexeyAB/darknet). [`yolov4-csp.cfg`](https://github.com/AlexeyAB/darknet/blob/master/cfg/yolov4-csp.cfg) [`yolov4-csp.weights`](https://drive.google.com/file/d/1NQwz47cW0NUgy7L3_xOKaNEfLoQuq3EL/view?usp=sharing)
+* **2021.05.21** Due to an unknown issue, some people could not reproduce the performance reported in the paper, and I could not reproduce [issue#89](https://github.com/WongKinYiu/ScaledYOLOv4/issues/89), so I have updated the codebase. As a side effect, the reproduced performance is now better than the paper's (47.8 AP -> 48.7 AP).
+
+* **2020.11.16** Now supported by [Darknet](https://github.com/AlexeyAB/darknet). [`yolov4-csp.cfg`](https://github.com/AlexeyAB/darknet/blob/master/cfg/yolov4-csp.cfg) [`yolov4-csp.weights`](https://drive.google.com/file/d/1TdKvDQb2QpP4EhOIyks8kgT8dgI1iOWT/view?usp=sharing)
## Installation
@@ -31,14 +33,36 @@ cd /yolo
## Testing
+[`yolov4-csp.weights`](https://drive.google.com/file/d/1TdKvDQb2QpP4EhOIyks8kgT8dgI1iOWT/view?usp=sharing)
+
+Old weights (before the 2021.05.21 update):
+
[`yolov4-csp.weights`](https://drive.google.com/file/d/1NQwz47cW0NUgy7L3_xOKaNEfLoQuq3EL/view?usp=sharing)
+
+
```
# download yolov4-csp.weights and put it in /yolo/weights/ folder.
python test.py --img 640 --conf 0.001 --iou 0.65 --batch 8 --device 0 --data coco.yaml --cfg models/yolov4-csp.cfg --weights weights/yolov4-csp.weights
```
You will get the results:
+```
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.48656
+ Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.67002
+ Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.52739
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.33082
+ Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.54036
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.62107
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.37197
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.61211
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.66544
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.49676
+ Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.72018
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.80528
+```
+Old results (before the 2021.05.21 update):
+
```
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.47827
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.66448
@@ -54,6 +78,8 @@ You will get the results:
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.79914
```
+
+
## Training
```
From e75367ea3e4279efb581b57032d78c4dbedbb58d Mon Sep 17 00:00:00 2001
From: "Kin-Yiu, Wong" <102582011@cc.ncu.edu.tw>
Date: Tue, 8 Jun 2021 05:22:58 +0800
Subject: [PATCH 34/37] Update detect.py
---
detect.py | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/detect.py b/detect.py
index d871194..911ba9e 100644
--- a/detect.py
+++ b/detect.py
@@ -41,9 +41,12 @@ def detect(save_img=False):
# Load model
model = Darknet(cfg, imgsz).cuda()
- model.load_state_dict(torch.load(weights[0], map_location=device)['model'])
- #model = attempt_load(weights, map_location=device) # load FP32 model
- #imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
+ try:
+ model.load_state_dict(torch.load(weights[0], map_location=device)['model'])
+ #model = attempt_load(weights, map_location=device) # load FP32 model
+ #imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
+ except Exception: # fall back to loading Darknet-format .weights files
+ load_darknet_weights(model, weights[0])
model.to(device).eval()
if half:
model.half() # to FP16
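The try/except fallback works, but it also hides unrelated loading errors. A more explicit variant (a sketch under the assumption that PyTorch checkpoints always carry a .pt/.pth suffix; `load_darknet_weights` is the same module-level helper from models/models.py that the patch calls):

```
from pathlib import Path

import torch

from models.models import load_darknet_weights


def load_any_weights(model, w, device):
    # Dispatch on the file suffix instead of catching every exception:
    # .pt/.pth files are torch checkpoints, anything else is a Darknet binary.
    if Path(w).suffix in ('.pt', '.pth'):
        model.load_state_dict(torch.load(w, map_location=device)['model'])
    else:
        load_darknet_weights(model, w)
    return model
```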
From c505fdbbaf2be2cd26097b383c2546162f351455 Mon Sep 17 00:00:00 2001
From: Kyle Weston
Date: Thu, 11 Feb 2021 11:00:32 -0800
Subject: [PATCH 35/37] - created a yolov4 CSP single class config file - fixed
various run-time errors while training/testing - added a speedco.yaml file
for training on speedco data - added a run_train bash file
---
data/coco.yaml | 7 +-
data/speedco.yaml | 10 +
models/yolov4-csp-single-class.cfg | 1259 ++++++++++++++++++++++++++++
run_train.sh | 3 +
utils/datasets.py | 4 +-
5 files changed, 1278 insertions(+), 5 deletions(-)
create mode 100644 data/speedco.yaml
create mode 100644 models/yolov4-csp-single-class.cfg
create mode 100755 run_train.sh
diff --git a/data/coco.yaml b/data/coco.yaml
index a31e20f..e80a45c 100644
--- a/data/coco.yaml
+++ b/data/coco.yaml
@@ -1,7 +1,8 @@
# train and val datasets (image directory or *.txt file with image paths)
-train: ../coco/train2017.txt # 118k images
-val: ../coco/val2017.txt # 5k images
-test: ../coco/testdev2017.txt # 20k images for submission to https://competitions.codalab.org/competitions/20794
+train: /mnt/coco/5k.txt # 5k-image COCO subset
+#val: /mnt/coco/5k.txt # 5k images
+val: data/coco_5k_500_subset.txt
+test: data/coco_5k_500_subset.txt # 500-image subset of the 5k val split
# number of classes
nc: 80
diff --git a/data/speedco.yaml b/data/speedco.yaml
new file mode 100644
index 0000000..8d25b63
--- /dev/null
+++ b/data/speedco.yaml
@@ -0,0 +1,10 @@
+# train and val datasets (image directory or *.txt file with image paths)
+train: data/speedco_train_images.txt
+val: data/speedco_val_images.txt
+test: data/speedco_val_images.txt
+
+# number of classes
+nc: 1
+
+# class names
+names: ['truck']
diff --git a/models/yolov4-csp-single-class.cfg b/models/yolov4-csp-single-class.cfg
new file mode 100644
index 0000000..ce5f800
--- /dev/null
+++ b/models/yolov4-csp-single-class.cfg
@@ -0,0 +1,1259 @@
+[net]
+# Testing
+#batch=1
+#subdivisions=1
+# Training
+batch=64
+subdivisions=8
+width=512
+height=512
+channels=3
+momentum=0.949
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.00261
+burn_in=1000
+max_batches = 10000
+policy=steps
+steps=8000,9000
+scales=.1,.1
+
+#cutmix=1
+mosaic=1
+
+#23:104x104 54:52x52 85:26x26 104:13x13 for 416
+
+
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=2
+pad=1
+activation=mish
+
+#[convolutional]
+#batch_normalize=1
+#filters=64
+#size=1
+#stride=1
+#pad=1
+#activation=mish
+
+#[route]
+#layers = -2
+
+#[convolutional]
+#batch_normalize=1
+#filters=64
+#size=1
+#stride=1
+#pad=1
+#activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+#[convolutional]
+#batch_normalize=1
+#filters=64
+#size=1
+#stride=1
+#pad=1
+#activation=mish
+
+#[route]
+#layers = -1,-7
+
+#[convolutional]
+#batch_normalize=1
+#filters=64
+#size=1
+#stride=1
+#pad=1
+#activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-10
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-16
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=1
+stride=1
+pad=1
+activation=mish
+
+##########################
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+### SPP ###
+[maxpool]
+stride=1
+size=5
+
+[route]
+layers=-2
+
+[maxpool]
+stride=1
+size=9
+
+[route]
+layers=-4
+
+[maxpool]
+stride=1
+size=13
+
+[route]
+layers=-1,-3,-5,-6
+### End SPP ###
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[route]
+layers = -1, -13
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[upsample]
+stride=2
+
+[route]
+layers = 79
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[route]
+layers = -1, -6
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[upsample]
+stride=2
+
+[route]
+layers = 48
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=128
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=128
+activation=mish
+
+[route]
+layers = -1, -6
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+##########################
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=18
+activation=linear
+
+
+[yolo]
+mask = 0,1,2
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=1
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=256
+activation=mish
+
+[route]
+layers = -1, -20
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=mish
+
+[route]
+layers = -1,-6
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=18
+activation=linear
+
+
+[yolo]
+mask = 3,4,5
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=1
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=512
+activation=mish
+
+[route]
+layers = -1, -49
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=mish
+
+[route]
+layers = -1,-6
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=mish
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=18
+activation=linear
+
+
+[yolo]
+mask = 6,7,8
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=1
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
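A note on the head sizes in this cfg: each convolutional layer that feeds a [yolo] block must have filters = (classes + 5) * len(mask), the standard Darknet rule. A quick check for the single-class case:

```
# Darknet head rule: filters = (classes + 5) * len(mask)
classes = 1            # classes=1 in each [yolo] block above
anchors_per_scale = 3  # mask = 0,1,2 / 3,4,5 / 6,7,8
print((classes + 5) * anchors_per_scale)  # 18, matching filters=18 before each [yolo]
```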
diff --git a/run_train.sh b/run_train.sh
new file mode 100755
index 0000000..a4caeda
--- /dev/null
+++ b/run_train.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+# Used for training with 3 GTX 1070 GPUs
+python -m torch.distributed.launch --nproc_per_node 3 train.py --device 0,1,2 --batch-size 21 --data speedco.yaml --weights '' --cfg yolov4-csp.cfg --name yolov4-csp-speedco --sync-bn --rect --single-cl
\ No newline at end of file
diff --git a/utils/datasets.py b/utils/datasets.py
index d104af1..aa31808 100644
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -803,8 +803,8 @@ def cache_labels(self, path='labels.cache3'):
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
- with open(label, 'r') as f:
- l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
+ with open(label, 'r') as f:
+ l = np.array([x.split() for x in f.read().splitlines() if len(x) > 0], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
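The `if len(x) > 0` guard added above matters for label files with trailing blank lines, which would otherwise produce ragged rows and crash the float32 conversion. A small illustration:

```
import numpy as np

text = "0 0.5 0.5 0.2 0.3\n\n"  # one box followed by a trailing blank line
rows = [x.split() for x in text.splitlines() if len(x) > 0]
labels = np.array(rows, dtype=np.float32)
print(labels.shape)  # (1, 5): class, x_center, y_center, width, height
```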
From 8c76b8b8d9c305cd27afcc74477307fb30c3acc7 Mon Sep 17 00:00:00 2001
From: Kyle Weston
Date: Thu, 11 Feb 2021 13:45:15 -0800
Subject: [PATCH 36/37] - added docker-compose.yml file
- start pushing to docker images to gcr.io
- added a .dockerignore file
- moved models and utils directory into yolo directory for better package management
- fixed import paths to conform with new directory structure
- added setup.py
- install yolo as a package in docker image
- changed the default label cache location so it doesn't mess with DVC
- fixed a run-time error
- use correct label name for single class
- set working_dir in docker-compose.yml
- add version tag to pushed image
- push tagged docker image
---
.dockerignore | 2 +
.env | 1 +
Dockerfile | 55 +++++++++++++++++++
data/speedco.yaml | 6 +-
docker-compose.yml | 24 ++++++++
run_train.sh | 4 +-
setup.py | 20 +++++++
test.py | 18 +++---
train.py | 18 +++---
{models => yolo/models}/__init__.py | 0
{models => yolo/models}/export.py | 2 +-
{models => yolo/models}/models.py | 8 +--
{models => yolo/models}/yolov3-spp.cfg | 0
.../models}/yolov4-csp-single-class.cfg | 0
{models => yolo/models}/yolov4-csp.cfg | 0
{models => yolo/models}/yolov4.cfg | 0
{utils => yolo/utils}/__init__.py | 0
{utils => yolo/utils}/activations.py | 0
{utils => yolo/utils}/autoanchor.py | 0
{utils => yolo/utils}/datasets.py | 6 +-
{utils => yolo/utils}/general.py | 6 +-
{utils => yolo/utils}/google_utils.py | 0
{utils => yolo/utils}/layers.py | 2 +-
{utils => yolo/utils}/loss.py | 4 +-
{utils => yolo/utils}/metrics.py | 0
{utils => yolo/utils}/parse_config.py | 0
{utils => yolo/utils}/plots.py | 4 +-
{utils => yolo/utils}/torch_utils.py | 0
28 files changed, 141 insertions(+), 39 deletions(-)
create mode 100644 .dockerignore
create mode 100644 .env
create mode 100644 Dockerfile
create mode 100644 docker-compose.yml
create mode 100644 setup.py
rename {models => yolo/models}/__init__.py (100%)
rename {models => yolo/models}/export.py (98%)
rename {models => yolo/models}/models.py (99%)
rename {models => yolo/models}/yolov3-spp.cfg (100%)
rename {models => yolo/models}/yolov4-csp-single-class.cfg (100%)
rename {models => yolo/models}/yolov4-csp.cfg (100%)
rename {models => yolo/models}/yolov4.cfg (100%)
rename {utils => yolo/utils}/__init__.py (100%)
rename {utils => yolo/utils}/activations.py (100%)
rename {utils => yolo/utils}/autoanchor.py (100%)
rename {utils => yolo/utils}/datasets.py (99%)
rename {utils => yolo/utils}/general.py (98%)
rename {utils => yolo/utils}/google_utils.py (100%)
rename {utils => yolo/utils}/layers.py (99%)
rename {utils => yolo/utils}/loss.py (98%)
rename {utils => yolo/utils}/metrics.py (100%)
rename {utils => yolo/utils}/parse_config.py (100%)
rename {utils => yolo/utils}/plots.py (99%)
rename {utils => yolo/utils}/torch_utils.py (100%)
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..bbceb85
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+runs
+weights
diff --git a/.env b/.env
new file mode 100644
index 0000000..cdbc703
--- /dev/null
+++ b/.env
@@ -0,0 +1 @@
+TAG=0.1.0
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..5f5be47
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,55 @@
+#FROM nvcr.io/nvidia/pytorch:20.06-py3
+#FROM continuumio/miniconda3
+#FROM gpuci/miniconda-cuda:11.0-devel-ubuntu20.04
+FROM nvidia/cuda:11.1-cudnn8-devel-ubuntu20.04
+
+#COPY environment.yml .
+#RUN conda update --force-reinstall conda
+#RUN conda env update --name base --file environment.yml --prune
+
+ENV MY_ROOT=/workspace \
+ PKG_PATH=/yolo_src \
+ NUMPROC=4 \
+ PYTHON_VER=3.8 \
+ PYTHONUNBUFFERED=1 \
+ PYTHONPATH=. \
+ DEBIAN_FRONTEND=noninteractive
+
+WORKDIR $PKG_PATH
+
+RUN apt-get update && apt-get install -y apt-utils && apt-get -y upgrade && \
+ apt-get install -y git libsnappy-dev libopencv-dev libhdf5-serial-dev libboost-all-dev libatlas-base-dev \
+ libgflags-dev libgoogle-glog-dev liblmdb-dev curl unzip \
+ python${PYTHON_VER}-dev && \
+ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
+ python${PYTHON_VER} get-pip.py && \
+ rm get-pip.py && \
+ # Clean UP
+ apt upgrade -y && \
+ apt clean && \
+ apt autoremove -y && \
+ rm -rf /var/lib/apt/lists/* # cleanup to reduce image size
+
+RUN ln -s /usr/bin/python${PYTHON_VER} /usr/bin/python
+
+RUN pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 -f https://download.pytorch.org/whl/torch_stable.html
+
+WORKDIR $MY_ROOT
+# We have to install mish-cuda from source due to an issue with one of the header files
+ADD https://github.com/thomasbrandon/mish-cuda/archive/master.zip $MY_ROOT/mish-cuda.zip
+RUN unzip mish-cuda.zip
+WORKDIR $MY_ROOT/mish-cuda-master
+RUN cp external/CUDAApplyUtils.cuh csrc/
+RUN python setup.py build install
+WORKDIR $PKG_PATH
+# ADD https://drive.google.com/file/d/1NQwz47cW0NUgy7L3_xOKaNEfLoQuq3EL/view?usp=sharing /weights/yolov4-csp.weights
+ADD requirements.txt $PKG_PATH/requirements.txt
+RUN pip install -r $PKG_PATH/requirements.txt
+ADD yolo $PKG_PATH/yolo
+ADD train.py $PKG_PATH/train.py
+ADD test.py $PKG_PATH/test.py
+ADD setup.py $PKG_PATH/setup.py
+ADD data $PKG_PATH/data
+RUN pip install .
+
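After building the image, a quick sanity check that the source-built mish-cuda works (a sketch; it assumes a CUDA device is visible in the container and that the package exposes MishCuda as in thomasbrandon/mish-cuda):

```
import torch
import torch.nn.functional as F
from mish_cuda import MishCuda

act = MishCuda()
x = torch.randn(8, device='cuda')
ref = x * torch.tanh(F.softplus(x))  # mish(x) = x * tanh(softplus(x))
print(torch.allclose(act(x), ref, atol=1e-6))  # expect True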
diff --git a/data/speedco.yaml b/data/speedco.yaml
index 8d25b63..84b59b4 100644
--- a/data/speedco.yaml
+++ b/data/speedco.yaml
@@ -1,7 +1,7 @@
# train and val datasets (image directory or *.txt file with image paths)
-train: data/speedco_train_images.txt
-val: data/speedco_val_images.txt
-test: data/speedco_val_images.txt
+train: data/speedco_dataset_train_images.txt
+val: data/speedco_dataset_val_images.txt
+test: data/speedco_dataset_val_images.txt
# number of classes
nc: 1
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..146aa23
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,24 @@
+---
+version: '2.3'
+
+services:
+ train:
+ image: gcr.io/kinsol-generic/yolov4-csp:${TAG:-dev}
+ build:
+ context: .
+ dockerfile: ./Dockerfile
+ runtime: nvidia
+ volumes:
+ - .:/home/dev
+ - /mnt/NAS/Production/TruckBay/:/mnt/NAS/Production/TruckBay
+ - /mnt/NAS/Public/parque_research/datasets/coco_yolo/coco:/mnt/coco
+ - /home/kweston/darknet_utils:/home/kweston/darknet_utils
+ - /home/kweston/speedco/baywatchr-inference/speedco_dataset:/mnt/speedco_dataset
+ - /home/kweston/speedco/baywatchr-inference/data/lists:/mnt/speedco_datalists
+ - /data/kweston/sandbox/mlannotation/results:/results
+ environment:
+ - GOOGLE_APPLICATION_CREDENTIALS=/app/baywatchr-api-key.json
+ command:
+ - bash
+ shm_size: 64g
+ working_dir: /home/dev
diff --git a/run_train.sh b/run_train.sh
index a4caeda..0aae77f 100755
--- a/run_train.sh
+++ b/run_train.sh
@@ -1,3 +1 @@
-#!/bin/bash
-# Used for training with 3 GTX 1070 GPUs
-python -m torch.distributed.launch --nproc_per_node 3 train.py --device 0,1,2 --batch-size 21 --data speedco.yaml --weights '' --cfg yolov4-csp.cfg --name yolov4-csp-speedco --sync-bn --rect --single-cl
\ No newline at end of file
+python -m torch.distributed.launch --nproc_per_node 3 train.py --device 0,1,2 --batch-size 21 --data speedco.yaml --weights '' --cfg yolov4-csp-single-class.cfg --name yolov4-csp-speedco --sync-bn --rect --single-cls
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..7310b97
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,20 @@
+import os
+from setuptools import setup, find_namespace_packages
+
+
+def readlines(fname):
+ with open(os.path.join(os.path.dirname(__file__), fname)) as f:
+ return f.readlines()
+
+
+install_requires = readlines('requirements.txt')
+
+
+setup(
+ name='yolov4-csp',
+ version='1.0.0',
+ install_requires=install_requires,
+ packages=find_namespace_packages(include=['yolo', 'yolo.*']),
+ include_package_data=True,
+ python_requires='>=3.7'
+)
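Once the image runs `pip install .`, the code resolves as a proper package from any working directory, which is what the rewritten import paths in the diffs below rely on. A short sketch (names taken from the diffs in this patch):

```
# After `pip install .`, these imports work without fiddling with PYTHONPATH:
from yolo.models.models import Darknet
from yolo.utils.metrics import fitness
```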
diff --git a/test.py b/test.py
index bd99b59..8e0df4e 100644
--- a/test.py
+++ b/test.py
@@ -2,6 +2,7 @@
import glob
import json
import os
+import shutil
from pathlib import Path
import numpy as np
@@ -9,16 +10,16 @@
import yaml
from tqdm import tqdm
-from utils.google_utils import attempt_load
-from utils.datasets import create_dataloader
-from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \
+from yolo.utils.google_utils import attempt_load
+from yolo.utils.datasets import create_dataloader
+from yolo.utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \
non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, clip_coords, set_logging, increment_path
-from utils.loss import compute_loss
-from utils.metrics import ap_per_class
-from utils.plots import plot_images, output_to_target
-from utils.torch_utils import select_device, time_synchronized
+from yolo.utils.loss import compute_loss
+from yolo.utils.metrics import ap_per_class
+from yolo.utils.plots import plot_images, output_to_target
+from yolo.utils.torch_utils import select_device, time_synchronized
-from models.models import *
+from yolo.models.models import *
def load_classes(path):
# Loads *.names file at 'path'
@@ -27,6 +28,7 @@ def load_classes(path):
return list(filter(None, names)) # filter removes empty strings (such as last line)
+
def test(data,
weights=None,
batch_size=16,
diff --git a/train.py b/train.py
index d7cbf1c..03bc917 100644
--- a/train.py
+++ b/train.py
@@ -22,16 +22,16 @@
import test # import test.py to get mAP after each epoch
#from models.yolo import Model
-from models.models import *
-from utils.autoanchor import check_anchors
-from utils.datasets import create_dataloader
-from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
+from yolo.models.models import *
+from yolo.utils.autoanchor import check_anchors
+from yolo.utils.datasets import create_dataloader
+from yolo.utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, fitness_p, fitness_r, fitness_ap50, fitness_ap, fitness_f, strip_optimizer, get_latest_run,\
check_dataset, check_file, check_git_status, check_img_size, print_mutation, set_logging
-from utils.google_utils import attempt_download
-from utils.loss import compute_loss
-from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
-from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
+from yolo.utils.google_utils import attempt_download
+from yolo.utils.loss import compute_loss
+from yolo.utils.plots import plot_images, plot_labels, plot_results, plot_evolution
+from yolo.utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
logger = logging.getLogger(__name__)
@@ -69,7 +69,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
- nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names
+ nc, names = (1, data_dict['names']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
# Model
diff --git a/models/__init__.py b/yolo/models/__init__.py
similarity index 100%
rename from models/__init__.py
rename to yolo/models/__init__.py
diff --git a/models/export.py b/yolo/models/export.py
similarity index 98%
rename from models/export.py
rename to yolo/models/export.py
index 947a7a8..c6cc8cd 100644
--- a/models/export.py
+++ b/yolo/models/export.py
@@ -2,7 +2,7 @@
import torch
-from utils.google_utils import attempt_download
+from yolo.utils.google_utils import attempt_download
if __name__ == '__main__':
parser = argparse.ArgumentParser()
diff --git a/models/models.py b/yolo/models/models.py
similarity index 99%
rename from models/models.py
rename to yolo/models/models.py
index 8b3bd2c..a93d0b8 100644
--- a/models/models.py
+++ b/yolo/models/models.py
@@ -1,7 +1,7 @@
-from utils.google_utils import *
-from utils.layers import *
-from utils.parse_config import *
-from utils import torch_utils
+from yolo.utils.google_utils import *
+from yolo.utils.layers import *
+from yolo.utils.parse_config import *
+from yolo.utils import torch_utils
ONNX_EXPORT = False
diff --git a/models/yolov3-spp.cfg b/yolo/models/yolov3-spp.cfg
similarity index 100%
rename from models/yolov3-spp.cfg
rename to yolo/models/yolov3-spp.cfg
diff --git a/models/yolov4-csp-single-class.cfg b/yolo/models/yolov4-csp-single-class.cfg
similarity index 100%
rename from models/yolov4-csp-single-class.cfg
rename to yolo/models/yolov4-csp-single-class.cfg
diff --git a/models/yolov4-csp.cfg b/yolo/models/yolov4-csp.cfg
similarity index 100%
rename from models/yolov4-csp.cfg
rename to yolo/models/yolov4-csp.cfg
diff --git a/models/yolov4.cfg b/yolo/models/yolov4.cfg
similarity index 100%
rename from models/yolov4.cfg
rename to yolo/models/yolov4.cfg
diff --git a/utils/__init__.py b/yolo/utils/__init__.py
similarity index 100%
rename from utils/__init__.py
rename to yolo/utils/__init__.py
diff --git a/utils/activations.py b/yolo/utils/activations.py
similarity index 100%
rename from utils/activations.py
rename to yolo/utils/activations.py
diff --git a/utils/autoanchor.py b/yolo/utils/autoanchor.py
similarity index 100%
rename from utils/autoanchor.py
rename to yolo/utils/autoanchor.py
diff --git a/utils/datasets.py b/yolo/utils/datasets.py
similarity index 99%
rename from utils/datasets.py
rename to yolo/utils/datasets.py
index aa31808..06fd089 100644
--- a/utils/datasets.py
+++ b/yolo/utils/datasets.py
@@ -23,8 +23,8 @@
from pycocotools import mask as maskUtils
from torchvision.utils import save_image
-from utils.general import xyxy2xywh, xywh2xyxy
-from utils.torch_utils import torch_distributed_zero_first
+from yolo.utils.general import xyxy2xywh, xywh2xyxy
+from yolo.utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
@@ -649,7 +649,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
def img2label_paths(img_paths):
# Define label paths as a function of image paths
- sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
+ sa, sb = os.sep + 'JPEGImages' + os.sep, os.sep + 'labels' + os.sep # /JPEGImages/, /labels/ substrings
return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths]
try:
diff --git a/utils/general.py b/yolo/utils/general.py
similarity index 98%
rename from utils/general.py
rename to yolo/utils/general.py
index 0585f28..0cefcff 100644
--- a/utils/general.py
+++ b/yolo/utils/general.py
@@ -17,9 +17,9 @@
import torch
import yaml
-from utils.google_utils import gsutil_getsize
-from utils.metrics import fitness, fitness_p, fitness_r, fitness_ap50, fitness_ap, fitness_f
-from utils.torch_utils import init_torch_seeds
+from yolo.utils.google_utils import gsutil_getsize
+from yolo.utils.metrics import fitness, fitness_p, fitness_r, fitness_ap50, fitness_ap, fitness_f
+from yolo.utils.torch_utils import init_torch_seeds
# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
diff --git a/utils/google_utils.py b/yolo/utils/google_utils.py
similarity index 100%
rename from utils/google_utils.py
rename to yolo/utils/google_utils.py
diff --git a/utils/layers.py b/yolo/utils/layers.py
similarity index 99%
rename from utils/layers.py
rename to yolo/utils/layers.py
index 1d46a18..1b5cb8e 100644
--- a/utils/layers.py
+++ b/yolo/utils/layers.py
@@ -1,6 +1,6 @@
import torch.nn.functional as F
-from utils.general import *
+from yolo.utils.general import *
import torch
from torch import nn
diff --git a/utils/loss.py b/yolo/utils/loss.py
similarity index 98%
rename from utils/loss.py
rename to yolo/utils/loss.py
index 288b388..482a213 100644
--- a/utils/loss.py
+++ b/yolo/utils/loss.py
@@ -3,8 +3,8 @@
import torch
import torch.nn as nn
-from utils.general import bbox_iou
-from utils.torch_utils import is_parallel
+from yolo.utils.general import bbox_iou
+from yolo.utils.torch_utils import is_parallel
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
diff --git a/utils/metrics.py b/yolo/utils/metrics.py
similarity index 100%
rename from utils/metrics.py
rename to yolo/utils/metrics.py
diff --git a/utils/parse_config.py b/yolo/utils/parse_config.py
similarity index 100%
rename from utils/parse_config.py
rename to yolo/utils/parse_config.py
diff --git a/utils/plots.py b/yolo/utils/plots.py
similarity index 99%
rename from utils/plots.py
rename to yolo/utils/plots.py
index c90a96b..a4df1ad 100644
--- a/utils/plots.py
+++ b/yolo/utils/plots.py
@@ -16,8 +16,8 @@
from PIL import Image
from scipy.signal import butter, filtfilt
-from utils.general import xywh2xyxy, xyxy2xywh
-from utils.metrics import fitness
+from yolo.utils.general import xywh2xyxy, xyxy2xywh
+from yolo.utils.metrics import fitness
# Settings
matplotlib.use('Agg') # for writing to files only
diff --git a/utils/torch_utils.py b/yolo/utils/torch_utils.py
similarity index 100%
rename from utils/torch_utils.py
rename to yolo/utils/torch_utils.py
From 31b0ca7a80cdd1115bb24fb5d5643ef6f3c838e5 Mon Sep 17 00:00:00 2001
From: Kyle Weston
Date: Thu, 10 Jun 2021 20:39:19 +0000
Subject: [PATCH 37/37] - write error cases to file in test.py
---
test.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/test.py b/test.py
index 8e0df4e..a9231f1 100644
--- a/test.py
+++ b/test.py
@@ -312,6 +312,7 @@ def test(data,
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--cfg', type=str, default='models/yolov4-csp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
+ parser.add_argument('--save-errors', help='Save the error cases to file', action='store_true')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
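Note that this patch only registers the flag; the code that consumes it is not part of the diff. A hypothetical helper showing one way test.py could act on --save-errors (the function name and log format are assumptions, not the repo's API):

```
from pathlib import Path


def save_error_cases(save_dir, image_path, boxes):
    # Hypothetical sketch: append each failure case as "image x1 y1 x2 y2 conf cls".
    with open(Path(save_dir) / 'errors.txt', 'a') as f:
        f.write('%s %s\n' % (image_path, ' '.join('%.4f' % v for v in boxes)))
```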