From d04cbc3fb266dc433bd0d35df10dab4994a1f3c1 Mon Sep 17 00:00:00 2001
From: Nathan
Date: Mon, 31 Jul 2023 23:11:27 +0700
Subject: [PATCH 01/67] upload yolov7 human tracker

---
 human_detection/yolov7 skeleton/.gitignore | 2 +
 human_detection/yolov7 skeleton/commands.txt | 5 +
 human_detection/yolov7 skeleton/main.py | 287 +++
 .../yolov7 skeleton/models/__init__.py | 1 +
 .../yolov7 skeleton/models/common.py | 2019 +++++++++++++++++
 .../yolov7 skeleton/models/experimental.py | 272 +++
 .../yolov7 skeleton/models/yolo.py | 843 +++++++
 human_detection/yolov7 skeleton/readme.md | 6 +
 human_detection/yolov7 skeleton/sort.py | 362 +++
 .../yolov7 skeleton/utils/__init__.py | 1 +
 .../yolov7 skeleton/utils/activations.py | 72 +
 .../yolov7 skeleton/utils/add_nms.py | 155 ++
 .../yolov7 skeleton/utils/autoanchor.py | 160 ++
 .../yolov7 skeleton/utils/aws/__init__.py | 1 +
 .../yolov7 skeleton/utils/aws/mime.sh | 26 +
 .../yolov7 skeleton/utils/aws/resume.py | 37 +
 .../yolov7 skeleton/utils/aws/userdata.sh | 27 +
 .../yolov7 skeleton/utils/datasets.py | 1320 +++++++++++
 .../yolov7 skeleton/utils/general.py | 892 ++++++++
 .../utils/google_app_engine/Dockerfile | 25 +
 .../additional_requirements.txt | 4 +
 .../utils/google_app_engine/app.yaml | 14 +
 .../yolov7 skeleton/utils/google_utils.py | 123 +
 human_detection/yolov7 skeleton/utils/loss.py | 1697 ++++++++++++++
 .../yolov7 skeleton/utils/metrics.py | 227 ++
 .../yolov7 skeleton/utils/plots.py | 489 ++++
 .../yolov7 skeleton/utils/torch_utils.py | 374 +++
 .../utils/wandb_logging/__init__.py | 1 +
 .../utils/wandb_logging/log_dataset.py | 24 +
 .../utils/wandb_logging/wandb_utils.py | 306 +++
 30 files changed, 9772 insertions(+)
 create mode 100644 human_detection/yolov7 skeleton/.gitignore
 create mode 100644 human_detection/yolov7 skeleton/commands.txt
 create mode 100644 human_detection/yolov7 skeleton/main.py
 create mode 100644 human_detection/yolov7 skeleton/models/__init__.py
 create mode 100644 human_detection/yolov7 skeleton/models/common.py
 create mode 100644 human_detection/yolov7 skeleton/models/experimental.py
 create mode 100644 human_detection/yolov7 skeleton/models/yolo.py
 create mode 100644 human_detection/yolov7 skeleton/readme.md
 create mode 100644 human_detection/yolov7 skeleton/sort.py
 create mode 100644 human_detection/yolov7 skeleton/utils/__init__.py
 create mode 100644 human_detection/yolov7 skeleton/utils/activations.py
 create mode 100644 human_detection/yolov7 skeleton/utils/add_nms.py
 create mode 100644 human_detection/yolov7 skeleton/utils/autoanchor.py
 create mode 100644 human_detection/yolov7 skeleton/utils/aws/__init__.py
 create mode 100644 human_detection/yolov7 skeleton/utils/aws/mime.sh
 create mode 100644 human_detection/yolov7 skeleton/utils/aws/resume.py
 create mode 100644 human_detection/yolov7 skeleton/utils/aws/userdata.sh
 create mode 100644 human_detection/yolov7 skeleton/utils/datasets.py
 create mode 100644 human_detection/yolov7 skeleton/utils/general.py
 create mode 100644 human_detection/yolov7 skeleton/utils/google_app_engine/Dockerfile
 create mode 100644 human_detection/yolov7 skeleton/utils/google_app_engine/additional_requirements.txt
 create mode 100644 human_detection/yolov7 skeleton/utils/google_app_engine/app.yaml
 create mode 100644 human_detection/yolov7 skeleton/utils/google_utils.py
 create mode 100644 human_detection/yolov7 skeleton/utils/loss.py
 create mode 100644 human_detection/yolov7 skeleton/utils/metrics.py
 create mode 100644 human_detection/yolov7 skeleton/utils/plots.py
 create mode 100644 human_detection/yolov7
skeleton/utils/torch_utils.py create mode 100644 human_detection/yolov7 skeleton/utils/wandb_logging/__init__.py create mode 100644 human_detection/yolov7 skeleton/utils/wandb_logging/log_dataset.py create mode 100644 human_detection/yolov7 skeleton/utils/wandb_logging/wandb_utils.py diff --git a/human_detection/yolov7 skeleton/.gitignore b/human_detection/yolov7 skeleton/.gitignore new file mode 100644 index 00000000..7c80f4b0 --- /dev/null +++ b/human_detection/yolov7 skeleton/.gitignore @@ -0,0 +1,2 @@ +**.pt +**.mp4 diff --git a/human_detection/yolov7 skeleton/commands.txt b/human_detection/yolov7 skeleton/commands.txt new file mode 100644 index 00000000..0afe0eec --- /dev/null +++ b/human_detection/yolov7 skeleton/commands.txt @@ -0,0 +1,5 @@ +python detect_or_track.py --weights yolov7.pt --view-img --nosave --show-fps --track --show-track --unique-track-color --classes 0 --source Media1.mp4 + +python main.py --weights yolov7.pt --view-img --show-fps --track --show-track --unique-track-color --classes 0 --source video.mp4 + +python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --track --show-track-lines --classes 0 --no-trace --source video.mp4 \ No newline at end of file diff --git a/human_detection/yolov7 skeleton/main.py b/human_detection/yolov7 skeleton/main.py new file mode 100644 index 00000000..3c86db38 --- /dev/null +++ b/human_detection/yolov7 skeleton/main.py @@ -0,0 +1,287 @@ +import argparse +import time +from pathlib import Path +import cv2 +import torch +import numpy as np + +from models.experimental import attempt_load +from utils.datasets import LoadStreams, LoadImages +from utils.general import check_img_size, check_requirements, \ + check_imshow, non_max_suppression, apply_classifier, \ + scale_coords, xyxy2xywh, strip_optimizer, set_logging, \ + increment_path +from utils.plots import plot_one_box +from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel +import sort + +"""Function to Draw Bounding boxes""" +def draw_boxes(img, bbox, identities=None, categories=None, confidences = None, names=None, colors = None): + for i, box in enumerate(bbox): + x1, y1, x2, y2 = [int(i) for i in box] + tl = opt.thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness + + cat = int(categories[i]) if categories is not None else 0 + id = int(identities[i]) if identities is not None else 0 + # conf = confidences[i] if confidences is not None else 0 + + color = colors[cat] + + if not opt.nobbox: + cv2.rectangle(img, (x1, y1), (x2, y2), color, tl) + + if not opt.nolabel: + label = str(id) + ":"+ names[cat] if identities is not None else f'{names[cat]} {confidences[i]:.2f}' + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = x1 + t_size[0], y1 - t_size[1] - 3 + cv2.rectangle(img, (x1, y1), c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(img, label, (x1, y1 - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + + return img + + +def draw_track_lines(im0, tracks, sort_tracker, thickness): + for t, track in enumerate(tracks): # loop over tracks + track_color = sort_tracker.color_list[t] # Get the color for the current track from the color_list of sort_tracker + for i in range(len(track.centroidarr) - 1): # Iterate over the centroids in the track + current_centroid = track.centroidarr[i] + next_centroid = track.centroidarr[i + 1] + current_point = (int(current_centroid[0]), int(current_centroid[1])) + next_point = 
(int(next_centroid[0]), int(next_centroid[1])) + cv2.line(im0, current_point, next_point, track_color, thickness=thickness) + +class Yolo_sort_tracker: + def __init__(self): + # Initialize + set_logging() + self.device = select_device(opt.device) + self.use_half_precision = self.device.type != 'cpu' # enable half precision if on GPU (only supported on CUDA) + + # Load model + self.model = attempt_load(opt.weights_file, map_location=self.device) # load FP32 model + self.stride = int(self.model.stride.max()) # model stride, which is the step size or the number of units the sliding window moves when performing operations like convolution or pooling + self.imgsize = check_img_size(opt.img_size, s=self.stride) # check img_size + if not opt.no_trace: + self.model = TracedModel(self.model, self.device, opt.img_size) + if self.use_half_precision: + self.model.half() # to FP16 + + # Run inference once if on GPU. Not sure why or even if this is necessary. ### to be tested + if self.device.type != 'cpu': + self.model(torch.zeros(1, 3, self.imgsize, self.imgsize).to(self.device).type_as(next(self.model.parameters()))) + + # Set Dataloader + source = opt.source + self.vid_path, self.vid_writer = None, None + self.webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( + ('rtsp://', 'rtmp://', 'http://', 'https://')) + if self.webcam: + opt.view_img = check_imshow() + torch.backends.cudnn.benchmark = True # set True to speed up constant image size inference + self.dataset = LoadStreams(source, img_size=self.imgsize, stride=self.stride) + else: + self.dataset = LoadImages(source, img_size=self.imgsize, stride=self.stride) + + # defining option flags + self.save_img = not opt.nosave and not source.endswith('.txt') # save inference images + self.save_dir = Path(increment_path(Path(opt.project) / opt.name)) # increment run + if not opt.nosave: + self.save_dir.mkdir(parents=True) # make dir + + # # Second-stage classifier. + # # This is commented because we are just detecting people, and we don't need to classify the detected stuffs + # classify = False + # if classify: + # modelc = load_classifier(name='resnet101', n=2) # initialize + # modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() + self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names + self.colors = [[np.random.randint(0, 255) for _ in range(3)] for _ in self.names] + + + def process_video_file(self): + # Get names and colors + for path, img, im0s, vid_cap in self.dataset: + self.detect(img, self.imgsize, im0s, path, vid_cap) + def process_frame(self, image_frame): + im0s = np.array(image_frame) + image_frame = cv2.resize(image_frame, (576,640)) + img = np.array(image_frame).transpose(2, 0, 1) + self.detect(img, self.imgsize, image_frame) + def detect(self, img, imgsize, im0s, path=None, vid_cap=False): + old_img_w = old_img_h = imgsize + old_img_b = 1 ### idk what is this . need to understand later + + startTime = 0 + img = torch.from_numpy(img).to(self.device) + img = img.half() if self.use_half_precision else img.float() # uint8 to FP16 or FP32 + img /= 255.0 # 0~255 to 0.0~1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + # Warmup. Not sure why or even if this is necessary. 
### to be tested + if self.device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]): + old_img_b = img.shape[0] + old_img_h = img.shape[2] + old_img_w = img.shape[3] + for i in range(3): + self.model(img, augment=opt.augment)[0] + + # Inference + time1 = time_synchronized() + pred = self.model(img, augment=opt.augment)[0] + time2 = time_synchronized() + + # Apply NMS + pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) + time3 = time_synchronized() + + # # Apply second-stage classifier + # if classify: + # pred = apply_classifier(pred, modelc, img, im0s) + + #TESTING ###to be removed + if len(pred)!=1: + print("\n",len(pred)) + exit() + + # Process detections + for i, det in enumerate(pred): # detections per image + if self.webcam: # batch_size >= 1 + p, output_string, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), self.dataset.count + else: + p, output_string, im0, frame = path, '', im0s, getattr(self.dataset, 'frame', 0) + + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + if len(det)!=0: + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + + dets_to_sort = np.empty((0,6)) + # NOTE: We send in detected object class too + for x1,y1,x2,y2,conf,detclass in det.cpu().detach().numpy(): + dets_to_sort = np.vstack((dets_to_sort, np.array([x1, y1, x2, y2, conf, detclass]))) + + if opt.track: + tracked_dets = sort_tracker.update(dets_to_sort, unique_color=True) + tracks =sort_tracker.getTrackers() + if len(tracked_dets)>0: + bbox_xyxy = tracked_dets[:,:4] + identities = tracked_dets[:, 8] + categories = tracked_dets[:, 4] + confidences = None + if opt.show_track_lines: + draw_track_lines(im0, tracks, sort_tracker, opt.thickness) + else: + ### not sure if this is possible + bbox_xyxy = dets_to_sort[:,:4] + identities = None + categories = dets_to_sort[:, 5] + confidences = dets_to_sort[:, 4] + print("if len(tracked_dets)>0 == FALSE!!!!") + # exit() + else: + bbox_xyxy = dets_to_sort[:,:4] + identities = None + categories = dets_to_sort[:, 5] + confidences = dets_to_sort[:, 4] + # draw bounding boxes for visualization + im0 = draw_boxes(im0, bbox_xyxy, identities, categories, confidences, self.names, self.colors) + + # prepare print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + output_string += f"{n} {self.names[int(c)]}, " # add to string + + # Print time + print(f'{output_string}Done. ({(1E3 * (time2 - time1)):.1f}ms) Inference, ({(1E3 * (time3 - time2)):.1f}ms) NMS') + + + # Show result on live cv2 window view: FPS + if opt.show_fps and self.dataset.mode != 'image' : + currentTime = time.time() + fps = 1/(currentTime - startTime) + startTime = currentTime + cv2.putText(im0, "FPS: " + str(round(fps, 3)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0,255,0),2) + # Show result on live cv2 window view: image + if opt.view_img: + cv2.imshow("yolov7 preview", im0) + cv2.waitKey(1) # 1 millisecond + + + # Save results (image with detections) to local file. 
+ if self.save_img: + p = Path(p) # to Path + save_path = str(self.save_dir / p.name) # img.jpg + if self.dataset.mode == 'image': + cv2.imwrite(save_path, im0) + print(f" The image with the result is saved in: {save_path}") + else: # 'video' or 'stream' + if self.vid_path != save_path: # new video + self.vid_path = save_path + if isinstance(self.vid_writer, cv2.VideoWriter): + self.vid_writer.release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path += '.mp4' + self.vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + self.vid_writer.write(im0) + + +if __name__ == '__main__': + np.random.seed(0) # make outputs reproducible + + parser = argparse.ArgumentParser() + # Files and devices: + parser.add_argument('--weights-file', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)') + parser.add_argument('--no-trace', action='store_true', help='don`t trace model (if traced_model.pt already exist this can save time)') + parser.add_argument('--source', type=str, default='inference/images', help='video source to process') # mp4 file/folder, 0 for webcam + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + # Hyperparameters: + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') # Non-maximum suppression is a post-processing step to remove duplicate and overlapping bounding boxes. Intersection over Union (IOU) is a metric used to measure the overlap between two bounding boxes. The IOU threshold controls how strictly the algorithm filters out overlapping bounding boxes. A higher IOU threshold will result in more aggressive suppression and fewer overlapping boxes being retained, while a lower threshold will allow more boxes to survive, even if they partially overlap with each other. 
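+    # To make the --iou-thres help above concrete, a minimal, self-contained sketch of the IoU
+    # computation that the threshold is compared against. Illustrative only: the real filtering
+    # happens inside non_max_suppression(), and _iou_example plus its default boxes are made-up
+    # names/values, not part of the tracker.
+    def _iou_example(box_a=(0, 0, 100, 100), box_b=(50, 50, 150, 150)):
+        # Boxes are (x1, y1, x2, y2). Intersection is the overlap rectangle; union = area_a + area_b - inter.
+        ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
+        ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
+        inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)            # 50 * 50 = 2500
+        area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])   # 100 * 100 = 10000
+        area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])   # 100 * 100 = 10000
+        return inter / (area_a + area_b - inter)                 # 2500 / 17500 ≈ 0.14, below the 0.45 default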
+    # What to output
+    parser.add_argument('--view-img', action='store_true', help='display results')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+
+    # yolov7 detection options
+    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
+    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    # SORT tracking options
+    parser.add_argument('--track', action='store_true', help='run tracking')
+    # Appearance options (what to display on screen or in the output video)
+    parser.add_argument('--show-track-lines', action='store_true', help='show tracked path')
+    parser.add_argument('--show-fps', action='store_true', help='show fps')
+    parser.add_argument('--thickness', type=int, default=2, help='bounding box and font size thickness')
+    parser.add_argument('--nobbox', action='store_true', help='don`t show bounding box')
+    parser.add_argument('--nolabel', action='store_true', help='don`t show label')
+
+    opt = parser.parse_args()
+    print(opt)
+
+    # define the SORT tracker
+    sort_tracker = sort.Sort(max_age=5,
+                             min_hits=2,
+                             iou_threshold=0.2)
+
+    with torch.no_grad():  # deactivate the autograd engine to save memory and speed up computations; on CPU this is roughly 15% faster
+        yolo_sort_tracker = Yolo_sort_tracker()
+
+        if opt.source.isnumeric():  # webcam source, e.g. --source 0
+            mywebcam = cv2.VideoCapture(int(opt.source))
+            while True:
+                _, image_frame = mywebcam.read()
+                # process_frame() draws/saves results internally and does not return boxes yet
+                yolo_sort_tracker.process_frame(image_frame)
+        else:
+            yolo_sort_tracker.process_video_file()
+
+### TODO: fix up file-specific handling
\ No newline at end of file
diff --git a/human_detection/yolov7 skeleton/models/__init__.py b/human_detection/yolov7 skeleton/models/__init__.py
new file mode 100644
index 00000000..84952a81
--- /dev/null
+++ b/human_detection/yolov7 skeleton/models/__init__.py
@@ -0,0 +1 @@
+# init
\ No newline at end of file
diff --git a/human_detection/yolov7 skeleton/models/common.py b/human_detection/yolov7 skeleton/models/common.py
new file mode 100644
index 00000000..edb5edc9
--- /dev/null
+++ b/human_detection/yolov7 skeleton/models/common.py
@@ -0,0 +1,2019 @@
+import math
+from copy import copy
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+import requests
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision.ops import DeformConv2d
+from PIL import Image
+from torch.cuda import amp
+
+from utils.datasets import letterbox
+from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh
+from utils.plots import color_list, plot_one_box
+from utils.torch_utils import time_synchronized
+
+
+##### basic ####
+
+def autopad(k, p=None):  # kernel, padding
+    # Pad to 'same'
+    if p is None:
+        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
+    return p
+
+
+class MP(nn.Module):
+    def __init__(self, k=2):
+        super(MP, self).__init__()
+        self.m = nn.MaxPool2d(kernel_size=k, stride=k)
+
+    def forward(self, x):
+        return self.m(x)
+
+
+class SP(nn.Module):
+    def __init__(self, k=3, s=1):
+        super(SP, self).__init__()
+        self.m = nn.MaxPool2d(kernel_size=k, stride=s, padding=k // 2)
+
+    def forward(self, x):
+        return self.m(x)
+
+
+class ReOrg(nn.Module): + def __init__(self): + super(ReOrg, self).__init__() + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1) + + +class Concat(nn.Module): + def __init__(self, dimension=1): + super(Concat, self).__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class Chuncat(nn.Module): + def __init__(self, dimension=1): + super(Chuncat, self).__init__() + self.d = dimension + + def forward(self, x): + x1 = [] + x2 = [] + for xi in x: + xi1, xi2 = xi.chunk(2, self.d) + x1.append(xi1) + x2.append(xi2) + return torch.cat(x1+x2, self.d) + + +class Shortcut(nn.Module): + def __init__(self, dimension=0): + super(Shortcut, self).__init__() + self.d = dimension + + def forward(self, x): + return x[0]+x[1] + + +class Foldcut(nn.Module): + def __init__(self, dimension=0): + super(Foldcut, self).__init__() + self.d = dimension + + def forward(self, x): + x1, x2 = x.chunk(2, self.d) + return x1+x2 + + +class Conv(nn.Module): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Conv, self).__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def fuseforward(self, x): + return self.act(self.conv(x)) + + +class RobustConv(nn.Module): + # Robust convolution (use high kernel size 7-11 for: downsampling and other layers). Train for 300 - 450 epochs. + def __init__(self, c1, c2, k=7, s=1, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups + super(RobustConv, self).__init__() + self.conv_dw = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act) + self.conv1x1 = nn.Conv2d(c1, c2, 1, 1, 0, groups=1, bias=True) + self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None + + def forward(self, x): + x = x.to(memory_format=torch.channels_last) + x = self.conv1x1(self.conv_dw(x)) + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + return x + + +class RobustConv2(nn.Module): + # Robust convolution 2 (use [32, 5, 2] or [32, 7, 4] or [32, 11, 8] for one of the paths in CSP). 
+ def __init__(self, c1, c2, k=7, s=4, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups + super(RobustConv2, self).__init__() + self.conv_strided = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act) + self.conv_deconv = nn.ConvTranspose2d(in_channels=c1, out_channels=c2, kernel_size=s, stride=s, + padding=0, bias=True, dilation=1, groups=1 + ) + self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None + + def forward(self, x): + x = self.conv_deconv(self.conv_strided(x)) + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + return x + + +def DWConv(c1, c2, k=1, s=1, act=True): + # Depthwise convolution + return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super(GhostConv, self).__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class Stem(nn.Module): + # Stem + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Stem, self).__init__() + c_ = int(c2/2) # hidden channels + self.cv1 = Conv(c1, c_, 3, 2) + self.cv2 = Conv(c_, c_, 1, 1) + self.cv3 = Conv(c_, c_, 3, 2) + self.pool = torch.nn.MaxPool2d(2, stride=2) + self.cv4 = Conv(2 * c_, c2, 1, 1) + + def forward(self, x): + x = self.cv1(x) + return self.cv4(torch.cat((self.cv3(self.cv2(x)), self.pool(x)), dim=1)) + + +class DownC(nn.Module): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, n=1, k=2): + super(DownC, self).__init__() + c_ = int(c1) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2//2, 3, k) + self.cv3 = Conv(c1, c2//2, 1, 1) + self.mp = nn.MaxPool2d(kernel_size=k, stride=k) + + def forward(self, x): + return torch.cat((self.cv2(self.cv1(x)), self.cv3(self.mp(x))), dim=1) + + +class SPP(nn.Module): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13)): + super(SPP, self).__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class Bottleneck(nn.Module): + # Darknet bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super(Bottleneck, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Res(nn.Module): + # ResNet bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super(Res, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c_, 3, 1, g=g) + self.cv3 = Conv(c_, c2, 1, 1) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv3(self.cv2(self.cv1(x))) if self.add else self.cv3(self.cv2(self.cv1(x))) + + 
+class ResX(Res): + # ResNet bottleneck + def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__(c1, c2, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + + +class Ghost(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super(Ghost, self).__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + +##### end of basic ##### + + +##### cspnet ##### + +class SPPCSPC(nn.Module): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)): + super(SPPCSPC, self).__init__() + c_ = int(2 * c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(c_, c_, 3, 1) + self.cv4 = Conv(c_, c_, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + self.cv5 = Conv(4 * c_, c_, 1, 1) + self.cv6 = Conv(c_, c_, 3, 1) + self.cv7 = Conv(2 * c_, c2, 1, 1) + + def forward(self, x): + x1 = self.cv4(self.cv3(self.cv1(x))) + y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1))) + y2 = self.cv2(x) + return self.cv7(torch.cat((y1, y2), dim=1)) + +class GhostSPPCSPC(SPPCSPC): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)): + super().__init__(c1, c2, n, shortcut, g, e, k) + c_ = int(2 * c2 * e) # hidden channels + self.cv1 = GhostConv(c1, c_, 1, 1) + self.cv2 = GhostConv(c1, c_, 1, 1) + self.cv3 = GhostConv(c_, c_, 3, 1) + self.cv4 = GhostConv(c_, c_, 1, 1) + self.cv5 = GhostConv(4 * c_, c_, 1, 1) + self.cv6 = GhostConv(c_, c_, 3, 1) + self.cv7 = GhostConv(2 * c_, c2, 1, 1) + + +class GhostStem(Stem): + # Stem + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__(c1, c2, k, s, p, g, act) + c_ = int(c2/2) # hidden channels + self.cv1 = GhostConv(c1, c_, 3, 2) + self.cv2 = GhostConv(c_, c_, 1, 1) + self.cv3 = GhostConv(c_, c_, 3, 2) + self.cv4 = GhostConv(2 * c_, c2, 1, 1) + + +class BottleneckCSPA(nn.Module): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSPA, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.m(self.cv1(x)) + y2 = self.cv2(x) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class BottleneckCSPB(nn.Module): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSPB, self).__init__() + c_ = int(c2) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + self.m = 
nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + x1 = self.cv1(x) + y1 = self.m(x1) + y2 = self.cv2(x1) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class BottleneckCSPC(nn.Module): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSPC, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(c_, c_, 1, 1) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(torch.cat((y1, y2), dim=1)) + + +class ResCSPA(BottleneckCSPA): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class ResCSPB(BottleneckCSPB): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class ResCSPC(BottleneckCSPC): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class ResXCSPA(ResCSPA): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class ResXCSPB(ResCSPB): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class ResXCSPC(ResCSPC): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class GhostCSPA(BottleneckCSPA): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) + + +class GhostCSPB(BottleneckCSPB): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def 
__init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) + + +class GhostCSPC(BottleneckCSPC): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) + +##### end of cspnet ##### + + +##### yolor ##### + +class ImplicitA(nn.Module): + def __init__(self, channel, mean=0., std=.02): + super(ImplicitA, self).__init__() + self.channel = channel + self.mean = mean + self.std = std + self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1)) + nn.init.normal_(self.implicit, mean=self.mean, std=self.std) + + def forward(self, x): + return self.implicit + x + + +class ImplicitM(nn.Module): + def __init__(self, channel, mean=1., std=.02): + super(ImplicitM, self).__init__() + self.channel = channel + self.mean = mean + self.std = std + self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1)) + nn.init.normal_(self.implicit, mean=self.mean, std=self.std) + + def forward(self, x): + return self.implicit * x + +##### end of yolor ##### + + +##### repvgg ##### + +class RepConv(nn.Module): + # Represented convolution + # https://arxiv.org/abs/2101.03697 + + def __init__(self, c1, c2, k=3, s=1, p=None, g=1, act=True, deploy=False): + super(RepConv, self).__init__() + + self.deploy = deploy + self.groups = g + self.in_channels = c1 + self.out_channels = c2 + + assert k == 3 + assert autopad(k, p) == 1 + + padding_11 = autopad(k, p) - k // 2 + + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + if deploy: + self.rbr_reparam = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=True) + + else: + self.rbr_identity = (nn.BatchNorm2d(num_features=c1) if c2 == c1 and s == 1 else None) + + self.rbr_dense = nn.Sequential( + nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False), + nn.BatchNorm2d(num_features=c2), + ) + + self.rbr_1x1 = nn.Sequential( + nn.Conv2d( c1, c2, 1, s, padding_11, groups=g, bias=False), + nn.BatchNorm2d(num_features=c2), + ) + + def forward(self, inputs): + if hasattr(self, "rbr_reparam"): + return self.act(self.rbr_reparam(inputs)) + + if self.rbr_identity is None: + id_out = 0 + else: + id_out = self.rbr_identity(inputs) + + return self.act(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out) + + def get_equivalent_kernel_bias(self): + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) + kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) + kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) + return ( + kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, + bias3x3 + bias1x1 + biasid, + ) + + def _pad_1x1_to_3x3_tensor(self, kernel1x1): + if kernel1x1 is None: + return 0 + else: + return nn.functional.pad(kernel1x1, [1, 1, 1, 1]) + + def _fuse_bn_tensor(self, branch): + if branch is None: + return 0, 0 + if isinstance(branch, nn.Sequential): + kernel = branch[0].weight + running_mean = branch[1].running_mean + running_var = branch[1].running_var + gamma = branch[1].weight + beta = branch[1].bias + eps = branch[1].eps + else: + assert isinstance(branch, nn.BatchNorm2d) + if not hasattr(self, "id_tensor"): + input_dim = self.in_channels // 
self.groups + kernel_value = np.zeros( + (self.in_channels, input_dim, 3, 3), dtype=np.float32 + ) + for i in range(self.in_channels): + kernel_value[i, i % input_dim, 1, 1] = 1 + self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + def repvgg_convert(self): + kernel, bias = self.get_equivalent_kernel_bias() + return ( + kernel.detach().cpu().numpy(), + bias.detach().cpu().numpy(), + ) + + def fuse_conv_bn(self, conv, bn): + + std = (bn.running_var + bn.eps).sqrt() + bias = bn.bias - bn.running_mean * bn.weight / std + + t = (bn.weight / std).reshape(-1, 1, 1, 1) + weights = conv.weight * t + + bn = nn.Identity() + conv = nn.Conv2d(in_channels = conv.in_channels, + out_channels = conv.out_channels, + kernel_size = conv.kernel_size, + stride=conv.stride, + padding = conv.padding, + dilation = conv.dilation, + groups = conv.groups, + bias = True, + padding_mode = conv.padding_mode) + + conv.weight = torch.nn.Parameter(weights) + conv.bias = torch.nn.Parameter(bias) + return conv + + def fuse_repvgg_block(self): + if self.deploy: + return + print(f"RepConv.fuse_repvgg_block") + + self.rbr_dense = self.fuse_conv_bn(self.rbr_dense[0], self.rbr_dense[1]) + + self.rbr_1x1 = self.fuse_conv_bn(self.rbr_1x1[0], self.rbr_1x1[1]) + rbr_1x1_bias = self.rbr_1x1.bias + weight_1x1_expanded = torch.nn.functional.pad(self.rbr_1x1.weight, [1, 1, 1, 1]) + + # Fuse self.rbr_identity + if (isinstance(self.rbr_identity, nn.BatchNorm2d) or isinstance(self.rbr_identity, nn.modules.batchnorm.SyncBatchNorm)): + # print(f"fuse: rbr_identity == BatchNorm2d or SyncBatchNorm") + identity_conv_1x1 = nn.Conv2d( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=1, + stride=1, + padding=0, + groups=self.groups, + bias=False) + identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.to(self.rbr_1x1.weight.data.device) + identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.squeeze().squeeze() + # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}") + identity_conv_1x1.weight.data.fill_(0.0) + identity_conv_1x1.weight.data.fill_diagonal_(1.0) + identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.unsqueeze(2).unsqueeze(3) + # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}") + + identity_conv_1x1 = self.fuse_conv_bn(identity_conv_1x1, self.rbr_identity) + bias_identity_expanded = identity_conv_1x1.bias + weight_identity_expanded = torch.nn.functional.pad(identity_conv_1x1.weight, [1, 1, 1, 1]) + else: + # print(f"fuse: rbr_identity != BatchNorm2d, rbr_identity = {self.rbr_identity}") + bias_identity_expanded = torch.nn.Parameter( torch.zeros_like(rbr_1x1_bias) ) + weight_identity_expanded = torch.nn.Parameter( torch.zeros_like(weight_1x1_expanded) ) + + + #print(f"self.rbr_1x1.weight = {self.rbr_1x1.weight.shape}, ") + #print(f"weight_1x1_expanded = {weight_1x1_expanded.shape}, ") + #print(f"self.rbr_dense.weight = {self.rbr_dense.weight.shape}, ") + + self.rbr_dense.weight = torch.nn.Parameter(self.rbr_dense.weight + weight_1x1_expanded + weight_identity_expanded) + self.rbr_dense.bias = torch.nn.Parameter(self.rbr_dense.bias + rbr_1x1_bias + bias_identity_expanded) + + self.rbr_reparam = self.rbr_dense + self.deploy = True 
+ + if self.rbr_identity is not None: + del self.rbr_identity + self.rbr_identity = None + + if self.rbr_1x1 is not None: + del self.rbr_1x1 + self.rbr_1x1 = None + + if self.rbr_dense is not None: + del self.rbr_dense + self.rbr_dense = None + + +class RepBottleneck(Bottleneck): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__(c1, c2, shortcut=True, g=1, e=0.5) + c_ = int(c2 * e) # hidden channels + self.cv2 = RepConv(c_, c2, 3, 1, g=g) + + +class RepBottleneckCSPA(BottleneckCSPA): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class RepBottleneckCSPB(BottleneckCSPB): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class RepBottleneckCSPC(BottleneckCSPC): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class RepRes(Res): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__(c1, c2, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.cv2 = RepConv(c_, c_, 3, 1, g=g) + + +class RepResCSPA(ResCSPA): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResCSPB(ResCSPB): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResCSPC(ResCSPC): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResX(ResX): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__(c1, c2, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.cv2 = RepConv(c_, c_, 3, 1, g=g) + + +class RepResXCSPA(ResXCSPA): + # CSP Bottleneck 
https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResXCSPB(ResXCSPB): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResXCSPC(ResXCSPC): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + +##### end of repvgg ##### + + +##### transformer ##### + +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2) + p = p.unsqueeze(0) + p = p.transpose(0, 3) + p = p.squeeze(3) + e = self.linear(p) + x = p + e + + x = self.tr(x) + x = x.unsqueeze(3) + x = x.transpose(0, 3) + x = x.reshape(b, self.c2, w, h) + return x + +##### end of transformer ##### + + +##### yolov5 ##### + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Focus, self).__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + # return self.conv(self.contract(x)) + + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + + +class 
Contract(nn.Module): + # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' + s = self.gain + x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) + return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) + + +class Expand(nn.Module): + # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + s = self.gain + x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) + return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) + + +class NMS(nn.Module): + # Non-Maximum Suppression (NMS) module + conf = 0.25 # confidence threshold + iou = 0.45 # IoU threshold + classes = None # (optional list) filter by class + + def __init__(self): + super(NMS, self).__init__() + + def forward(self, x): + return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) + + +class autoShape(nn.Module): + # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + classes = None # (optional list) filter by class + + def __init__(self, model): + super(autoShape, self).__init__() + self.model = model.eval() + + def autoshape(self): + print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() + return self + + @torch.no_grad() + def forward(self, imgs, size=640, augment=False, profile=False): + # Inference from various sources. For height=640, width=1280, RGB images example inputs are: + # filename: imgs = 'data/samples/zidane.jpg' + # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images + + t = [time_synchronized()] + p = next(self.model.parameters()) # for device and type + if isinstance(imgs, torch.Tensor): # torch + with amp.autocast(enabled=p.device.type != 'cpu'): + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(imgs): + f = f'image{i}' # filename + if isinstance(im, str): # filename or uri + im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(im), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + imgs[i] = im # update + shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = np.stack(x, 0) if n > 1 else x[0][None] # stack + x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 + t.append(time_synchronized()) + + with amp.autocast(enabled=p.device.type != 'cpu'): + # Inference + y = self.model(x, augment, profile)[0] # forward + t.append(time_synchronized()) + + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + + t.append(time_synchronized()) + return Detections(imgs, y, files, t, self.names, x.shape) + + +class Detections: + # detections class for YOLOv5 inference results + def __init__(self, imgs, pred, files, times=None, names=None, shape=None): + super(Detections, self).__init__() + d = pred[0].device # device + gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + self.imgs = imgs # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.s = shape # inference BCHW shape + + def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): + colors = color_list() + for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): + str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' + if pred is not None: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + if show or save or render: + for *box, conf, cls in pred: # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + 
plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + if pprint: + print(str.rstrip(', ')) + if show: + img.show(self.files[i]) # show + if save: + f = self.files[i] + img.save(Path(save_dir) / f) # save + print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') + if render: + self.imgs[i] = np.asarray(img) + + def print(self): + self.display(pprint=True) # print results + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + + def show(self): + self.display(show=True) # show results + + def save(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir + Path(save_dir).mkdir(parents=True, exist_ok=True) + self.display(save=True, save_dir=save_dir) # save results + + def render(self): + self.display(render=True) # render results + return self.imgs + + def pandas(self): + # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + # return a list of Detections objects, i.e. 'for result in results.tolist():' + x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] + for d in x: + for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def __len__(self): + return self.n + + +class Classify(nn.Module): + # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super(Classify, self).__init__() + self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) + self.flat = nn.Flatten() + + def forward(self, x): + z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list + return self.flat(self.conv(z)) # flatten to x(b,c2) + +##### end of yolov5 ###### + + +##### orepa ##### + +def transI_fusebn(kernel, bn): + gamma = bn.weight + std = (bn.running_var + bn.eps).sqrt() + return kernel * ((gamma / std).reshape(-1, 1, 1, 1)), bn.bias - bn.running_mean * gamma / std + + +class ConvBN(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, + stride=1, padding=0, dilation=1, groups=1, deploy=False, nonlinear=None): + super().__init__() + if nonlinear is None: + self.nonlinear = nn.Identity() + else: + self.nonlinear = nonlinear + if deploy: + self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, + stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True) + else: + self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, + stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False) + self.bn = nn.BatchNorm2d(num_features=out_channels) + + def forward(self, x): + if hasattr(self, 'bn'): + return self.nonlinear(self.bn(self.conv(x))) + else: + return self.nonlinear(self.conv(x)) + + def switch_to_deploy(self): + kernel, bias = transI_fusebn(self.conv.weight, self.bn) + conv = nn.Conv2d(in_channels=self.conv.in_channels, out_channels=self.conv.out_channels, kernel_size=self.conv.kernel_size, + stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups, bias=True) + conv.weight.data = kernel + conv.bias.data = bias + for para in self.parameters(): + para.detach_() + self.__delattr__('conv') + self.__delattr__('bn') + self.conv = conv + +class OREPA_3x3_RepConv(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, + stride=1, padding=0, dilation=1, groups=1, + internal_channels_1x1_3x3=None, + deploy=False, nonlinear=None, single_init=False): + super(OREPA_3x3_RepConv, self).__init__() + self.deploy = deploy + + if nonlinear is None: + self.nonlinear = nn.Identity() + else: + self.nonlinear = nonlinear + + self.kernel_size = kernel_size + self.in_channels = in_channels + self.out_channels = out_channels + self.groups = groups + assert padding == kernel_size // 2 + + self.stride = stride + self.padding = padding + self.dilation = dilation + + self.branch_counter = 0 + + self.weight_rbr_origin = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), kernel_size, kernel_size)) + nn.init.kaiming_uniform_(self.weight_rbr_origin, a=math.sqrt(1.0)) + self.branch_counter += 1 + + + if groups < out_channels: + self.weight_rbr_avg_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1)) + self.weight_rbr_pfir_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1)) + nn.init.kaiming_uniform_(self.weight_rbr_avg_conv, a=1.0) + nn.init.kaiming_uniform_(self.weight_rbr_pfir_conv, a=1.0) + self.weight_rbr_avg_conv.data + self.weight_rbr_pfir_conv.data + self.register_buffer('weight_rbr_avg_avg', torch.ones(kernel_size, kernel_size).mul(1.0/kernel_size/kernel_size)) + 
self.branch_counter += 1 + + else: + raise NotImplementedError + self.branch_counter += 1 + + if internal_channels_1x1_3x3 is None: + internal_channels_1x1_3x3 = in_channels if groups < out_channels else 2 * in_channels # For mobilenet, it is better to have 2X internal channels + + if internal_channels_1x1_3x3 == in_channels: + self.weight_rbr_1x1_kxk_idconv1 = nn.Parameter(torch.zeros(in_channels, int(in_channels/self.groups), 1, 1)) + id_value = np.zeros((in_channels, int(in_channels/self.groups), 1, 1)) + for i in range(in_channels): + id_value[i, i % int(in_channels/self.groups), 0, 0] = 1 + id_tensor = torch.from_numpy(id_value).type_as(self.weight_rbr_1x1_kxk_idconv1) + self.register_buffer('id_tensor', id_tensor) + + else: + self.weight_rbr_1x1_kxk_conv1 = nn.Parameter(torch.Tensor(internal_channels_1x1_3x3, int(in_channels/self.groups), 1, 1)) + nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv1, a=math.sqrt(1.0)) + self.weight_rbr_1x1_kxk_conv2 = nn.Parameter(torch.Tensor(out_channels, int(internal_channels_1x1_3x3/self.groups), kernel_size, kernel_size)) + nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv2, a=math.sqrt(1.0)) + self.branch_counter += 1 + + expand_ratio = 8 + self.weight_rbr_gconv_dw = nn.Parameter(torch.Tensor(in_channels*expand_ratio, 1, kernel_size, kernel_size)) + self.weight_rbr_gconv_pw = nn.Parameter(torch.Tensor(out_channels, in_channels*expand_ratio, 1, 1)) + nn.init.kaiming_uniform_(self.weight_rbr_gconv_dw, a=math.sqrt(1.0)) + nn.init.kaiming_uniform_(self.weight_rbr_gconv_pw, a=math.sqrt(1.0)) + self.branch_counter += 1 + + if out_channels == in_channels and stride == 1: + self.branch_counter += 1 + + self.vector = nn.Parameter(torch.Tensor(self.branch_counter, self.out_channels)) + self.bn = nn.BatchNorm2d(out_channels) + + self.fre_init() + + nn.init.constant_(self.vector[0, :], 0.25) #origin + nn.init.constant_(self.vector[1, :], 0.25) #avg + nn.init.constant_(self.vector[2, :], 0.0) #prior + nn.init.constant_(self.vector[3, :], 0.5) #1x1_kxk + nn.init.constant_(self.vector[4, :], 0.5) #dws_conv + + + def fre_init(self): + prior_tensor = torch.Tensor(self.out_channels, self.kernel_size, self.kernel_size) + half_fg = self.out_channels/2 + for i in range(self.out_channels): + for h in range(3): + for w in range(3): + if i < half_fg: + prior_tensor[i, h, w] = math.cos(math.pi*(h+0.5)*(i+1)/3) + else: + prior_tensor[i, h, w] = math.cos(math.pi*(w+0.5)*(i+1-half_fg)/3) + + self.register_buffer('weight_rbr_prior', prior_tensor) + + def weight_gen(self): + + weight_rbr_origin = torch.einsum('oihw,o->oihw', self.weight_rbr_origin, self.vector[0, :]) + + weight_rbr_avg = torch.einsum('oihw,o->oihw', torch.einsum('oihw,hw->oihw', self.weight_rbr_avg_conv, self.weight_rbr_avg_avg), self.vector[1, :]) + + weight_rbr_pfir = torch.einsum('oihw,o->oihw', torch.einsum('oihw,ohw->oihw', self.weight_rbr_pfir_conv, self.weight_rbr_prior), self.vector[2, :]) + + weight_rbr_1x1_kxk_conv1 = None + if hasattr(self, 'weight_rbr_1x1_kxk_idconv1'): + weight_rbr_1x1_kxk_conv1 = (self.weight_rbr_1x1_kxk_idconv1 + self.id_tensor).squeeze() + elif hasattr(self, 'weight_rbr_1x1_kxk_conv1'): + weight_rbr_1x1_kxk_conv1 = self.weight_rbr_1x1_kxk_conv1.squeeze() + else: + raise NotImplementedError + weight_rbr_1x1_kxk_conv2 = self.weight_rbr_1x1_kxk_conv2 + + if self.groups > 1: + g = self.groups + t, ig = weight_rbr_1x1_kxk_conv1.size() + o, tg, h, w = weight_rbr_1x1_kxk_conv2.size() + weight_rbr_1x1_kxk_conv1 = weight_rbr_1x1_kxk_conv1.view(g, int(t/g), ig) + 
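+ # Fold the sequential 1x1 -> kxk branch into one kernel per group: with conv1 viewed as
+ # (g, t/g, in/g) and conv2 viewed as (g, out/g, t/g, k, k), the einsum 'gti,gothw->goihw'
+ # below contracts the shared internal dimension t, i.e. it composes the 1x1 convolution and
+ # the kxk convolution into a single equivalent kxk kernel.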
weight_rbr_1x1_kxk_conv2 = weight_rbr_1x1_kxk_conv2.view(g, int(o/g), tg, h, w) + weight_rbr_1x1_kxk = torch.einsum('gti,gothw->goihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2).view(o, ig, h, w) + else: + weight_rbr_1x1_kxk = torch.einsum('ti,othw->oihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2) + + weight_rbr_1x1_kxk = torch.einsum('oihw,o->oihw', weight_rbr_1x1_kxk, self.vector[3, :]) + + weight_rbr_gconv = self.dwsc2full(self.weight_rbr_gconv_dw, self.weight_rbr_gconv_pw, self.in_channels) + weight_rbr_gconv = torch.einsum('oihw,o->oihw', weight_rbr_gconv, self.vector[4, :]) + + weight = weight_rbr_origin + weight_rbr_avg + weight_rbr_1x1_kxk + weight_rbr_pfir + weight_rbr_gconv + + return weight + + def dwsc2full(self, weight_dw, weight_pw, groups): + + t, ig, h, w = weight_dw.size() + o, _, _, _ = weight_pw.size() + tg = int(t/groups) + i = int(ig*groups) + weight_dw = weight_dw.view(groups, tg, ig, h, w) + weight_pw = weight_pw.squeeze().view(o, groups, tg) + + weight_dsc = torch.einsum('gtihw,ogt->ogihw', weight_dw, weight_pw) + return weight_dsc.view(o, i, h, w) + + def forward(self, inputs): + weight = self.weight_gen() + out = F.conv2d(inputs, weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) + + return self.nonlinear(self.bn(out)) + +class RepConv_OREPA(nn.Module): + + def __init__(self, c1, c2, k=3, s=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False, nonlinear=nn.SiLU()): + super(RepConv_OREPA, self).__init__() + self.deploy = deploy + self.groups = groups + self.in_channels = c1 + self.out_channels = c2 + + self.padding = padding + self.dilation = dilation + self.groups = groups + + assert k == 3 + assert padding == 1 + + padding_11 = padding - k // 2 + + if nonlinear is None: + self.nonlinearity = nn.Identity() + else: + self.nonlinearity = nonlinear + + if use_se: + self.se = SEBlock(self.out_channels, internal_neurons=self.out_channels // 16) + else: + self.se = nn.Identity() + + if deploy: + self.rbr_reparam = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, + padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode) + + else: + self.rbr_identity = nn.BatchNorm2d(num_features=self.in_channels) if self.out_channels == self.in_channels and s == 1 else None + self.rbr_dense = OREPA_3x3_RepConv(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, padding=padding, groups=groups, dilation=1) + self.rbr_1x1 = ConvBN(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=1, stride=s, padding=padding_11, groups=groups, dilation=1) + print('RepVGG Block, identity = ', self.rbr_identity) + + + def forward(self, inputs): + if hasattr(self, 'rbr_reparam'): + return self.nonlinearity(self.se(self.rbr_reparam(inputs))) + + if self.rbr_identity is None: + id_out = 0 + else: + id_out = self.rbr_identity(inputs) + + out1 = self.rbr_dense(inputs) + out2 = self.rbr_1x1(inputs) + out3 = id_out + out = out1 + out2 + out3 + + return self.nonlinearity(self.se(out)) + + + # Optional. This improves the accuracy and facilitates quantization. + # 1. Cancel the original weight decay on rbr_dense.conv.weight and rbr_1x1.conv.weight. + # 2. Use like this. + # loss = criterion(....) 
+ # for every RepVGGBlock blk: + # loss += weight_decay_coefficient * 0.5 * blk.get_cust_L2() + # optimizer.zero_grad() + # loss.backward() + + # Not used for OREPA + def get_custom_L2(self): + K3 = self.rbr_dense.weight_gen() + K1 = self.rbr_1x1.conv.weight + t3 = (self.rbr_dense.bn.weight / ((self.rbr_dense.bn.running_var + self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach() + t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var + self.rbr_1x1.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach() + + l2_loss_circle = (K3 ** 2).sum() - (K3[:, :, 1:2, 1:2] ** 2).sum() # The L2 loss of the "circle" of weights in 3x3 kernel. Use regular L2 on them. + eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1 # The equivalent resultant central point of 3x3 kernel. + l2_loss_eq_kernel = (eq_kernel ** 2 / (t3 ** 2 + t1 ** 2)).sum() # Normalize for an L2 coefficient comparable to regular L2. + return l2_loss_eq_kernel + l2_loss_circle + + def get_equivalent_kernel_bias(self): + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) + kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) + kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) + return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid + + def _pad_1x1_to_3x3_tensor(self, kernel1x1): + if kernel1x1 is None: + return 0 + else: + return torch.nn.functional.pad(kernel1x1, [1,1,1,1]) + + def _fuse_bn_tensor(self, branch): + if branch is None: + return 0, 0 + if not isinstance(branch, nn.BatchNorm2d): + if isinstance(branch, OREPA_3x3_RepConv): + kernel = branch.weight_gen() + elif isinstance(branch, ConvBN): + kernel = branch.conv.weight + else: + raise NotImplementedError + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + else: + if not hasattr(self, 'id_tensor'): + input_dim = self.in_channels // self.groups + kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32) + for i in range(self.in_channels): + kernel_value[i, i % input_dim, 1, 1] = 1 + self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + def switch_to_deploy(self): + if hasattr(self, 'rbr_reparam'): + return + print(f"RepConv_OREPA.switch_to_deploy") + kernel, bias = self.get_equivalent_kernel_bias() + self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.in_channels, out_channels=self.rbr_dense.out_channels, + kernel_size=self.rbr_dense.kernel_size, stride=self.rbr_dense.stride, + padding=self.rbr_dense.padding, dilation=self.rbr_dense.dilation, groups=self.rbr_dense.groups, bias=True) + self.rbr_reparam.weight.data = kernel + self.rbr_reparam.bias.data = bias + for para in self.parameters(): + para.detach_() + self.__delattr__('rbr_dense') + self.__delattr__('rbr_1x1') + if hasattr(self, 'rbr_identity'): + self.__delattr__('rbr_identity') + +##### end of orepa ##### + + +##### swin transformer ##### + +class WindowAttention(nn.Module): + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + 
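+ # Standard scaled dot-product attention per head: attention scores are q @ k^T scaled by
+ # head_dim ** -0.5 (unless an explicit qk_scale is given), plus a learned relative position
+ # bias looked up from a (2*Wh-1)*(2*Ww-1) x num_heads table using the pairwise relative
+ # coordinates computed below.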
self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + nn.init.normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + # print(attn.dtype, v.dtype) + try: + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + except: + #print(attn.dtype, v.dtype) + x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + +class Mlp(nn.Module): + + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + +def window_partition(x, window_size): + + B, H, W, C = x.shape + assert H % window_size == 0, 'feature map h and w can not divide by window size' + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + +def window_reverse(windows, window_size, H, W): + + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // 
window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class SwinTransformerLayer(nn.Module): + + def __init__(self, dim, num_heads, window_size=8, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.SiLU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + # if min(self.input_resolution) <= self.window_size: + # # if window size is larger than input resolution, we don't partition windows + # self.shift_size = 0 + # self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def create_mask(self, H, W): + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + return attn_mask + + def forward(self, x): + # reshape x[b c h w] to x[b l c] + _, _, H_, W_ = x.shape + + Padding = False + if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0: + Padding = True + # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.') + pad_r = (self.window_size - W_ % self.window_size) % self.window_size + pad_b = (self.window_size - H_ % self.window_size) % self.window_size + x = F.pad(x, (0, pad_r, 0, pad_b)) + + # print('2', x.shape) + B, C, H, W = x.shape + L = H * W + x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c + + # create mask from init to forward + if self.shift_size > 0: + attn_mask = self.create_mask(H, W).to(x.device) + else: + attn_mask = None + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) 
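+ # Shape bookkeeping with illustrative values (B=1, H=W=16, C=96, window_size=8):
+ # window_partition gave (B * H/8 * W/8, 8, 8, C) = (4, 8, 8, 96) windows; after attention and
+ # the view above, attn_windows keeps that layout, and window_reverse below maps the windows
+ # back onto the full (B, H, W, C) feature map before the cyclic shift is undone.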
+ shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w + + if Padding: + x = x[:, :, :H_, :W_] # reverse padding + + return x + + +class SwinTransformerBlock(nn.Module): + def __init__(self, c1, c2, num_heads, num_layers, window_size=8): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + + # remove input_resolution + self.blocks = nn.Sequential(*[SwinTransformerLayer(dim=c2, num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)]) + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + x = self.blocks(x) + return x + + +class STCSPA(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(STCSPA, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformerBlock(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.m(self.cv1(x)) + y2 = self.cv2(x) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class STCSPB(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(STCSPB, self).__init__() + c_ = int(c2) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformerBlock(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + x1 = self.cv1(x) + y1 = self.m(x1) + y2 = self.cv2(x1) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class STCSPC(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(STCSPC, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(c_, c_, 1, 1) + self.cv4 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformerBlock(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(torch.cat((y1, y2), dim=1)) + +##### end of swin transformer ##### + + +##### swin transformer v2 ##### + +class WindowAttention_v2(nn.Module): + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., + pretrained_window_size=[0, 0]): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.pretrained_window_size = pretrained_window_size + self.num_heads = num_heads + + self.logit_scale = nn.Parameter(torch.log(10 * 
torch.ones((num_heads, 1, 1))), requires_grad=True) + + # mlp to generate continuous relative position bias + self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True), + nn.ReLU(inplace=True), + nn.Linear(512, num_heads, bias=False)) + + # get relative_coords_table + relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32) + relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32) + relative_coords_table = torch.stack( + torch.meshgrid([relative_coords_h, + relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + if pretrained_window_size[0] > 0: + relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1) + else: + relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + torch.abs(relative_coords_table) + 1.0) / np.log2(8) + + self.register_buffer("relative_coords_table", relative_coords_table) + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(dim)) + self.v_bias = nn.Parameter(torch.zeros(dim)) + else: + self.q_bias = None + self.v_bias = None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + + B_, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + # cosine attention + attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. 
/ 0.01))).exp() + attn = attn * logit_scale + + relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) + relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + relative_position_bias = 16 * torch.sigmoid(relative_position_bias) + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + try: + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + except: + x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, window_size={self.window_size}, ' \ + f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + +class Mlp_v2(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition_v2(x, window_size): + + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse_v2(windows, window_size, H, W): + + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class SwinTransformerLayer_v2(nn.Module): + + def __init__(self, dim, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.SiLU, norm_layer=nn.LayerNorm, pretrained_window_size=0): + super().__init__() + self.dim = dim + #self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + #if min(self.input_resolution) <= self.window_size: + # # if window size is larger than input resolution, we don't partition windows + # self.shift_size = 0 + # self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention_v2( + dim, 
window_size=(self.window_size, self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + pretrained_window_size=(pretrained_window_size, pretrained_window_size)) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp_v2(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def create_mask(self, H, W): + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + return attn_mask + + def forward(self, x): + # reshape x[b c h w] to x[b l c] + _, _, H_, W_ = x.shape + + Padding = False + if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0: + Padding = True + # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.') + pad_r = (self.window_size - W_ % self.window_size) % self.window_size + pad_b = (self.window_size - H_ % self.window_size) % self.window_size + x = F.pad(x, (0, pad_r, 0, pad_b)) + + # print('2', x.shape) + B, C, H, W = x.shape + L = H * W + x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c + + # create mask from init to forward + if self.shift_size > 0: + attn_mask = self.create_mask(H, W).to(x.device) + else: + attn_mask = None + + shortcut = x + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition_v2(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse_v2(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + x = shortcut + self.drop_path(self.norm1(x)) + + # FFN + x = x + self.drop_path(self.norm2(self.mlp(x))) + x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w + + if Padding: + x = x[:, :, :H_, :W_] # reverse padding + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + 
nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class SwinTransformer2Block(nn.Module): + def __init__(self, c1, c2, num_heads, num_layers, window_size=7): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + + # remove input_resolution + self.blocks = nn.Sequential(*[SwinTransformerLayer_v2(dim=c2, num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)]) + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + x = self.blocks(x) + return x + + +class ST2CSPA(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(ST2CSPA, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformer2Block(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.m(self.cv1(x)) + y2 = self.cv2(x) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class ST2CSPB(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(ST2CSPB, self).__init__() + c_ = int(c2) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformer2Block(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + x1 = self.cv1(x) + y1 = self.m(x1) + y2 = self.cv2(x1) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class ST2CSPC(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(ST2CSPC, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(c_, c_, 1, 1) + self.cv4 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformer2Block(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(torch.cat((y1, y2), dim=1)) + +##### end of swin transformer v2 ##### diff --git a/human_detection/yolov7 skeleton/models/experimental.py b/human_detection/yolov7 skeleton/models/experimental.py new file mode 100644 index 00000000..735d7aa0 --- /dev/null +++ b/human_detection/yolov7 skeleton/models/experimental.py @@ -0,0 +1,272 @@ +import numpy as np +import random +import torch +import torch.nn as nn + +from models.common import Conv, DWConv +from utils.google_utils import attempt_download + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super(CrossConv, self).__init__() + c_ = int(c2 * 
e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super(Sum, self).__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class MixConv2d(nn.Module): + # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + super(MixConv2d, self).__init__() + groups = len(k) + if equal_ch: # equal c_ per group + i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + [0] * groups + a = np.eye(groups + 1, groups, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b + + self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.LeakyReLU(0.1, inplace=True) + + def forward(self, x): + return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super(Ensemble, self).__init__() + + def forward(self, x, augment=False): + y = [] + for module in self: + y.append(module(x, augment)[0]) + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + + + + +class ORT_NMS(torch.autograd.Function): + '''ONNX-Runtime NMS operation''' + @staticmethod + def forward(ctx, + boxes, + scores, + max_output_boxes_per_class=torch.tensor([100]), + iou_threshold=torch.tensor([0.45]), + score_threshold=torch.tensor([0.25])): + device = boxes.device + batch = scores.shape[0] + num_det = random.randint(0, 100) + batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device) + idxs = torch.arange(100, 100 + num_det).to(device) + zeros = torch.zeros((num_det,), dtype=torch.int64).to(device) + selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous() + selected_indices = selected_indices.to(torch.int64) + return selected_indices + + @staticmethod + def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): + return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) + + +class TRT_NMS(torch.autograd.Function): + '''TensorRT NMS operation''' + @staticmethod + def forward( + ctx, + boxes, + scores, + background_class=-1, + box_coding=1, + iou_threshold=0.45, + max_output_boxes=100, + plugin_version="1", + score_activation=0, + score_threshold=0.25, + ): + batch_size, num_boxes, num_classes = scores.shape + num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32) + det_boxes = torch.randn(batch_size, 
max_output_boxes, 4) + det_scores = torch.randn(batch_size, max_output_boxes) + det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32) + return num_det, det_boxes, det_scores, det_classes + + @staticmethod + def symbolic(g, + boxes, + scores, + background_class=-1, + box_coding=1, + iou_threshold=0.45, + max_output_boxes=100, + plugin_version="1", + score_activation=0, + score_threshold=0.25): + out = g.op("TRT::EfficientNMS_TRT", + boxes, + scores, + background_class_i=background_class, + box_coding_i=box_coding, + iou_threshold_f=iou_threshold, + max_output_boxes_i=max_output_boxes, + plugin_version_s=plugin_version, + score_activation_i=score_activation, + score_threshold_f=score_threshold, + outputs=4) + nums, boxes, scores, classes = out + return nums, boxes, scores, classes + + +class ONNX_ORT(nn.Module): + '''onnx module with ONNX-Runtime NMS operation.''' + def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640, device=None, n_classes=80): + super().__init__() + self.device = device if device else torch.device("cpu") + self.max_obj = torch.tensor([max_obj]).to(device) + self.iou_threshold = torch.tensor([iou_thres]).to(device) + self.score_threshold = torch.tensor([score_thres]).to(device) + self.max_wh = max_wh # if max_wh != 0 : non-agnostic else : agnostic + self.convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], + dtype=torch.float32, + device=self.device) + self.n_classes=n_classes + + def forward(self, x): + boxes = x[:, :, :4] + conf = x[:, :, 4:5] + scores = x[:, :, 5:] + if self.n_classes == 1: + scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5, + # so there is no need to multiplicate. + else: + scores *= conf # conf = obj_conf * cls_conf + boxes @= self.convert_matrix + max_score, category_id = scores.max(2, keepdim=True) + dis = category_id.float() * self.max_wh + nmsbox = boxes + dis + max_score_tp = max_score.transpose(1, 2).contiguous() + selected_indices = ORT_NMS.apply(nmsbox, max_score_tp, self.max_obj, self.iou_threshold, self.score_threshold) + X, Y = selected_indices[:, 0], selected_indices[:, 2] + selected_boxes = boxes[X, Y, :] + selected_categories = category_id[X, Y, :].float() + selected_scores = max_score[X, Y, :] + X = X.unsqueeze(1).float() + return torch.cat([X, selected_boxes, selected_categories, selected_scores], 1) + +class ONNX_TRT(nn.Module): + '''onnx module with TensorRT NMS operation.''' + def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None ,device=None, n_classes=80): + super().__init__() + assert max_wh is None + self.device = device if device else torch.device('cpu') + self.background_class = -1, + self.box_coding = 1, + self.iou_threshold = iou_thres + self.max_obj = max_obj + self.plugin_version = '1' + self.score_activation = 0 + self.score_threshold = score_thres + self.n_classes=n_classes + + def forward(self, x): + boxes = x[:, :, :4] + conf = x[:, :, 4:5] + scores = x[:, :, 5:] + if self.n_classes == 1: + scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5, + # so there is no need to multiplicate. 
+ else: + scores *= conf # conf = obj_conf * cls_conf + num_det, det_boxes, det_scores, det_classes = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding, + self.iou_threshold, self.max_obj, + self.plugin_version, self.score_activation, + self.score_threshold) + return num_det, det_boxes, det_scores, det_classes + + +class End2End(nn.Module): + '''export onnx or tensorrt model with NMS operation.''' + def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None, n_classes=80): + super().__init__() + device = device if device else torch.device('cpu') + assert isinstance(max_wh,(int)) or max_wh is None + self.model = model.to(device) + self.model.model[-1].end2end = True + self.patch_model = ONNX_TRT if max_wh is None else ONNX_ORT + self.end2end = self.patch_model(max_obj, iou_thres, score_thres, max_wh, device, n_classes) + self.end2end.eval() + + def forward(self, x): + x = self.model(x) + x = self.end2end(x) + return x + + + + + +def attempt_load(weights, map_location=None): + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + model = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + attempt_download(w) + ckpt = torch.load(w, map_location=map_location) # load + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + + # Compatibility updates + for m in model.modules(): + if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True # pytorch 1.7.0 compatibility + elif type(m) is nn.Upsample: + m.recompute_scale_factor = None # torch 1.11.0 compatibility + elif type(m) is Conv: + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + + if len(model) == 1: + return model[-1] # return model + else: + print('Ensemble created with %s\n' % weights) + for k in ['names', 'stride']: + setattr(model, k, getattr(model[-1], k)) + return model # return ensemble + + diff --git a/human_detection/yolov7 skeleton/models/yolo.py b/human_detection/yolov7 skeleton/models/yolo.py new file mode 100644 index 00000000..95a019c6 --- /dev/null +++ b/human_detection/yolov7 skeleton/models/yolo.py @@ -0,0 +1,843 @@ +import argparse +import logging +import sys +from copy import deepcopy + +sys.path.append('./') # to run '$ python *.py' files in subdirectories +logger = logging.getLogger(__name__) +import torch +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import make_divisible, check_file, set_logging +from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ + select_device, copy_attr +from utils.loss import SigmoidBin + +try: + import thop # for FLOPS computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + end2end = False + include_nms = False + concat = False + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(Detect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 
1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + y = x[i].sigmoid() + if not torch.onnx.is_in_onnx_export(): + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: + xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy + wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, -1, self.no)) + + if self.training: + out = x + elif self.end2end: + out = torch.cat(z, 1) + elif self.include_nms: + z = self.convert(z) + out = (z, ) + elif self.concat: + out = torch.cat(z, 1) + else: + out = (torch.cat(z, 1), x) + + return out + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + def convert(self, z): + z = torch.cat(z, 1) + box = z[:, :, :4] + conf = z[:, :, 4:5] + score = z[:, :, 5:] + score *= conf + convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], + dtype=torch.float32, + device=z.device) + box @= convert_matrix + return (box, score) + + +class IDetect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + end2end = False + include_nms = False + concat = False + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(IDetect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + self.ia = nn.ModuleList(ImplicitA(x) for x in ch) + self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](self.ia[i](x[i])) # conv + x[i] = self.im[i](x[i]) + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + def fuseforward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + if not torch.onnx.is_in_onnx_export(): + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: + xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy + wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, -1, self.no)) + + if self.training: + out = x + elif self.end2end: + out = torch.cat(z, 1) + elif self.include_nms: + z = self.convert(z) + out = (z, ) + elif self.concat: + out = torch.cat(z, 1) + else: + out = (torch.cat(z, 1), x) + + return out + + def fuse(self): + print("IDetect.fuse") + # fuse ImplicitA and Convolution + for i in range(len(self.m)): + c1,c2,_,_ = self.m[i].weight.shape + c1_,c2_, _,_ = self.ia[i].implicit.shape + self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) + + # fuse ImplicitM and Convolution + for i in range(len(self.m)): + c1,c2, _,_ = self.im[i].implicit.shape + self.m[i].bias *= self.im[i].implicit.reshape(c2) + self.m[i].weight *= self.im[i].implicit.transpose(0,1) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + def convert(self, z): + z = torch.cat(z, 1) + box = z[:, :, :4] + conf = z[:, :, 4:5] + score = z[:, :, 5:] + score *= conf + convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], + dtype=torch.float32, + device=z.device) + box @= convert_matrix + return (box, score) + + +class IKeypoint(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, nc=80, anchors=(), nkpt=17, ch=(), inplace=True, dw_conv_kpt=False): # detection layer + super(IKeypoint, self).__init__() + self.nc = nc # number of classes + self.nkpt = nkpt + self.dw_conv_kpt = dw_conv_kpt + self.no_det=(nc + 5) # number of outputs per anchor for box and class + self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints + self.no = self.no_det+self.no_kpt + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + self.flip_test = False + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv + + self.ia = nn.ModuleList(ImplicitA(x) for x in ch) + self.im = 
nn.ModuleList(ImplicitM(self.no_det * self.na) for _ in ch) + + if self.nkpt is not None: + if self.dw_conv_kpt: #keypoint head is slightly more complex + self.m_kpt = nn.ModuleList( + nn.Sequential(DWConv(x, x, k=3), Conv(x,x), + DWConv(x, x, k=3), Conv(x, x), + DWConv(x, x, k=3), Conv(x,x), + DWConv(x, x, k=3), Conv(x, x), + DWConv(x, x, k=3), Conv(x, x), + DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch) + else: #keypoint head is a single convolution + self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch) + + self.inplace = inplace # use in-place ops (e.g. slice assignment) + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + if self.nkpt is None or self.nkpt==0: + x[i] = self.im[i](self.m[i](self.ia[i](x[i]))) # conv + else : + x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1) + + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + x_det = x[i][..., :6] + x_kpt = x[i][..., 6:] + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + kpt_grid_x = self.grid[i][..., 0:1] + kpt_grid_y = self.grid[i][..., 1:2] + + if self.nkpt == 0: + y = x[i].sigmoid() + else: + y = x_det.sigmoid() + + if self.inplace: + xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh + if self.nkpt != 0: + x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy + x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy + #x_kpt[..., 0::3] = (x_kpt[..., ::3] + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy + #x_kpt[..., 1::3] = (x_kpt[..., 1::3] + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy + #print('=============') + #print(self.anchor_grid[i].shape) + #print(self.anchor_grid[i][...,0].unsqueeze(4).shape) + #print(x_kpt[..., 0::3].shape) + #x_kpt[..., 0::3] = ((x_kpt[..., 0::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy + #x_kpt[..., 1::3] = ((x_kpt[..., 1::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy + #x_kpt[..., 0::3] = (((x_kpt[..., 0::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy + #x_kpt[..., 1::3] = (((x_kpt[..., 1::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy + x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid() + + y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1) + + else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 + xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + if self.nkpt != 0: + y[..., 6:] = (y[..., 6:] * 2. 
- 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy + y = torch.cat((xy, wh, y[..., 4:]), -1) + + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class IAuxDetect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + end2end = False + include_nms = False + concat = False + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(IAuxDetect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl]) # output conv + self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:]) # output conv + + self.ia = nn.ModuleList(ImplicitA(x) for x in ch[:self.nl]) + self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch[:self.nl]) + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](self.ia[i](x[i])) # conv + x[i] = self.im[i](x[i]) + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + x[i+self.nl] = self.m2[i](x[i+self.nl]) + x[i+self.nl] = x[i+self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + if not torch.onnx.is_in_onnx_export(): + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: + xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy + wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x[:self.nl]) + + def fuseforward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + if not torch.onnx.is_in_onnx_export(): + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: + xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].data # wh + y = torch.cat((xy, wh, y[..., 4:]), -1) + z.append(y.view(bs, -1, self.no)) + + if self.training: + out = x + elif self.end2end: + out = torch.cat(z, 1) + elif self.include_nms: + z = self.convert(z) + out = (z, ) + elif self.concat: + out = torch.cat(z, 1) + else: + out = (torch.cat(z, 1), x) + + return out + + def fuse(self): + print("IAuxDetect.fuse") + # fuse ImplicitA and Convolution + for i in range(len(self.m)): + c1,c2,_,_ = self.m[i].weight.shape + c1_,c2_, _,_ = self.ia[i].implicit.shape + self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) + + # fuse ImplicitM and Convolution + for i in range(len(self.m)): + c1,c2, _,_ = self.im[i].implicit.shape + self.m[i].bias *= self.im[i].implicit.reshape(c2) + self.m[i].weight *= self.im[i].implicit.transpose(0,1) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + def convert(self, z): + z = torch.cat(z, 1) + box = z[:, :, :4] + conf = z[:, :, 4:5] + score = z[:, :, 5:] + score *= conf + convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], + dtype=torch.float32, + device=z.device) + box @= convert_matrix + return (box, score) + + +class IBin(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, nc=80, anchors=(), ch=(), bin_count=21): # detection layer + super(IBin, self).__init__() + self.nc = nc # number of classes + self.bin_count = bin_count + + self.w_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) + self.h_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) + # classes, x,y,obj + self.no = nc + 3 + \ + self.w_bin_sigmoid.get_length() + self.h_bin_sigmoid.get_length() # w-bce, h-bce + # + self.x_bin_sigmoid.get_length() + self.y_bin_sigmoid.get_length() + + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + self.ia = nn.ModuleList(ImplicitA(x) for x in ch) + self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) + + def forward(self, x): + + #self.x_bin_sigmoid.use_fw_regression = True + #self.y_bin_sigmoid.use_fw_regression = True + self.w_bin_sigmoid.use_fw_regression = True + self.h_bin_sigmoid.use_fw_regression = True + + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](self.ia[i](x[i])) # conv + x[i] = self.im[i](x[i]) + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + #y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + + + #px = (self.x_bin_sigmoid.forward(y[..., 0:12]) + self.grid[i][..., 0]) * self.stride[i] + #py = (self.y_bin_sigmoid.forward(y[..., 12:24]) + self.grid[i][..., 1]) * self.stride[i] + + pw = self.w_bin_sigmoid.forward(y[..., 2:24]) * self.anchor_grid[i][..., 0] + ph = self.h_bin_sigmoid.forward(y[..., 24:46]) * self.anchor_grid[i][..., 1] + + #y[..., 0] = px + #y[..., 1] = py + y[..., 2] = pw + y[..., 3] = ph + + y = torch.cat((y[..., 0:4], y[..., 46:]), dim=-1) + + z.append(y.view(bs, -1, y.shape[-1])) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class Model(nn.Module): + def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + super(Model, self).__init__() + self.traced = False + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + if anchors: + logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, Detect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + if isinstance(m, IDetect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + if isinstance(m, IAuxDetect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward + #print(m.stride) + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_aux_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + if isinstance(m, IBin): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_biases_bin() # only run once + # print('Strides: %s' % m.stride.tolist()) + if isinstance(m, IKeypoint): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 
1, 1) + self.stride = m.stride + self._initialize_biases_kpt() # only run once + # print('Strides: %s' % m.stride.tolist()) + + # Init weights, biases + initialize_weights(self) + self.info() + logger.info('') + + def forward(self, x, augment=False, profile=False): + if augment: + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi[..., :4] /= si # de-scale + if fi == 2: + yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud + elif fi == 3: + yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + else: + return self.forward_once(x, profile) # single-scale inference, train + + def forward_once(self, x, profile=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + if not hasattr(self, 'traced'): + self.traced=False + + if self.traced: + if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint): + break + + if profile: + c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin)) + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + for _ in range(10): + m(x.copy() if c else x) + t = time_synchronized() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_synchronized() - t) * 100) + print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + + x = m(x) # run + + y.append(x if m.i in self.save else None) # save output + + if profile: + print('%.1fms total' % sum(dt)) + return x + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
+ m = self.model[-1] # Detect() module + for mi, mi2, s in zip(m.m, m.m2, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True) + + def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Bin() module + bc = m.bin_count + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + old = b[:, (0,1,2,bc+3)].data + obj_idx = 2*bc+4 + b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99)) + b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + b[:, (0,1,2,bc+3)].data = old + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + print('Fusing layers... ') + for m in self.model.modules(): + if isinstance(m, RepConv): + #print(f" fuse_repvgg_block") + m.fuse_repvgg_block() + elif isinstance(m, RepConv_OREPA): + #print(f" switch_to_deploy") + m.switch_to_deploy() + elif type(m) is Conv and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.fuseforward # update forward + elif isinstance(m, (IDetect, IAuxDetect)): + m.fuse() + m.forward = m.fuseforward + self.info() + return self + + def nms(self, mode=True): # add or remove NMS module + present = type(self.model[-1]) is NMS # last layer is NMS + if mode and not present: + print('Adding NMS... ') + m = NMS() # module + m.f = -1 # from + m.i = self.model[-1].i + 1 # index + self.model.add_module(name='%s' % m.i, module=m) # add + self.eval() + elif not mode and present: + print('Removing NMS... 
') + self.model = self.model[:-1] # remove + return self + + def autoshape(self): # add autoShape module + print('Adding autoShape... ') + m = autoShape(self) # wrap model + copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes + return m + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + +def parse_model(d, ch): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [nn.Conv2d, Conv, RobustConv, RobustConv2, DWConv, GhostConv, RepConv, RepConv_OREPA, DownC, + SPP, SPPF, SPPCSPC, GhostSPPCSPC, MixConv2d, Focus, Stem, GhostStem, CrossConv, + Bottleneck, BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, + RepBottleneck, RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, + Res, ResCSPA, ResCSPB, ResCSPC, + RepRes, RepResCSPA, RepResCSPB, RepResCSPC, + ResX, ResXCSPA, ResXCSPB, ResXCSPC, + RepResX, RepResXCSPA, RepResXCSPB, RepResXCSPC, + Ghost, GhostCSPA, GhostCSPB, GhostCSPC, + SwinTransformerBlock, STCSPA, STCSPB, STCSPC, + SwinTransformer2Block, ST2CSPA, ST2CSPB, ST2CSPC]: + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in [DownC, SPPCSPC, GhostSPPCSPC, + BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, + RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, + ResCSPA, ResCSPB, ResCSPC, + RepResCSPA, RepResCSPB, RepResCSPC, + ResXCSPA, ResXCSPB, ResXCSPC, + RepResXCSPA, RepResXCSPB, RepResXCSPC, + GhostCSPA, GhostCSPB, GhostCSPC, + STCSPA, STCSPB, STCSPC, + ST2CSPA, ST2CSPB, ST2CSPC]: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[x] for x in f]) + elif m is Chuncat: + c2 = sum([ch[x] for x in f]) + elif m is Shortcut: + c2 = ch[f[0]] + elif m is Foldcut: + c2 = ch[f] // 2 + elif m in [Detect, IDetect, IAuxDetect, IBin, IKeypoint]: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + elif m is ReOrg: + c2 = ch[f] * 4 + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in m_.parameters()]) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ 
== '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolor-csp-c.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--profile', action='store_true', help='profile model speed') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + set_logging() + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + model.train() + + if opt.profile: + img = torch.rand(1, 3, 640, 640).to(device) + y = model(img, profile=True) + + # Profile + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + # y = model(img, profile=True) + + # Tensorboard + # from torch.utils.tensorboard import SummaryWriter + # tb_writer = SummaryWriter() + # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(model.model, img) # add model to tensorboard + # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/human_detection/yolov7 skeleton/readme.md b/human_detection/yolov7 skeleton/readme.md new file mode 100644 index 00000000..4778cca2 --- /dev/null +++ b/human_detection/yolov7 skeleton/readme.md @@ -0,0 +1,6 @@ +`main.py ` is taken from https://github.com/haroonshakeel/yolov7-object-tracking/blob/main/detect_or_track.py and modified + +`models/` `utils/` `sort.py` is downloaded from https://github.com/WongKinYiu/yolov7 (with no modification) + +Usage: +```python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --track --show-track-lines --classes 0 --no-trace --source video.mp4 ``` \ No newline at end of file diff --git a/human_detection/yolov7 skeleton/sort.py b/human_detection/yolov7 skeleton/sort.py new file mode 100644 index 00000000..11420268 --- /dev/null +++ b/human_detection/yolov7 skeleton/sort.py @@ -0,0 +1,362 @@ +from __future__ import print_function + +import os +import numpy as np +import matplotlib +matplotlib.use('TkAgg') +import matplotlib.pyplot as plt +import matplotlib.patches as patches +from skimage import io +from random import randint +import glob +import time +import argparse +from filterpy.kalman import KalmanFilter + + +def get_color(): + # r = randint(0, 255) + # g = randint(0, 255) + # b = randint(0, 255) + color = (randint(0, 255), randint(0, 255), randint(0, 255)) + return color +def linear_assignment(cost_matrix): + try: + import lap #linear assignment problem solver + _, x, y = lap.lapjv(cost_matrix, extend_cost = True) + return np.array([[y[i],i] for i in x if i>=0]) + except ImportError: + from scipy.optimize import linear_sum_assignment + x,y = linear_sum_assignment(cost_matrix) + return np.array(list(zip(x,y))) + + +"""From SORT: Computes IOU between two boxes in the form [x1,y1,x2,y2]""" +def iou_batch(bb_test, bb_gt): + + bb_gt = np.expand_dims(bb_gt, 0) + bb_test = np.expand_dims(bb_test, 1) + + xx1 = np.maximum(bb_test[...,0], bb_gt[..., 0]) + yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1]) + xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2]) + yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3]) + w = np.maximum(0., xx2 - xx1) + h = np.maximum(0., yy2 - yy1) + wh = w * h + o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1]) + + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh) + return(o) + + +"""Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the 
center of the box and s is the scale/area and r is the aspect ratio""" +def convert_bbox_to_z(bbox): + w = bbox[2] - bbox[0] + h = bbox[3] - bbox[1] + x = bbox[0] + w/2. + y = bbox[1] + h/2. + s = w * h + #scale is just area + r = w / float(h) + return np.array([x, y, s, r]).reshape((4, 1)) + + +"""Takes a bounding box in the centre form [x,y,s,r] and returns it in the form + [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right""" +def convert_x_to_bbox(x, score=None): + w = np.sqrt(x[2] * x[3]) + h = x[2] / w + if(score==None): + return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4)) + else: + return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5)) + +"""This class represents the internal state of individual tracked objects observed as bbox.""" +class KalmanBoxTracker(object): + + count = 0 + def __init__(self, bbox): + """ + Initialize a tracker using initial bounding box + + Parameter 'bbox' must have 'detected class' int number at the -1 position. + """ + self.kf = KalmanFilter(dim_x=7, dim_z=4) + self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]]) + self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]]) + + self.kf.R[2:,2:] *= 10. # R: Covariance matrix of measurement noise (set to high for noisy inputs -> more 'inertia' of boxes') + self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities + self.kf.P *= 10. + self.kf.Q[-1,-1] *= 0.5 # Q: Covariance matrix of process noise (set to high for erratically moving things) + self.kf.Q[4:,4:] *= 0.5 + + self.kf.x[:4] = convert_bbox_to_z(bbox) # STATE VECTOR + self.time_since_update = 0 + self.id = KalmanBoxTracker.count + KalmanBoxTracker.count += 1 + self.history = [] + self.hits = 0 + self.hit_streak = 0 + self.age = 0 + self.centroidarr = [] + CX = (bbox[0]+bbox[2])//2 + CY = (bbox[1]+bbox[3])//2 + self.centroidarr.append((CX,CY)) + + + #keep yolov5 detected class information + self.detclass = bbox[5] + + def update(self, bbox): + """ + Updates the state vector with observed bbox + """ + self.time_since_update = 0 + self.history = [] + self.hits += 1 + self.hit_streak += 1 + self.kf.update(convert_bbox_to_z(bbox)) + self.detclass = bbox[5] + CX = (bbox[0]+bbox[2])//2 + CY = (bbox[1]+bbox[3])//2 + self.centroidarr.append((CX,CY)) + + def predict(self): + """ + Advances the state vector and returns the predicted bounding box estimate + """ + if((self.kf.x[6]+self.kf.x[2])<=0): + self.kf.x[6] *= 0.0 + self.kf.predict() + self.age += 1 + if(self.time_since_update>0): + self.hit_streak = 0 + self.time_since_update += 1 + self.history.append(convert_x_to_bbox(self.kf.x)) + # bbox=self.history[-1] + # CX = (bbox[0]+bbox[2])/2 + # CY = (bbox[1]+bbox[3])/2 + # self.centroidarr.append((CX,CY)) + + return self.history[-1] + + + def get_state(self): + """ + Returns the current bounding box estimate + # test + arr1 = np.array([[1,2,3,4]]) + arr2 = np.array([0]) + arr3 = np.expand_dims(arr2, 0) + np.concatenate((arr1,arr3), axis=1) + """ + arr_detclass = np.expand_dims(np.array([self.detclass]), 0) + + arr_u_dot = np.expand_dims(self.kf.x[4],0) + arr_v_dot = np.expand_dims(self.kf.x[5],0) + arr_s_dot = np.expand_dims(self.kf.x[6],0) + + return np.concatenate((convert_x_to_bbox(self.kf.x), arr_detclass, arr_u_dot, arr_v_dot, arr_s_dot), axis=1) + +def associate_detections_to_trackers(detections, trackers, iou_threshold = 0.3): + """ + Assigns 
detections to tracked object (both represented as bounding boxes) + Returns 3 lists of + 1. matches, + 2. unmatched_detections + 3. unmatched_trackers + """ + if(len(trackers)==0): + return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int) + + iou_matrix = iou_batch(detections, trackers) + + if min(iou_matrix.shape) > 0: + a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() ==1: + matched_indices = np.stack(np.where(a), axis=1) + else: + matched_indices = linear_assignment(-iou_matrix) + else: + matched_indices = np.empty(shape=(0,2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if(d not in matched_indices[:,0]): + unmatched_detections.append(d) + + + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if(t not in matched_indices[:,1]): + unmatched_trackers.append(t) + + #filter out matched with low IOU + matches = [] + for m in matched_indices: + if(iou_matrix[m[0], m[1]]= self.min_hits or self.frame_count <= self.min_hits): + ret.append(np.concatenate((d, [trk.id+1])).reshape(1,-1)) #+1'd because MOT benchmark requires positive value + i -= 1 + #remove dead tracklet + if(trk.time_since_update >self.max_age): + self.trackers.pop(i) + if unique_color: + self.color_list.pop(i) + + if(len(ret) > 0): + return np.concatenate(ret) + return np.empty((0,6)) + +def parse_args(): + """Parse input arguments.""" + parser = argparse.ArgumentParser(description='SORT demo') + parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',action='store_true') + parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data') + parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train') + parser.add_argument("--max_age", + help="Maximum number of frames to keep alive a track without associated detections.", + type=int, default=1) + parser.add_argument("--min_hits", + help="Minimum number of associated detections before track is initialised.", + type=int, default=3) + parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3) + args = parser.parse_args() + return args + +if __name__ == '__main__': + # all train + args = parse_args() + display = args.display + phase = args.phase + total_time = 0.0 + total_frames = 0 + colours = np.random.rand(32, 3) #used only for display + if(display): + if not os.path.exists('mot_benchmark'): + print('\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). 
E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n') + exit() + plt.ion() + fig = plt.figure() + ax1 = fig.add_subplot(111, aspect='equal') + + if not os.path.exists('output'): + os.makedirs('output') + pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt') + for seq_dets_fn in glob.glob(pattern): + mot_tracker = Sort(max_age=args.max_age, + min_hits=args.min_hits, + iou_threshold=args.iou_threshold) #create instance of the SORT tracker + seq_dets = np.loadtxt(seq_dets_fn, delimiter=',') + seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0] + + with open(os.path.join('output', '%s.txt'%(seq)),'w') as out_file: + print("Processing %s."%(seq)) + for frame in range(int(seq_dets[:,0].max())): + frame += 1 #detection and frame numbers begin at 1 + dets = seq_dets[seq_dets[:, 0]==frame, 2:7] + dets[:, 2:4] += dets[:, 0:2] #convert to [x1,y1,w,h] to [x1,y1,x2,y2] + total_frames += 1 + + if(display): + fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg'%(frame)) + im =io.imread(fn) + ax1.imshow(im) + plt.title(seq + ' Tracked Targets') + + start_time = time.time() + trackers = mot_tracker.update(dets) + cycle_time = time.time() - start_time + total_time += cycle_time + + for d in trackers: + print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file) + if(display): + d = d.astype(np.int32) + ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:])) + + if(display): + fig.canvas.flush_events() + plt.draw() + ax1.cla() + + print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time)) + + if(display): + print("Note: to get real runtime results run without the option: --display") diff --git a/human_detection/yolov7 skeleton/utils/__init__.py b/human_detection/yolov7 skeleton/utils/__init__.py new file mode 100644 index 00000000..84952a81 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/human_detection/yolov7 skeleton/utils/activations.py b/human_detection/yolov7 skeleton/utils/activations.py new file mode 100644 index 00000000..aa3ddf07 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/activations.py @@ -0,0 +1,72 @@ +# Activation functions + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- +class SiLU(nn.Module): # export-friendly version of nn.SiLU() + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for torchscript and CoreML + return x * F.hardtanh(x + 3, 0., 6.) / 6. 
# for torchscript, CoreML and ONNX + + +class MemoryEfficientSwish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x * torch.sigmoid(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + return grad_output * (sx * (1 + x * (1 - sx))) + + def forward(self, x): + return self.F.apply(x) + + +# Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- +class Mish(nn.Module): + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): + return self.F.apply(x) + + +# FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- +class FReLU(nn.Module): + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, self.bn(self.conv(x))) diff --git a/human_detection/yolov7 skeleton/utils/add_nms.py b/human_detection/yolov7 skeleton/utils/add_nms.py new file mode 100644 index 00000000..0a1f7976 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/add_nms.py @@ -0,0 +1,155 @@ +import numpy as np +import onnx +from onnx import shape_inference +try: + import onnx_graphsurgeon as gs +except Exception as e: + print('Import onnx_graphsurgeon failure: %s' % e) + +import logging + +LOGGER = logging.getLogger(__name__) + +class RegisterNMS(object): + def __init__( + self, + onnx_model_path: str, + precision: str = "fp32", + ): + + self.graph = gs.import_onnx(onnx.load(onnx_model_path)) + assert self.graph + LOGGER.info("ONNX graph created successfully") + # Fold constants via ONNX-GS that PyTorch2ONNX may have missed + self.graph.fold_constants() + self.precision = precision + self.batch_size = 1 + def infer(self): + """ + Sanitize the graph by cleaning any unconnected nodes, do a topological resort, + and fold constant inputs values. When possible, run shape inference on the + ONNX graph to determine tensor shapes. + """ + for _ in range(3): + count_before = len(self.graph.nodes) + + self.graph.cleanup().toposort() + try: + for node in self.graph.nodes: + for o in node.outputs: + o.shape = None + model = gs.export_onnx(self.graph) + model = shape_inference.infer_shapes(model) + self.graph = gs.import_onnx(model) + except Exception as e: + LOGGER.info(f"Shape inference could not be performed at this time:\n{e}") + try: + self.graph.fold_constants(fold_shapes=True) + except TypeError as e: + LOGGER.error( + "This version of ONNX GraphSurgeon does not support folding shapes, " + f"please upgrade your onnx_graphsurgeon module. Error:\n{e}" + ) + raise + + count_after = len(self.graph.nodes) + if count_before == count_after: + # No new folding occurred in this iteration, so we can stop for now. + break + + def save(self, output_path): + """ + Save the ONNX model to the given location. + Args: + output_path: Path pointing to the location where to write + out the updated ONNX model. 
+ """ + self.graph.cleanup().toposort() + model = gs.export_onnx(self.graph) + onnx.save(model, output_path) + LOGGER.info(f"Saved ONNX model to {output_path}") + + def register_nms( + self, + *, + score_thresh: float = 0.25, + nms_thresh: float = 0.45, + detections_per_img: int = 100, + ): + """ + Register the ``EfficientNMS_TRT`` plugin node. + NMS expects these shapes for its input tensors: + - box_net: [batch_size, number_boxes, 4] + - class_net: [batch_size, number_boxes, number_labels] + Args: + score_thresh (float): The scalar threshold for score (low scoring boxes are removed). + nms_thresh (float): The scalar threshold for IOU (new boxes that have high IOU + overlap with previously selected boxes are removed). + detections_per_img (int): Number of best detections to keep after NMS. + """ + + self.infer() + # Find the concat node at the end of the network + op_inputs = self.graph.outputs + op = "EfficientNMS_TRT" + attrs = { + "plugin_version": "1", + "background_class": -1, # no background class + "max_output_boxes": detections_per_img, + "score_threshold": score_thresh, + "iou_threshold": nms_thresh, + "score_activation": False, + "box_coding": 0, + } + + if self.precision == "fp32": + dtype_output = np.float32 + elif self.precision == "fp16": + dtype_output = np.float16 + else: + raise NotImplementedError(f"Currently not supports precision: {self.precision}") + + # NMS Outputs + output_num_detections = gs.Variable( + name="num_dets", + dtype=np.int32, + shape=[self.batch_size, 1], + ) # A scalar indicating the number of valid detections per batch image. + output_boxes = gs.Variable( + name="det_boxes", + dtype=dtype_output, + shape=[self.batch_size, detections_per_img, 4], + ) + output_scores = gs.Variable( + name="det_scores", + dtype=dtype_output, + shape=[self.batch_size, detections_per_img], + ) + output_labels = gs.Variable( + name="det_classes", + dtype=np.int32, + shape=[self.batch_size, detections_per_img], + ) + + op_outputs = [output_num_detections, output_boxes, output_scores, output_labels] + + # Create the NMS Plugin node with the selected inputs. The outputs of the node will also + # become the final outputs of the graph. + self.graph.layer(op=op, name="batched_nms", inputs=op_inputs, outputs=op_outputs, attrs=attrs) + LOGGER.info(f"Created NMS plugin '{op}' with attributes: {attrs}") + + self.graph.outputs = op_outputs + + self.infer() + + def save(self, output_path): + """ + Save the ONNX model to the given location. + Args: + output_path: Path pointing to the location where to write + out the updated ONNX model. 
+ """ + self.graph.cleanup().toposort() + model = gs.export_onnx(self.graph) + onnx.save(model, output_path) + LOGGER.info(f"Saved ONNX model to {output_path}") diff --git a/human_detection/yolov7 skeleton/utils/autoanchor.py b/human_detection/yolov7 skeleton/utils/autoanchor.py new file mode 100644 index 00000000..f491032e --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/autoanchor.py @@ -0,0 +1,160 @@ +# Auto-anchor utils + +import numpy as np +import torch +import yaml +from scipy.cluster.vq import kmeans +from tqdm import tqdm + +from utils.general import colorstr + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLO Detect() module m, and correct if necessary + a = m.anchor_grid.prod(-1).view(-1) # anchor area + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da.sign() != ds.sign(): # same order + print('Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + m.anchor_grid[:] = m.anchor_grid.flip(0) + + +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + prefix = colorstr('autoanchor: ') + print(f'\n{prefix}Analyzing anchors... ', end='') + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1. / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1. / thr).float().mean() # best possible recall + return bpr, aat + + anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors + bpr, aat = metric(anchors) + print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') + if bpr < 0.98: # threshold to recompute + print('. Attempting to improve anchors, please wait...') + na = m.anchor_grid.numel() // 2 # number of anchors + try: + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + except Exception as e: + print(f'{prefix}ERROR: {e}') + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference + check_anchor_order(m) + m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + else: + print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') + print('') # newline + + +def kmean_anchors(path='./data/coco.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + path: path to dataset *.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + thr = 1. 
/ thr + prefix = colorstr('autoanchor: ') + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1. / r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') + print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' + f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + for i, x in enumerate(k): + print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + return k + + if isinstance(path, str): # *.yaml file + with open(path) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict + from utils.datasets import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + else: + dataset = path # dataset + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') + wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans calculation + print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') + s = wh.std(0) # sigmas for whitening + k, dist = kmeans(wh / s, n, iter=30) # points, mean distance + assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') + k *= s + wh = torch.tensor(wh, dtype=torch.float32) # filtered + wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + k = print_results(k) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + npr = np.random + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k) + + return print_results(k) diff --git a/human_detection/yolov7 skeleton/utils/aws/__init__.py b/human_detection/yolov7 skeleton/utils/aws/__init__.py new file mode 100644 index 
00000000..e9691f24 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/aws/__init__.py @@ -0,0 +1 @@ +#init \ No newline at end of file diff --git a/human_detection/yolov7 skeleton/utils/aws/mime.sh b/human_detection/yolov7 skeleton/utils/aws/mime.sh new file mode 100644 index 00000000..c319a83c --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/human_detection/yolov7 skeleton/utils/aws/resume.py b/human_detection/yolov7 skeleton/utils/aws/resume.py new file mode 100644 index 00000000..338685b1 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/aws/resume.py @@ -0,0 +1,37 @@ +# Resume all interrupted trainings in yolor/ dir including DDP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from pathlib import Path + +import torch +import yaml + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml') as f: + opt = yaml.load(f, Loader=yaml.SafeLoader) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/human_detection/yolov7 skeleton/utils/aws/userdata.sh b/human_detection/yolov7 skeleton/utils/aws/userdata.sh new file mode 100644 index 00000000..5a99d4be --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/aws/userdata.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolor ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone -b main https://github.com/WongKinYiu/yolov7 && sudo chmod -R 777 yolov7 + cd yolov7 + bash data/scripts/get_coco.sh && echo "Data done." & + sudo docker pull nvcr.io/nvidia/pytorch:21.08-py3 && echo "Docker done." 
& + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario + done <<<"$list" +fi diff --git a/human_detection/yolov7 skeleton/utils/datasets.py b/human_detection/yolov7 skeleton/utils/datasets.py new file mode 100644 index 00000000..5fe4f7bc --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/datasets.py @@ -0,0 +1,1320 @@ +# Dataset utils and dataloaders + +import glob +import logging +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from threading import Thread + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image, ExifTags +from torch.utils.data import Dataset +from tqdm import tqdm + +import pickle +from copy import deepcopy +#from pycocotools import mask as maskUtils +from torchvision.utils import save_image +from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align + +from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ + resample_segments, clean_str +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes +vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +logger = logging.getLogger(__name__) + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(files): + # Returns a single hash value of a list of files + return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + try: + rotation = dict(img._getexif().items())[orientation] + if rotation == 6: # rotation 270 + s = (s[1], s[0]) + elif rotation == 8: # rotation 90 + s = (s[1], s[0]) + except: + pass + + return s + + +def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, + rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): + # Make sure only the first process in DDP process the dataset first, and the following others can use the cache + with torch_distributed_zero_first(rank): + dataset = LoadImagesAndLabels(path, imgsz, batch_size, + augment=augment, # augment images + hyp=hyp, # augmentation hyperparameters + rect=rect, # rectangular training + cache_images=cache, + single_cls=opt.single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None + loader = 
torch.utils.data.DataLoader if image_weights else InfiniteDataLoader + # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() + dataloader = loader(dataset, + batch_size=batch_size, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) + return dataloader, dataset + + +class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: # for inference + def __init__(self, path, img_size=640, stride=32): + p = str(Path(path).absolute()) # os-agnostic absolute path + if '*' in p: + files = sorted(glob.glob(p, recursive=True)) # glob + elif os.path.isdir(p): + files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir + elif os.path.isfile(p): + files = [p] # files + else: + raise Exception(f'ERROR: {p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in img_formats] + videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + if not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, 'Image Not Found ' + path + #print(f'image {self.count}/{self.nf} {path}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + + if pipe.isnumeric(): + pipe = eval(pipe) # local camera + # pipe = 'rtsp://192.168.1.64/1' # IP camera + # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login + # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera + + self.pipe = pipe + self.cap = cv2.VideoCapture(pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + if self.pipe == 0: # local camera + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + else: # IP camera + n = 0 + while True: + n += 1 + self.cap.grab() + if n % 30 == 0: # skip frames + ret_val, img0 = self.cap.retrieve() + if ret_val: + break + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + print(f'webcam {self.count}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return img_path, img, img0, None + + def __len__(self): + return 0 + + +class LoadStreams: # multiple IP or RTSP cameras + def __init__(self, sources='streams.txt', img_size=640, stride=32): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources, 'r') as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs = [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + for i, s in enumerate(sources): + # Start the thread to read frames from the video stream + print(f'{i + 1}/{n}: {s}... 
', end='') + url = eval(s) if s.isnumeric() else s + if 'youtube.com/' in str(url) or 'youtu.be/' in str(url): # if source is YouTube video + check_requirements(('pafy', 'youtube_dl')) + import pafy + url = pafy.new(url).getbest(preftype="mp4").url + cap = cv2.VideoCapture(url) + assert cap.isOpened(), f'Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 + + _, self.imgs[i] = cap.read() # guarantee first frame + thread = Thread(target=self.update, args=([i, cap]), daemon=True) + print(f' success ({w}x{h} at {self.fps:.2f} FPS).') + thread.start() + print('') # newline + + # check for common shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') + + def update(self, index, cap): + # Read next stream frame in a daemon thread + n = 0 + while cap.isOpened(): + n += 1 + # _, self.imgs[index] = cap.read() + cap.grab() + if n == 4: # read every 4th frame + success, im = cap.retrieve() + self.imgs[index] = im if success else self.imgs[index] * 0 + n = 0 + time.sleep(1 / self.fps) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + img0 = self.imgs.copy() + if cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + # Letterbox + img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None + + def __len__(self): + return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings + return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] + + +class LoadImagesAndLabels(Dataset): # for training/testing + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, + cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + #self.albumentations = Albumentations() if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('**/*.*')) # pathlib + elif p.is_file(): # file + with open(p, 'r') as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise Exception(f'{prefix}{p} does not exist') + 
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib + assert self.img_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + + # Check cache + self.label_files = img2label_paths(self.img_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels + if cache_path.is_file(): + cache, exists = torch.load(cache_path), True # load + #if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + # cache, exists = self.cache_labels(cache_path, prefix), False # re-cache + else: + cache, exists = self.cache_labels(cache_path, prefix), False # cache + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + if exists: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' + + # Read cache + cache.pop('hash') # remove hash + cache.pop('version') # remove version + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + if single_cls: + for x in self.labels: + x[:, 0] = 0 + + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride + + # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) + self.imgs = [None] * n + if cache_images: + if cache_images == 'disk': + self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') + self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] + self.im_cache_dir.mkdir(parents=True, exist_ok=True) + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + if cache_images == 'disk': + if not self.img_npy[i].exists(): + np.save(self.img_npy[i].as_posix(), x[0]) + gb += self.img_npy[i].stat().st_size + else: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + pbar.close() + + def 
cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate + pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) + for i, (im_file, lb_file) in enumerate(pbar): + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + segments = [] # instance segments + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in img_formats, f'invalid image format {im.format}' + + # verify labels + if os.path.isfile(lb_file): + nf += 1 # label found + with open(lb_file, 'r') as f: + l = [x.split() for x in f.read().strip().splitlines()] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) + l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) + if len(l): + assert l.shape[1] == 5, 'labels require 5 columns each' + assert (l >= 0).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne += 1 # label empty + l = np.zeros((0, 5), dtype=np.float32) + else: + nm += 1 # label missing + l = np.zeros((0, 5), dtype=np.float32) + x[im_file] = [l, shape, segments] + except Exception as e: + nc += 1 + print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + + pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ + f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + pbar.close() + + if nf == 0: + print(f'{prefix}WARNING: No labels found in {path}. 
See {help_url}') + + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = nf, nm, ne, nc, i + 1 + x['version'] = 0.1 # cache version + torch.save(x, path) # save for next time + logging.info(f'{prefix}New cache created: {path}') + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + if random.random() < 0.8: + img, labels = load_mosaic(self, index) + else: + img, labels = load_mosaic9(self, index) + shapes = None + + # MixUp https://arxiv.org/pdf/1710.09412.pdf + if random.random() < hyp['mixup']: + if random.random() < 0.8: + img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1)) + else: + img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1)) + r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + img = (img * r + img2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + + else: + # Load image + img, (h0, w0), (h, w) = load_image(self, index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + # Augment imagespace + if not mosaic: + img, labels = random_perspective(img, labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + + #img, labels = self.albumentations(img, labels) + + # Augment colorspace + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Apply cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + if random.random() < hyp['paste_in']: + sample_labels, sample_images, sample_masks = [], [], [] + while len(sample_labels) < 30: + sample_labels_, sample_images_, sample_masks_ = load_samples(self, random.randint(0, len(self.labels) - 1)) + sample_labels += sample_labels_ + sample_images += sample_images_ + sample_masks += sample_masks_ + #print(len(sample_labels)) + if len(sample_labels) == 0: + break + labels = pastein(img, labels, sample_labels, sample_images, sample_masks) + + nL = len(labels) # number of labels + if nL: + labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh + labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 + labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + + if self.augment: + # flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nL: + labels[:, 2] = 1 - labels[:, 2] + + # flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nL: + labels[:, 1] = 1 - labels[:, 1] + + labels_out = torch.zeros((nL, 6)) + if nL: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, 
self.img_files[index], shapes + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = zip(*batch) # transposed + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ + 0].type(img[i].type()) + l = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + img4.append(im) + label4.append(l) + + for i, l in enumerate(label4): + l[:, 0] = i # add target image index for build_targets() + + return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def load_image(self, index): + # loads 1 image from dataset, returns img, original hw, resized hw + img = self.imgs[index] + if img is None: # not cached + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + + +def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=np.int16) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + + +def hist_equalize(img, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def load_mosaic(self, index): + # loads images in a 4-mosaic + + labels4, segments4 = [], [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image 
indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + #img4, labels4, segments4 = remove_background(img4, labels4, segments4) + #sample_segments(img4, labels4, segments4, probability=self.hyp['copy_paste']) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, labels4, segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + +def load_mosaic9(self, index): + # loads images in a 9-mosaic + + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x 
in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + #img9, labels9, segments9 = remove_background(img9, labels9, segments9) + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, probability=self.hyp['copy_paste']) + img9, labels9 = random_perspective(img9, labels9, segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + +def load_samples(self, index): + # loads images in a 4-mosaic + + labels4, segments4 = [], [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + #img4, labels4, segments4 = remove_background(img4, labels4, segments4) + sample_labels, sample_images, sample_masks = sample_segments(img4, labels4, segments4, probability=0.5) + + return sample_labels, sample_images, sample_masks + + +def copy_paste(img, labels, segments, probability=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if probability and n: + h, w, 
c = img.shape # height, width, channels + im_new = np.zeros(img.shape, np.uint8) + for j in random.sample(range(n), k=round(probability * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=img, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + + return img, labels, segments + + +def remove_background(img, labels, segments): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + h, w, c = img.shape # height, width, channels + im_new = np.zeros(img.shape, np.uint8) + img_new = np.ones(img.shape, np.uint8) * 114 + for j in range(n): + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=img, src2=im_new) + + i = result > 0 # pixels to replace + img_new[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + + return img_new, labels, segments + + +def sample_segments(img, labels, segments, probability=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + sample_labels = [] + sample_images = [] + sample_masks = [] + if probability and n: + h, w, c = img.shape # height, width, channels + for j in random.sample(range(n), k=round(probability * n)): + l, s = labels[j], segments[j] + box = l[1].astype(int).clip(0,w-1), l[2].astype(int).clip(0,h-1), l[3].astype(int).clip(0,w-1), l[4].astype(int).clip(0,h-1) + + #print(box) + if (box[2] <= box[0]) or (box[3] <= box[1]): + continue + + sample_labels.append(l[0]) + + mask = np.zeros(img.shape, np.uint8) + + cv2.drawContours(mask, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + sample_masks.append(mask[box[1]:box[3],box[0]:box[2],:]) + + result = cv2.bitwise_and(src1=img, src2=mask) + i = result > 0 # pixels to replace + mask[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + #print(box) + sample_images.append(mask[box[1]:box[3],box[0]:box[2],:]) + + return sample_labels, sample_images, sample_masks + + +def replicate(img, labels): + # Replicate labels + h, w = img.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return img, labels + + +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = img.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, 
new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return img, ratio, (dw, dh) + + +def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = img.shape[0] + border[0] * 2 # shape(h,w,c) + width = img.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1.1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, ::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or 
affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return img, targets + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def bbox_ioa(box1, box2): + # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2 + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 + + # Intersection over box2 area + return inter_area / box2_area + + +def cutout(image, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def pastein(image, labels, sample_labels, sample_images, sample_masks): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + # create random masks + scales = [0.75] * 2 + [0.5] * 4 + [0.25] * 4 + [0.125] * 4 + [0.0625] * 6 # image size fraction + for s in scales: + if random.random() < 0.2: + continue + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + if len(labels): + ioa = 
bbox_ioa(box, labels[:, 1:5]) # intersection over area + else: + ioa = np.zeros(1) + + if (ioa < 0.30).all() and len(sample_labels) and (xmax > xmin+20) and (ymax > ymin+20): # allow 30% obscuration of existing labels + sel_ind = random.randint(0, len(sample_labels)-1) + #print(len(sample_labels)) + #print(sel_ind) + #print((xmax-xmin, ymax-ymin)) + #print(image[ymin:ymax, xmin:xmax].shape) + #print([[sample_labels[sel_ind], *box]]) + #print(labels.shape) + hs, ws, cs = sample_images[sel_ind].shape + r_scale = min((ymax-ymin)/hs, (xmax-xmin)/ws) + r_w = int(ws*r_scale) + r_h = int(hs*r_scale) + + if (r_w > 10) and (r_h > 10): + r_mask = cv2.resize(sample_masks[sel_ind], (r_w, r_h)) + r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h)) + temp_crop = image[ymin:ymin+r_h, xmin:xmin+r_w] + m_ind = r_mask > 0 + if m_ind.astype(np.int32).sum() > 60: + temp_crop[m_ind] = r_image[m_ind] + #print(sample_labels[sel_ind]) + #print(sample_images[sel_ind].shape) + #print(temp_crop.shape) + box = np.array([xmin, ymin, xmin+r_w, ymin+r_h], dtype=np.float32) + if len(labels): + labels = np.concatenate((labels, [[sample_labels[sel_ind], *box]]), 0) + else: + labels = np.array([[sample_labels[sel_ind], *box]]) + + image[ymin:ymin+r_h, xmin:xmin+r_w] = temp_crop + + return labels + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self): + self.transform = None + import albumentations as A + + self.transform = A.Compose([ + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.01), + A.RandomGamma(gamma_limit=[80, 120], p=0.01), + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.ImageCompression(quality_lower=75, p=0.01),], + bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels'])) + + #logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def create_folder(path='./new'): + # Create folder + if os.path.exists(path): + shutil.rmtree(path) # delete output folder + os.makedirs(path) # make new output folder + + +def flatten_recursive(path='../coco'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(path + '_flat') + create_folder(new_path) + for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path='../coco/'): # from utils.datasets import *; extract_boxes('../coco128') + # Convert detection dataset into classification dataset, with one directory per class + + path = Path(path) # images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in img_formats: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file, 'r') as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) 
# class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path='../coco', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.datasets import *; autosplit('../coco') + Arguments + path: Path to images directory + weights: Train, val, test weights (list) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only + n = len(files) # number of files + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path / txt[i], 'a') as f: + f.write(str(img) + '\n') # add image to txt file + + +def load_segmentations(self, index): + key = '/work/handsomejw66/coco17/' + self.img_files[index] + #print(key) + # /work/handsomejw66/coco17/ + return self.segs[key] diff --git a/human_detection/yolov7 skeleton/utils/general.py b/human_detection/yolov7 skeleton/utils/general.py new file mode 100644 index 00000000..decdcc64 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/general.py @@ -0,0 +1,892 @@ +# YOLOR general utils + +import glob +import logging +import math +import os +import platform +import random +import re +import subprocess +import time +from pathlib import Path + +import cv2 +import numpy as np +import pandas as pd +import torch +import torchvision +import yaml + +from utils.google_utils import gsutil_getsize +from utils.metrics import fitness +from utils.torch_utils import init_torch_seeds + +# Settings +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads + + +def set_logging(rank=-1): + logging.basicConfig( + format="%(message)s", + level=logging.INFO if rank in [-1, 0] else logging.WARN) + + +def init_seeds(seed=0): + # Initialize random number generator (RNG) seeds + random.seed(seed) + np.random.seed(seed) + init_torch_seeds(seed) + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def isdocker(): + # Is environment a Docker container + return Path('/workspace').exists() # or Path('/.dockerenv').exists() + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +def check_online(): + # Check internet connectivity + import socket + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + return True + except OSError: + return False + + +def check_git_status(): + # Recommend 'git pull' if code is out of date + print(colorstr('github: '), end='') + try: + assert Path('.git').exists(), 'skipping check (not a git repository)' + assert not isdocker(), 'skipping check (Docker image)' + assert check_online(), 'skipping check (offline)' + + cmd = 'git fetch && git config --get remote.origin.url' + url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url + branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ + f"Use 'git pull' to update or 'git clone {url}' to download latest." + else: + s = f'up to date with {url} ✅' + print(emojis(s)) # emoji-safe + except Exception as e: + print(e) + + +def check_requirements(requirements='requirements.txt', exclude=()): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) + import pkg_resources as pkg + prefix = colorstr('red', 'bold', 'requirements:') + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] + + n = 0 # number of packages updates + for r in requirements: + try: + pkg.require(r) + except Exception as e: # DistributionNotFound or VersionConflict if requirements not met + n += 1 + print(f"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...") + print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) + + if n: # if packages updated + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + print(emojis(s)) # emoji-safe + + +def check_img_size(img_size, s=32): + # Verify img_size is a multiple of stride s + new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + if new_size != img_size: + print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) + return new_size + + +def check_imshow(): + # Check if environment supports image displays + try: + assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + print(f'WARNING: 
Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_file(file): + # Search for file if not found + if Path(file).is_file() or file == '': + return file + else: + files = glob.glob('./**/' + file, recursive=True) # find file + assert len(files), f'File Not Found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_dataset(dict): + # Download dataset if not found locally + val, s = dict.get('val'), dict.get('download') + if val and len(val): + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + if s and len(s): # download script + print('Downloading %s ...' % s) + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + torch.hub.download_url_to_file(s, f) + r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip + else: # bash script + r = os.system(s) + print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value + else: + raise Exception('Dataset not found.') + + +def make_divisible(x, divisor): + # Returns x evenly divisible by divisor + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(np.int32) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights) + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + class_counts = np.array([np.bincount(x[:, 0].astype(np.int32), minlength=nc) for x in labels]) + image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) + # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample + return image_weights + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + return x + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # 
bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + s = np.concatenate((s, s[0:1, :]), axis=0) + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, img_shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + + + +def bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9): + # Returns tsqrt_he IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + # change iou into pow(iou+eps) + # iou = inter / union + iou = torch.pow(inter/union + eps, alpha) + # beta = 2 * alpha + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = (cw ** 2 + ch ** 2) ** alpha + eps # convex diagonal + rho_x = torch.abs(b2_x1 + b2_x2 - b1_x1 - b1_x2) + rho_y = torch.abs(b2_y1 + b2_y2 - b1_y1 - b1_y2) + rho2 = ((rho_x ** 2 + rho_y ** 2) / 4) ** alpha # center distance + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha_ciou = v / ((1 + eps) - inter / union + v) + # return iou - (rho2 / c2 + v * alpha_ciou) # CIoU + return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + # c_area = cw * ch + eps # convex area + # return iou - (c_area - union) / c_area # GIoU + c_area = torch.max(cw * ch + eps, union) # convex area + return iou - torch.pow((c_area - union) / c_area + eps, alpha) # GIoU + else: + return iou # torch.log(iou+eps) or iou + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + +def box_giou(box1, box2): + """ + Return generalized intersection-over-union (Jaccard index) between two sets of boxes. 
+ Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values + for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + union = (area1[:, None] + area2 - inter) + + iou = inter / union + + lti = torch.min(box1[:, None, :2], box2[:, :2]) + rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) + + whi = (rbi - lti).clamp(min=0) # [N,M,2] + areai = whi[:, :, 0] * whi[:, :, 1] + + return iou - (areai - union) / areai + + +def box_ciou(box1, box2, eps: float = 1e-7): + """ + Return complete intersection-over-union (Jaccard index) between two sets of boxes. + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + eps (float, optional): small number to prevent division by zero. Default: 1e-7 + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise complete IoU values + for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + union = (area1[:, None] + area2 - inter) + + iou = inter / union + + lti = torch.min(box1[:, None, :2], box2[:, :2]) + rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) + + whi = (rbi - lti).clamp(min=0) # [N,M,2] + diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps + + # centers of boxes + x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2 + y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2 + x_g = (box2[:, 0] + box2[:, 2]) / 2 + y_g = (box2[:, 1] + box2[:, 3]) / 2 + # The distance between boxes' centers squared. + centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2 + + w_pred = box1[:, None, 2] - box1[:, None, 0] + h_pred = box1[:, None, 3] - box1[:, None, 1] + + w_gt = box2[:, 2] - box2[:, 0] + h_gt = box2[:, 3] - box2[:, 1] + + v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) + with torch.no_grad(): + alpha = v / (1 - iou + v + eps) + return iou - (centers_distance_squared / diagonal_distance_squared) - alpha * v + + +def box_diou(box1, box2, eps: float = 1e-7): + """ + Return distance intersection-over-union (Jaccard index) between two sets of boxes. + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + eps (float, optional): small number to prevent division by zero. 
Default: 1e-7 + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise distance IoU values + for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + union = (area1[:, None] + area2 - inter) + + iou = inter / union + + lti = torch.min(box1[:, None, :2], box2[:, :2]) + rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) + + whi = (rbi - lti).clamp(min=0) # [N,M,2] + diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps + + # centers of boxes + x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2 + y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2 + x_g = (box2[:, 0] + box2[:, 2]) / 2 + y_g = (box2[:, 1] + box2[:, 3]) / 2 + # The distance between boxes' centers squared. + centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2 + + # The distance IoU is the IoU penalized by a normalized + # distance between boxes' centers squared. + return iou - (centers_distance_squared / diagonal_distance_squared) + + +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=()): + """Runs Non-Maximum Suppression (NMS) on inference results + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + max_det = 300 # maximum number of detections per image + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 5), device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + if nc == 1: + x[:, 5:] = x[:, 4:5] # for models with one class, cls_loss is 0 and cls_conf is always 0.5, + # so there is no need to multiplicate. 
+ else: + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + print(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=(), kpt_label=False, nc=None, nkpt=None): + """Runs Non-Maximum Suppression (NMS) on inference results + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + if nc is None: + nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 56 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + max_det = 300 # maximum number of detections per image + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0,6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 5), device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:5+nc] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > 
conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + if not kpt_label: + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + else: + kpts = x[:, 6:] + conf, j = x[:, 5:6].max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres] + + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + print(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + + +def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): + # Print mutation results to evolve.txt (for use with train.py --evolve) + a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys + b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) + + if bucket: + url = 'gs://%s/evolve.txt' % bucket + if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): + os.system('gsutil cp %s .' 
% url) # download evolve.txt if larger than local + + with open('evolve.txt', 'a') as f: # append result + f.write(c + b + '\n') + x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows + x = x[np.argsort(-fitness(x))] # sort + np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + + # Save yaml + for i, k in enumerate(hyp.keys()): + hyp[k] = float(x[0, i + 7]) + with open(yaml_file, 'w') as f: + results = tuple(x[0, :7]) + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + yaml.dump(hyp, f, sort_keys=False) + + if bucket: + os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + + +def apply_classifier(x, model, img, im0): + # applies a second stage classifier to yolo outputs + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for j, a in enumerate(d): # per item + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + # cv2.imwrite('test%i.jpg' % j, cutout) + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=True, sep=''): + # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. + path = Path(path) # os-agnostic + if (path.exists() and exist_ok) or (not path.exists()): + return str(path) + else: + dirs = glob.glob(f"{path}{sep}*") # similar paths + matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] + i = [int(m.groups()[0]) for m in matches if m] # indices + n = max(i) + 1 if i else 2 # increment number + return f"{path}{sep}{n}" # update path diff --git a/human_detection/yolov7 skeleton/utils/google_app_engine/Dockerfile b/human_detection/yolov7 skeleton/utils/google_app_engine/Dockerfile new file mode 100644 index 00000000..0155618f --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/google_app_engine/Dockerfile @@ -0,0 +1,25 @@ +FROM gcr.io/google-appengine/python + +# Create a virtualenv for dependencies. This isolates these packages from +# system-level packages. +# Use -p python3 or -p python3.7 to select python version. Default is version 2. +RUN virtualenv /env -p python3 + +# Setting these environment variables are the same as running +# source /env/bin/activate. +ENV VIRTUAL_ENV /env +ENV PATH /env/bin:$PATH + +RUN apt-get update && apt-get install -y python-opencv + +# Copy the application's requirements.txt and run pip to install all +# dependencies into the virtualenv. +ADD requirements.txt /app/requirements.txt +RUN pip install -r /app/requirements.txt + +# Add the application source code. +ADD . /app + +# Run a WSGI server to serve the application. 
gunicorn must be declared as +# a dependency in requirements.txt. +CMD gunicorn -b :$PORT main:app diff --git a/human_detection/yolov7 skeleton/utils/google_app_engine/additional_requirements.txt b/human_detection/yolov7 skeleton/utils/google_app_engine/additional_requirements.txt new file mode 100644 index 00000000..5fcc3052 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/google_app_engine/additional_requirements.txt @@ -0,0 +1,4 @@ +# add these requirements in your app on top of the existing ones +pip==18.1 +Flask==1.0.2 +gunicorn==19.9.0 diff --git a/human_detection/yolov7 skeleton/utils/google_app_engine/app.yaml b/human_detection/yolov7 skeleton/utils/google_app_engine/app.yaml new file mode 100644 index 00000000..69b8f68b --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolorapp + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 \ No newline at end of file diff --git a/human_detection/yolov7 skeleton/utils/google_utils.py b/human_detection/yolov7 skeleton/utils/google_utils.py new file mode 100644 index 00000000..f363408e --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/google_utils.py @@ -0,0 +1,123 @@ +# Google utils: https://cloud.google.com/storage/docs/reference/libraries + +import os +import platform +import subprocess +import time +from pathlib import Path + +import requests +import torch + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def attempt_download(file, repo='WongKinYiu/yolov7'): + # Attempt file download if does not exist + file = Path(str(file).strip().replace("'", '').lower()) + + if not file.exists(): + try: + response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api + assets = [x['name'] for x in response['assets']] # release assets + tag = response['tag_name'] # i.e. 'v1.0' + except: # fallback plan + assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt', + 'yolov7-e6e.pt', 'yolov7-w6.pt'] + tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] + + name = file.name + if name in assets: + msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' + redundant = False # second download option + try: # GitHub + url = f'https://github.com/{repo}/releases/download/{tag}/{name}' + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert file.exists() and file.stat().st_size > 1E6 # check + except Exception as e: # GCP + print(f'Download error: {e}') + assert redundant, 'No secondary mirror' + url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' + print(f'Downloading {url} to {file}...') + os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) + finally: + if not file.exists() or file.stat().st_size < 1E6: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: Download failure: {msg}') + print('') + return + + +def gdrive_download(id='', file='tmp.zip'): + # Downloads a file from Google Drive. 
from yolov7.utils.google_utils import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') + file.unlink(missing_ok=True) # remove existing file + cookie.unlink(missing_ok=True) # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + cookie.unlink(missing_ok=True) # remove existing cookie + + # Error check + if r != 0: + file.unlink(missing_ok=True) # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... ', end='') + os.system(f'unzip -q {file}') # unzip + file.unlink() # remove zip to free space + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to {}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/human_detection/yolov7 skeleton/utils/loss.py b/human_detection/yolov7 skeleton/utils/loss.py new file mode 100644 index 00000000..2b1d968f --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/loss.py @@ -0,0 +1,1697 @@ +# Loss functions + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.general import bbox_iou, bbox_alpha_iou, box_iou, box_giou, box_diou, box_ciou, xywh2xyxy +from utils.torch_utils import is_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. 
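+    # Usage sketch (illustrative only): criterion = BCEBlurWithLogitsLoss(alpha=0.05);
+    # loss = criterion(pred, true), with pred (logits) and true tensors of the same shape.
+    # The alpha_factor in forward() shrinks the loss when pred >> true (a likely missing
+    # label): there dx -> 1, so 1 - exp((dx - 1) / alpha) -> 0 and the term is downweighted.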
+ def __init__(self, alpha=0.05): + super(BCEBlurWithLogitsLoss, self).__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class SigmoidBin(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, bin_count=10, min=0.0, max=1.0, reg_scale = 2.0, use_loss_regression=True, use_fw_regression=True, BCE_weight=1.0, smooth_eps=0.0): + super(SigmoidBin, self).__init__() + + self.bin_count = bin_count + self.length = bin_count + 1 + self.min = min + self.max = max + self.scale = float(max - min) + self.shift = self.scale / 2.0 + + self.use_loss_regression = use_loss_regression + self.use_fw_regression = use_fw_regression + self.reg_scale = reg_scale + self.BCE_weight = BCE_weight + + start = min + (self.scale/2.0) / self.bin_count + end = max - (self.scale/2.0) / self.bin_count + step = self.scale / self.bin_count + self.step = step + #print(f" start = {start}, end = {end}, step = {step} ") + + bins = torch.range(start, end + 0.0001, step).float() + self.register_buffer('bins', bins) + + + self.cp = 1.0 - 0.5 * smooth_eps + self.cn = 0.5 * smooth_eps + + self.BCEbins = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([BCE_weight])) + self.MSELoss = nn.MSELoss() + + def get_length(self): + return self.length + + def forward(self, pred): + assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length) + + pred_reg = (pred[..., 0] * self.reg_scale - self.reg_scale/2.0) * self.step + pred_bin = pred[..., 1:(1+self.bin_count)] + + _, bin_idx = torch.max(pred_bin, dim=-1) + bin_bias = self.bins[bin_idx] + + if self.use_fw_regression: + result = pred_reg + bin_bias + else: + result = bin_bias + result = result.clamp(min=self.min, max=self.max) + + return result + + + def training_loss(self, pred, target): + assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length) + assert pred.shape[0] == target.shape[0], 'pred.shape=%d is not equal to the target.shape=%d' % (pred.shape[0], target.shape[0]) + device = pred.device + + pred_reg = (pred[..., 0].sigmoid() * self.reg_scale - self.reg_scale/2.0) * self.step + pred_bin = pred[..., 1:(1+self.bin_count)] + + diff_bin_target = torch.abs(target[..., None] - self.bins) + _, bin_idx = torch.min(diff_bin_target, dim=-1) + + bin_bias = self.bins[bin_idx] + bin_bias.requires_grad = False + result = pred_reg + bin_bias + + target_bins = torch.full_like(pred_bin, self.cn, device=device) # targets + n = pred.shape[0] + target_bins[range(n), bin_idx] = self.cp + + loss_bin = self.BCEbins(pred_bin, target_bins) # BCE + + if self.use_loss_regression: + loss_regression = self.MSELoss(result, target) # MSE + loss = loss_bin + loss_regression + else: + loss = loss_bin + + out_result = result.clamp(min=self.min, max=self.max) + + return loss, out_result + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(FocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(QFocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + +class RankSort(torch.autograd.Function): + @staticmethod + def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10): + + classification_grads=torch.zeros(logits.shape).cuda() + + #Filter fg logits + fg_labels = (targets > 0.) + fg_logits = logits[fg_labels] + fg_targets = targets[fg_labels] + fg_num = len(fg_logits) + + #Do not use bg with scores less than minimum fg logit + #since changing its score does not have an effect on precision + threshold_logit = torch.min(fg_logits)-delta_RS + relevant_bg_labels=((targets==0) & (logits>=threshold_logit)) + + relevant_bg_logits = logits[relevant_bg_labels] + relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() + sorting_error=torch.zeros(fg_num).cuda() + ranking_error=torch.zeros(fg_num).cuda() + fg_grad=torch.zeros(fg_num).cuda() + + #sort the fg logits + order=torch.argsort(fg_logits) + #Loops over each positive following the order + for ii in order: + # Difference Transforms (x_ij) + fg_relations=fg_logits-fg_logits[ii] + bg_relations=relevant_bg_logits-fg_logits[ii] + + if delta_RS > 0: + fg_relations=torch.clamp(fg_relations/(2*delta_RS)+0.5,min=0,max=1) + bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1) + else: + fg_relations = (fg_relations >= 0).float() + bg_relations = (bg_relations >= 0).float() + + # Rank of ii among pos and false positive number (bg with larger scores) + rank_pos=torch.sum(fg_relations) + FP_num=torch.sum(bg_relations) + + # Rank of ii among all examples + rank=rank_pos+FP_num + + # Ranking error of example ii. target_ranking_error is always 0. (Eq. 
7) + ranking_error[ii]=FP_num/rank + + # Current sorting error of example ii. (Eq. 7) + current_sorting_error = torch.sum(fg_relations*(1-fg_targets))/rank_pos + + #Find examples in the target sorted order for example ii + iou_relations = (fg_targets >= fg_targets[ii]) + target_sorted_order = iou_relations * fg_relations + + #The rank of ii among positives in sorted order + rank_pos_target = torch.sum(target_sorted_order) + + #Compute target sorting error. (Eq. 8) + #Since target ranking error is 0, this is also total target error + target_sorting_error= torch.sum(target_sorted_order*(1-fg_targets))/rank_pos_target + + #Compute sorting error on example ii + sorting_error[ii] = current_sorting_error - target_sorting_error + + #Identity Update for Ranking Error + if FP_num > eps: + #For ii the update is the ranking error + fg_grad[ii] -= ranking_error[ii] + #For negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num) + relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num)) + + #Find the positives that are misranked (the cause of the error) + #These are the ones with smaller IoU but larger logits + missorted_examples = (~ iou_relations) * fg_relations + + #Denominotor of sorting pmf + sorting_pmf_denom = torch.sum(missorted_examples) + + #Identity Update for Sorting Error + if sorting_pmf_denom > eps: + #For ii the update is the sorting error + fg_grad[ii] -= sorting_error[ii] + #For positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom) + fg_grad += (missorted_examples*(sorting_error[ii]/sorting_pmf_denom)) + + #Normalize gradients by number of positives + classification_grads[fg_labels]= (fg_grad/fg_num) + classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num) + + ctx.save_for_backward(classification_grads) + + return ranking_error.mean(), sorting_error.mean() + + @staticmethod + def backward(ctx, out_grad1, out_grad2): + g1, =ctx.saved_tensors + return g1*out_grad1, None, None, None + +class aLRPLoss(torch.autograd.Function): + @staticmethod + def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5): + classification_grads=torch.zeros(logits.shape).cuda() + + #Filter fg logits + fg_labels = (targets == 1) + fg_logits = logits[fg_labels] + fg_num = len(fg_logits) + + #Do not use bg with scores less than minimum fg logit + #since changing its score does not have an effect on precision + threshold_logit = torch.min(fg_logits)-delta + + #Get valid bg logits + relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) + relevant_bg_logits=logits[relevant_bg_labels] + relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() + rank=torch.zeros(fg_num).cuda() + prec=torch.zeros(fg_num).cuda() + fg_grad=torch.zeros(fg_num).cuda() + + max_prec=0 + #sort the fg logits + order=torch.argsort(fg_logits) + #Loops over each positive following the order + for ii in order: + #x_ij s as score differences with fgs + fg_relations=fg_logits-fg_logits[ii] + #Apply piecewise linear function and determine relations with fgs + fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1) + #Discard i=j in the summation in rank_pos + fg_relations[ii]=0 + + #x_ij s as score differences with bgs + bg_relations=relevant_bg_logits-fg_logits[ii] + #Apply piecewise linear function and determine relations with bgs + bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) + + #Compute the rank of the example within fgs and number of bgs with larger scores + rank_pos=1+torch.sum(fg_relations) + FP_num=torch.sum(bg_relations) 
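+            # Illustrative values for the piecewise-linear relations above (delta=1):
+            # a score gap of -1 or less maps to 0, a gap of 0 maps to 0.5, and a gap of
+            # +1 or more maps to 1, so rank_pos and FP_num are soft counts of
+            # higher-scored foregrounds/backgrounds rather than hard ranks.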
+ #Store the total since it is normalizer also for aLRP Regression error + rank[ii]=rank_pos+FP_num + + #Compute precision for this example to compute classification loss + prec[ii]=rank_pos/rank[ii] + #For stability, set eps to a infinitesmall value (e.g. 1e-6), then compute grads + if FP_num > eps: + fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii] + relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num)) + + #aLRP with grad formulation fg gradient + classification_grads[fg_labels]= fg_grad + #aLRP with grad formulation bg gradient + classification_grads[relevant_bg_labels]= relevant_bg_grad + + classification_grads /= (fg_num) + + cls_loss=1-prec.mean() + ctx.save_for_backward(classification_grads) + + return cls_loss, rank, order + + @staticmethod + def backward(ctx, out_grad1, out_grad2, out_grad3): + g1, =ctx.saved_tensors + return g1*out_grad1, None, None, None, None + + +class APLoss(torch.autograd.Function): + @staticmethod + def forward(ctx, logits, targets, delta=1.): + classification_grads=torch.zeros(logits.shape).cuda() + + #Filter fg logits + fg_labels = (targets == 1) + fg_logits = logits[fg_labels] + fg_num = len(fg_logits) + + #Do not use bg with scores less than minimum fg logit + #since changing its score does not have an effect on precision + threshold_logit = torch.min(fg_logits)-delta + + #Get valid bg logits + relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) + relevant_bg_logits=logits[relevant_bg_labels] + relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() + rank=torch.zeros(fg_num).cuda() + prec=torch.zeros(fg_num).cuda() + fg_grad=torch.zeros(fg_num).cuda() + + max_prec=0 + #sort the fg logits + order=torch.argsort(fg_logits) + #Loops over each positive following the order + for ii in order: + #x_ij s as score differences with fgs + fg_relations=fg_logits-fg_logits[ii] + #Apply piecewise linear function and determine relations with fgs + fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1) + #Discard i=j in the summation in rank_pos + fg_relations[ii]=0 + + #x_ij s as score differences with bgs + bg_relations=relevant_bg_logits-fg_logits[ii] + #Apply piecewise linear function and determine relations with bgs + bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) + + #Compute the rank of the example within fgs and number of bgs with larger scores + rank_pos=1+torch.sum(fg_relations) + FP_num=torch.sum(bg_relations) + #Store the total since it is normalizer also for aLRP Regression error + rank[ii]=rank_pos+FP_num + + #Compute precision for this example + current_prec=rank_pos/rank[ii] + + #Compute interpolated AP and store gradients for relevant bg examples + if (max_prec<=current_prec): + max_prec=current_prec + relevant_bg_grad += (bg_relations/rank[ii]) + else: + relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec))) + + #Store fg gradients + fg_grad[ii]=-(1-max_prec) + prec[ii]=max_prec + + #aLRP with grad formulation fg gradient + classification_grads[fg_labels]= fg_grad + #aLRP with grad formulation bg gradient + classification_grads[relevant_bg_labels]= relevant_bg_grad + + classification_grads /= fg_num + + cls_loss=1-prec.mean() + ctx.save_for_backward(classification_grads) + + return cls_loss + + @staticmethod + def backward(ctx, out_grad1): + g1, =ctx.saved_tensors + return g1*out_grad1, None, None + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLoss, self).__init__() + device = 
next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05]) # P3-P7 + #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), tcls[i]] = self.cp + #t[t==self.cp] = iou.detach().clamp(0).type(t.dtype) + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch + + +class ComputeLossOTA: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLossOTA, self).__init__() + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors', 'stride': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets, imgs): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs) + pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] + + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + grid = torch.stack([gi, gj], dim=1) + pxy = ps[:, :2].sigmoid() * 2. - 0.5 + #pxy = ps[:, :2].sigmoid() * 3. - 1. 
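+                # Note on the decode: sigmoid()*2 - 0.5 keeps the xy offset in (-0.5, 1.5)
+                # around the assigned grid cell, and (sigmoid()*2)**2 below bounds wh to at
+                # most 4x the anchor; e.g. a raw logit of 0 decodes to an offset of 0.5 and
+                # exactly the anchor size.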
+ pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] + selected_tbox[:, :2] -= grid + iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + selected_tcls = targets[i][:, 1].long() + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), selected_tcls] = self.cp + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets, imgs): + + #indices, anch = self.find_positive(p, targets) + indices, anch = self.find_3_positive(p, targets) + #indices, anch = self.find_4_positive(p, targets) + #indices, anch = self.find_5_positive(p, targets) + #indices, anch = self.find_9_positive(p, targets) + device = torch.device(targets.device) + matching_bs = [[] for pp in p] + matching_as = [[] for pp in p] + matching_gjs = [[] for pp in p] + matching_gis = [[] for pp in p] + matching_targets = [[] for pp in p] + matching_anchs = [[] for pp in p] + + nl = len(p) + + for batch_idx in range(p[0].shape[0]): + + b_idx = targets[:, 0]==batch_idx + this_target = targets[b_idx] + if this_target.shape[0] == 0: + continue + + txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] + txyxy = xywh2xyxy(txywh) + + pxyxys = [] + p_cls = [] + p_obj = [] + from_which_layer = [] + all_b = [] + all_a = [] + all_gj = [] + all_gi = [] + all_anch = [] + + for i, pi in enumerate(p): + + b, a, gj, gi = indices[i] + idx = (b == batch_idx) + b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] + all_b.append(b) + all_a.append(a) + all_gj.append(gj) + all_gi.append(gi) + all_anch.append(anch[i][idx]) + from_which_layer.append((torch.ones(size=(len(b),)) * i).to(device)) + + fg_pred = pi[b, a, gj, gi] + p_obj.append(fg_pred[:, 4:5]) + p_cls.append(fg_pred[:, 5:]) + + grid = torch.stack([gi, gj], dim=1) + pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. + #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] + pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. 
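+                # Note: multiplying by self.stride[i] puts these decoded boxes in
+                # input-image pixels, the same space as txyxy above, so the
+                # box_iou(txyxy, pxyxys) call below can form the pairwise IoU term
+                # of the OTA matching cost.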
+ pxywh = torch.cat([pxy, pwh], dim=-1) + pxyxy = xywh2xyxy(pxywh) + pxyxys.append(pxyxy) + + pxyxys = torch.cat(pxyxys, dim=0) + if pxyxys.shape[0] == 0: + continue + p_obj = torch.cat(p_obj, dim=0) + p_cls = torch.cat(p_cls, dim=0) + from_which_layer = torch.cat(from_which_layer, dim=0) + all_b = torch.cat(all_b, dim=0) + all_a = torch.cat(all_a, dim=0) + all_gj = torch.cat(all_gj, dim=0) + all_gi = torch.cat(all_gi, dim=0) + all_anch = torch.cat(all_anch, dim=0) + + pair_wise_iou = box_iou(txyxy, pxyxys) + + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + gt_cls_per_image = ( + F.one_hot(this_target[:, 1].to(torch.int64), self.nc) + .float() + .unsqueeze(1) + .repeat(1, pxyxys.shape[0], 1) + ) + + num_gt = this_target.shape[0] + cls_preds_ = ( + p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + ) + + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" + ).sum(-1) + del cls_preds_ + + cost = ( + pair_wise_cls_loss + + 3.0 * pair_wise_iou_loss + ) + + matching_matrix = torch.zeros_like(cost, device=device) + + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False + ) + matching_matrix[gt_idx][pos_idx] = 1.0 + + del top_k, dynamic_ks + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = (matching_matrix.sum(0) > 0.0).to(device) + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + + from_which_layer = from_which_layer[fg_mask_inboxes] + all_b = all_b[fg_mask_inboxes] + all_a = all_a[fg_mask_inboxes] + all_gj = all_gj[fg_mask_inboxes] + all_gi = all_gi[fg_mask_inboxes] + all_anch = all_anch[fg_mask_inboxes] + + this_target = this_target[matched_gt_inds] + + for i in range(nl): + layer_idx = from_which_layer == i + matching_bs[i].append(all_b[layer_idx]) + matching_as[i].append(all_a[layer_idx]) + matching_gjs[i].append(all_gj[layer_idx]) + matching_gis[i].append(all_gi[layer_idx]) + matching_targets[i].append(this_target[layer_idx]) + matching_anchs[i].append(all_anch[layer_idx]) + + for i in range(nl): + if matching_targets[i] != []: + matching_bs[i] = torch.cat(matching_bs[i], dim=0) + matching_as[i] = torch.cat(matching_as[i], dim=0) + matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) + matching_gis[i] = torch.cat(matching_gis[i], dim=0) + matching_targets[i] = torch.cat(matching_targets[i], dim=0) + matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) + else: + matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + + return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs + + def find_3_positive(self, p, targets): + # Build targets for compute_loss(), input 
targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + indices, anch = [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. < g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + anch.append(anchors[a]) # anchors + + return indices, anch + + +class ComputeLossBinOTA: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLossBinOTA, self).__init__() + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + #MSEangle = nn.MSELoss().to(device) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors', 'stride', 'bin_count': + setattr(self, k, getattr(det, k)) + + #xy_bin_sigmoid = SigmoidBin(bin_count=11, min=-0.5, max=1.5, use_loss_regression=False).to(device) + wh_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0, use_loss_regression=False).to(device) + #angle_bin_sigmoid = SigmoidBin(bin_count=31, min=-1.1, max=1.1, use_loss_regression=False).to(device) + self.wh_bin_sigmoid = wh_bin_sigmoid + + def __call__(self, p, targets, imgs): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, 
device=device), torch.zeros(1, device=device) + bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs) + pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] + + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + obj_idx = self.wh_bin_sigmoid.get_length()*2 + 2 # x,y, w-bce, h-bce # xy_bin_sigmoid.get_length()*2 + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + grid = torch.stack([gi, gj], dim=1) + selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] + selected_tbox[:, :2] -= grid + + #pxy = ps[:, :2].sigmoid() * 2. - 0.5 + ##pxy = ps[:, :2].sigmoid() * 3. - 1. + #pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + #pbox = torch.cat((pxy, pwh), 1) # predicted box + + #x_loss, px = xy_bin_sigmoid.training_loss(ps[..., 0:12], tbox[i][..., 0]) + #y_loss, py = xy_bin_sigmoid.training_loss(ps[..., 12:24], tbox[i][..., 1]) + w_loss, pw = self.wh_bin_sigmoid.training_loss(ps[..., 2:(3+self.bin_count)], selected_tbox[..., 2] / anchors[i][..., 0]) + h_loss, ph = self.wh_bin_sigmoid.training_loss(ps[..., (3+self.bin_count):obj_idx], selected_tbox[..., 3] / anchors[i][..., 1]) + + pw *= anchors[i][..., 0] + ph *= anchors[i][..., 1] + + px = ps[:, 0].sigmoid() * 2. - 0.5 + py = ps[:, 1].sigmoid() * 2. - 0.5 + + lbox += w_loss + h_loss # + x_loss + y_loss + + #print(f"\n px = {px.shape}, py = {py.shape}, pw = {pw.shape}, ph = {ph.shape} \n") + + pbox = torch.cat((px.unsqueeze(1), py.unsqueeze(1), pw.unsqueeze(1), ph.unsqueeze(1)), 1).to(device) # predicted box + + + + + iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + selected_tcls = targets[i][:, 1].long() + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, (1+obj_idx):], self.cn, device=device) # targets + t[range(n), selected_tcls] = self.cp + lcls += self.BCEcls(ps[:, (1+obj_idx):], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., obj_idx], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets, imgs): + + #indices, anch = self.find_positive(p, targets) + indices, anch = self.find_3_positive(p, targets) + #indices, anch = self.find_4_positive(p, targets) + #indices, anch = self.find_5_positive(p, targets) + #indices, anch = self.find_9_positive(p, targets) + + matching_bs = [[] for pp in p] + matching_as = [[] for pp in p] + matching_gjs = [[] for pp in p] + matching_gis = [[] for pp in p] + matching_targets = [[] for pp in p] + matching_anchs = [[] for pp in p] + + nl = len(p) + + for batch_idx in 
range(p[0].shape[0]): + + b_idx = targets[:, 0]==batch_idx + this_target = targets[b_idx] + if this_target.shape[0] == 0: + continue + + txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] + txyxy = xywh2xyxy(txywh) + + pxyxys = [] + p_cls = [] + p_obj = [] + from_which_layer = [] + all_b = [] + all_a = [] + all_gj = [] + all_gi = [] + all_anch = [] + + for i, pi in enumerate(p): + + obj_idx = self.wh_bin_sigmoid.get_length()*2 + 2 + + b, a, gj, gi = indices[i] + idx = (b == batch_idx) + b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] + all_b.append(b) + all_a.append(a) + all_gj.append(gj) + all_gi.append(gi) + all_anch.append(anch[i][idx]) + from_which_layer.append(torch.ones(size=(len(b),)) * i) + + fg_pred = pi[b, a, gj, gi] + p_obj.append(fg_pred[:, obj_idx:(obj_idx+1)]) + p_cls.append(fg_pred[:, (obj_idx+1):]) + + grid = torch.stack([gi, gj], dim=1) + pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. + #pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. + pw = self.wh_bin_sigmoid.forward(fg_pred[..., 2:(3+self.bin_count)].sigmoid()) * anch[i][idx][:, 0] * self.stride[i] + ph = self.wh_bin_sigmoid.forward(fg_pred[..., (3+self.bin_count):obj_idx].sigmoid()) * anch[i][idx][:, 1] * self.stride[i] + + pxywh = torch.cat([pxy, pw.unsqueeze(1), ph.unsqueeze(1)], dim=-1) + pxyxy = xywh2xyxy(pxywh) + pxyxys.append(pxyxy) + + pxyxys = torch.cat(pxyxys, dim=0) + if pxyxys.shape[0] == 0: + continue + p_obj = torch.cat(p_obj, dim=0) + p_cls = torch.cat(p_cls, dim=0) + from_which_layer = torch.cat(from_which_layer, dim=0) + all_b = torch.cat(all_b, dim=0) + all_a = torch.cat(all_a, dim=0) + all_gj = torch.cat(all_gj, dim=0) + all_gi = torch.cat(all_gi, dim=0) + all_anch = torch.cat(all_anch, dim=0) + + pair_wise_iou = box_iou(txyxy, pxyxys) + + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + gt_cls_per_image = ( + F.one_hot(this_target[:, 1].to(torch.int64), self.nc) + .float() + .unsqueeze(1) + .repeat(1, pxyxys.shape[0], 1) + ) + + num_gt = this_target.shape[0] + cls_preds_ = ( + p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + ) + + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" + ).sum(-1) + del cls_preds_ + + cost = ( + pair_wise_cls_loss + + 3.0 * pair_wise_iou_loss + ) + + matching_matrix = torch.zeros_like(cost) + + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False + ) + matching_matrix[gt_idx][pos_idx] = 1.0 + + del top_k, dynamic_ks + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = matching_matrix.sum(0) > 0.0 + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + + from_which_layer = from_which_layer[fg_mask_inboxes] + all_b = all_b[fg_mask_inboxes] + all_a = all_a[fg_mask_inboxes] + all_gj = all_gj[fg_mask_inboxes] + all_gi = all_gi[fg_mask_inboxes] + all_anch = all_anch[fg_mask_inboxes] + + this_target = this_target[matched_gt_inds] + + for i in range(nl): + layer_idx = from_which_layer == i + 
matching_bs[i].append(all_b[layer_idx]) + matching_as[i].append(all_a[layer_idx]) + matching_gjs[i].append(all_gj[layer_idx]) + matching_gis[i].append(all_gi[layer_idx]) + matching_targets[i].append(this_target[layer_idx]) + matching_anchs[i].append(all_anch[layer_idx]) + + for i in range(nl): + if matching_targets[i] != []: + matching_bs[i] = torch.cat(matching_bs[i], dim=0) + matching_as[i] = torch.cat(matching_as[i], dim=0) + matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) + matching_gis[i] = torch.cat(matching_gis[i], dim=0) + matching_targets[i] = torch.cat(matching_targets[i], dim=0) + matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) + else: + matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + + return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs + + def find_3_positive(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + indices, anch = [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + anch.append(anchors[a]) # anchors + + return indices, anch + + +class ComputeLossAuxOTA: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLossAuxOTA, self).__init__() + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors', 'stride': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets, imgs): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + bs_aux, as_aux_, gjs_aux, gis_aux, targets_aux, anchors_aux = self.build_targets2(p[:self.nl], targets, imgs) + bs, as_, gjs, gis, targets, anchors = self.build_targets(p[:self.nl], targets, imgs) + pre_gen_gains_aux = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]] + pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]] + + + # Losses + for i in range(self.nl): # layer index, layer predictions + pi = p[i] + pi_aux = p[i+self.nl] + b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx + b_aux, a_aux, gj_aux, gi_aux = bs_aux[i], as_aux_[i], gjs_aux[i], gis_aux[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + tobj_aux = torch.zeros_like(pi_aux[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + grid = torch.stack([gi, gj], dim=1) + pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] + selected_tbox[:, :2] -= grid + iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + selected_tcls = targets[i][:, 1].long() + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), selected_tcls] = self.cp + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + n_aux = b_aux.shape[0] # number of targets + if n_aux: + ps_aux = pi_aux[b_aux, a_aux, gj_aux, gi_aux] # prediction subset corresponding to targets + grid_aux = torch.stack([gi_aux, gj_aux], dim=1) + pxy_aux = ps_aux[:, :2].sigmoid() * 2. - 0.5 + #pxy_aux = ps_aux[:, :2].sigmoid() * 3. - 1. + pwh_aux = (ps_aux[:, 2:4].sigmoid() * 2) ** 2 * anchors_aux[i] + pbox_aux = torch.cat((pxy_aux, pwh_aux), 1) # predicted box + selected_tbox_aux = targets_aux[i][:, 2:6] * pre_gen_gains_aux[i] + selected_tbox_aux[:, :2] -= grid_aux + iou_aux = bbox_iou(pbox_aux.T, selected_tbox_aux, x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += 0.25 * (1.0 - iou_aux).mean() # iou loss + + # Objectness + tobj_aux[b_aux, a_aux, gj_aux, gi_aux] = (1.0 - self.gr) + self.gr * iou_aux.detach().clamp(0).type(tobj_aux.dtype) # iou ratio + + # Classification + selected_tcls_aux = targets_aux[i][:, 1].long() + if self.nc > 1: # cls loss (only if multiple classes) + t_aux = torch.full_like(ps_aux[:, 5:], self.cn, device=device) # targets + t_aux[range(n_aux), selected_tcls_aux] = self.cp + lcls += 0.25 * self.BCEcls(ps_aux[:, 5:], t_aux) # BCE + + obji = self.BCEobj(pi[..., 4], tobj) + obji_aux = self.BCEobj(pi_aux[..., 4], tobj_aux) + lobj += obji * self.balance[i] + 0.25 * obji_aux * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets, imgs): + + indices, anch = self.find_3_positive(p, targets) + + matching_bs = [[] for pp in p] + matching_as = [[] for pp in p] + matching_gjs = [[] for pp in p] + matching_gis = [[] for pp in p] + matching_targets = [[] for pp in p] + matching_anchs = [[] for pp in p] + + nl = len(p) + + for batch_idx in range(p[0].shape[0]): + + b_idx = targets[:, 0]==batch_idx + this_target = targets[b_idx] + if this_target.shape[0] == 0: + continue + + txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] + txyxy = xywh2xyxy(txywh) + + pxyxys = [] + p_cls = [] + p_obj = [] + from_which_layer = [] + all_b = [] + all_a = [] + all_gj = [] + all_gi = [] + all_anch = [] + + for i, pi in enumerate(p): + + b, a, gj, gi = indices[i] + idx = (b == batch_idx) + b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] + all_b.append(b) + all_a.append(a) + all_gj.append(gj) + all_gi.append(gi) + all_anch.append(anch[i][idx]) + 
from_which_layer.append(torch.ones(size=(len(b),)) * i) + + fg_pred = pi[b, a, gj, gi] + p_obj.append(fg_pred[:, 4:5]) + p_cls.append(fg_pred[:, 5:]) + + grid = torch.stack([gi, gj], dim=1) + pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. + #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] + pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. + pxywh = torch.cat([pxy, pwh], dim=-1) + pxyxy = xywh2xyxy(pxywh) + pxyxys.append(pxyxy) + + pxyxys = torch.cat(pxyxys, dim=0) + if pxyxys.shape[0] == 0: + continue + p_obj = torch.cat(p_obj, dim=0) + p_cls = torch.cat(p_cls, dim=0) + from_which_layer = torch.cat(from_which_layer, dim=0) + all_b = torch.cat(all_b, dim=0) + all_a = torch.cat(all_a, dim=0) + all_gj = torch.cat(all_gj, dim=0) + all_gi = torch.cat(all_gi, dim=0) + all_anch = torch.cat(all_anch, dim=0) + + pair_wise_iou = box_iou(txyxy, pxyxys) + + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + gt_cls_per_image = ( + F.one_hot(this_target[:, 1].to(torch.int64), self.nc) + .float() + .unsqueeze(1) + .repeat(1, pxyxys.shape[0], 1) + ) + + num_gt = this_target.shape[0] + cls_preds_ = ( + p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + ) + + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" + ).sum(-1) + del cls_preds_ + + cost = ( + pair_wise_cls_loss + + 3.0 * pair_wise_iou_loss + ) + + matching_matrix = torch.zeros_like(cost) + + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False + ) + matching_matrix[gt_idx][pos_idx] = 1.0 + + del top_k, dynamic_ks + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = matching_matrix.sum(0) > 0.0 + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + + from_which_layer = from_which_layer[fg_mask_inboxes] + all_b = all_b[fg_mask_inboxes] + all_a = all_a[fg_mask_inboxes] + all_gj = all_gj[fg_mask_inboxes] + all_gi = all_gi[fg_mask_inboxes] + all_anch = all_anch[fg_mask_inboxes] + + this_target = this_target[matched_gt_inds] + + for i in range(nl): + layer_idx = from_which_layer == i + matching_bs[i].append(all_b[layer_idx]) + matching_as[i].append(all_a[layer_idx]) + matching_gjs[i].append(all_gj[layer_idx]) + matching_gis[i].append(all_gi[layer_idx]) + matching_targets[i].append(this_target[layer_idx]) + matching_anchs[i].append(all_anch[layer_idx]) + + for i in range(nl): + if matching_targets[i] != []: + matching_bs[i] = torch.cat(matching_bs[i], dim=0) + matching_as[i] = torch.cat(matching_as[i], dim=0) + matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) + matching_gis[i] = torch.cat(matching_gis[i], dim=0) + matching_targets[i] = torch.cat(matching_targets[i], dim=0) + matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) + else: + matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gis[i] = 
torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + + return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs + + def build_targets2(self, p, targets, imgs): + + indices, anch = self.find_5_positive(p, targets) + + matching_bs = [[] for pp in p] + matching_as = [[] for pp in p] + matching_gjs = [[] for pp in p] + matching_gis = [[] for pp in p] + matching_targets = [[] for pp in p] + matching_anchs = [[] for pp in p] + + nl = len(p) + + for batch_idx in range(p[0].shape[0]): + + b_idx = targets[:, 0]==batch_idx + this_target = targets[b_idx] + if this_target.shape[0] == 0: + continue + + txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] + txyxy = xywh2xyxy(txywh) + + pxyxys = [] + p_cls = [] + p_obj = [] + from_which_layer = [] + all_b = [] + all_a = [] + all_gj = [] + all_gi = [] + all_anch = [] + + for i, pi in enumerate(p): + + b, a, gj, gi = indices[i] + idx = (b == batch_idx) + b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] + all_b.append(b) + all_a.append(a) + all_gj.append(gj) + all_gi.append(gi) + all_anch.append(anch[i][idx]) + from_which_layer.append(torch.ones(size=(len(b),)) * i) + + fg_pred = pi[b, a, gj, gi] + p_obj.append(fg_pred[:, 4:5]) + p_cls.append(fg_pred[:, 5:]) + + grid = torch.stack([gi, gj], dim=1) + pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. + #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] + pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. + pxywh = torch.cat([pxy, pwh], dim=-1) + pxyxy = xywh2xyxy(pxywh) + pxyxys.append(pxyxy) + + pxyxys = torch.cat(pxyxys, dim=0) + if pxyxys.shape[0] == 0: + continue + p_obj = torch.cat(p_obj, dim=0) + p_cls = torch.cat(p_cls, dim=0) + from_which_layer = torch.cat(from_which_layer, dim=0) + all_b = torch.cat(all_b, dim=0) + all_a = torch.cat(all_a, dim=0) + all_gj = torch.cat(all_gj, dim=0) + all_gi = torch.cat(all_gi, dim=0) + all_anch = torch.cat(all_anch, dim=0) + + pair_wise_iou = box_iou(txyxy, pxyxys) + + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + gt_cls_per_image = ( + F.one_hot(this_target[:, 1].to(torch.int64), self.nc) + .float() + .unsqueeze(1) + .repeat(1, pxyxys.shape[0], 1) + ) + + num_gt = this_target.shape[0] + cls_preds_ = ( + p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + ) + + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" + ).sum(-1) + del cls_preds_ + + cost = ( + pair_wise_cls_loss + + 3.0 * pair_wise_iou_loss + ) + + matching_matrix = torch.zeros_like(cost) + + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False + ) + matching_matrix[gt_idx][pos_idx] = 1.0 + + del top_k, dynamic_ks + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = matching_matrix.sum(0) > 0.0 + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + 
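+ # The selection above is a SimOTA-style dynamic-k assignment (cf. YOLOX): each ground truth keeps its
+ # dynamic_k lowest-cost candidate predictions, any candidate claimed by several ground truths is handed
+ # to the single ground truth with the lowest cost, fg_mask_inboxes marks every prediction that ended up
+ # matched, and matched_gt_inds records which ground truth each matched prediction serves; the indexing
+ # below keeps only those foreground candidates for this image.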
+ from_which_layer = from_which_layer[fg_mask_inboxes] + all_b = all_b[fg_mask_inboxes] + all_a = all_a[fg_mask_inboxes] + all_gj = all_gj[fg_mask_inboxes] + all_gi = all_gi[fg_mask_inboxes] + all_anch = all_anch[fg_mask_inboxes] + + this_target = this_target[matched_gt_inds] + + for i in range(nl): + layer_idx = from_which_layer == i + matching_bs[i].append(all_b[layer_idx]) + matching_as[i].append(all_a[layer_idx]) + matching_gjs[i].append(all_gj[layer_idx]) + matching_gis[i].append(all_gi[layer_idx]) + matching_targets[i].append(this_target[layer_idx]) + matching_anchs[i].append(all_anch[layer_idx]) + + for i in range(nl): + if matching_targets[i] != []: + matching_bs[i] = torch.cat(matching_bs[i], dim=0) + matching_as[i] = torch.cat(matching_as[i], dim=0) + matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) + matching_gis[i] = torch.cat(matching_gis[i], dim=0) + matching_targets[i] = torch.cat(matching_targets[i], dim=0) + matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) + else: + matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + + return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs + + def find_5_positive(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + indices, anch = [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 1.0 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + anch.append(anchors[a]) # anchors + + return indices, anch + + def find_3_positive(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + indices, anch = [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. < g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + anch.append(anchors[a]) # anchors + + return indices, anch diff --git a/human_detection/yolov7 skeleton/utils/metrics.py b/human_detection/yolov7 skeleton/utils/metrics.py new file mode 100644 index 00000000..6d2f5364 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/metrics.py @@ -0,0 +1,227 @@ +# Model validation metrics + +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from . import general + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, v5_metric=False, plot=False, save_dir='.', names=()): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). 
+ plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes = np.unique(target_cls) + nc = unique_classes.shape[0] # number of classes + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = (target_cls == c).sum() # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + 1e-16) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j], v5_metric=v5_metric) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + 1e-16) + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + + i = f1.mean(0).argmax() # max F1 index + return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + + +def compute_ap(recall, precision, v5_metric=False): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + v5_metric: Assume maximum recall to be 1.0, as in YOLOv5, MMDetection etc. + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + if v5_metric: # New YOLOv5 metric, same as MMDetection and Detectron2 repositories + mrec = np.concatenate(([0.], recall, [1.0])) + else: # Old YOLOv5 metric, i.e. 
default YOLOv7 metric + mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) + mpre = np.concatenate(([1.], precision, [0.])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = general.box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(np.int16) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[gc, detection_classes[m1[j]]] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # background FP + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # background FN + + def matrix(self): + return self.matrix + + def plot(self, save_dir='', names=()): + try: + import seaborn as sn + + array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig = plt.figure(figsize=(12, 9), tight_layout=True) + sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size + labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels + sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel('True') + fig.axes[0].set_ylabel('Predicted') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + except Exception as e: + pass + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + +def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + # 
Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + + +def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = py.mean(0) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) diff --git a/human_detection/yolov7 skeleton/utils/plots.py b/human_detection/yolov7 skeleton/utils/plots.py new file mode 100644 index 00000000..fdd8d0e8 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/plots.py @@ -0,0 +1,489 @@ +# Plotting utils + +import glob +import math +import os +import random +from copy import copy +from pathlib import Path + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sns +import torch +import yaml +from PIL import Image, ImageDraw, ImageFont +from scipy.signal import butter, filtfilt + +from utils.general import xywh2xyxy, xyxy2xywh +from utils.metrics import fitness + +# Settings +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +def color_list(): + # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb + def hex2rgb(h): + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949) + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def plot_one_box(x, img, color=None, label=None, line_thickness=3): + # Plots one bounding box on image img + tl = line_thickness or round(0.002 
* (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness + color = color or [random.randint(0, 255) for _ in range(3)] + c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) + cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + if label: + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 + cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + + +def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None): + img = Image.fromarray(img) + draw = ImageDraw.Draw(img) + line_thickness = line_thickness or max(int(min(img.size) / 200), 2) + draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot + if label: + fontsize = max(round(max(img.size) / 40), 12) + font = ImageFont.truetype("Arial.ttf", fontsize) + txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) + draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) + return np.asarray(img) + + +def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() + # Compares the two methods for width-height anchor multiplication + # https://github.com/ultralytics/yolov3/issues/168 + x = np.arange(-4.0, 4.0, .1) + ya = np.exp(x) + yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 + + fig = plt.figure(figsize=(6, 3), tight_layout=True) + plt.plot(x, ya, '.-', label='YOLOv3') + plt.plot(x, yb ** 2, '.-', label='YOLOR ^2') + plt.plot(x, yb ** 1.6, '.-', label='YOLOR ^1.6') + plt.xlim(left=-4, right=4) + plt.ylim(bottom=0, top=6) + plt.xlabel('input') + plt.ylabel('output') + plt.grid() + plt.legend() + fig.savefig('comparison.png', dpi=200) + + +def output_to_target(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): + # Plot image grid with labels + + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + # un-normalise + if np.max(images[0]) <= 1: + images *= 255 + + tl = 3 # line thickness + tf = max(tl - 1, 1) # font thickness + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Check if we should resize + scale_factor = max_size / max(h, w) + if scale_factor < 1: + h = math.ceil(scale_factor * h) + w = math.ceil(scale_factor * w) + + colors = color_list() # list of colors + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, img in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + + block_x = int(w * (i // ns)) + block_y = int(h * (i % ns)) + + img = img.transpose(1, 2, 0) + if scale_factor < 1: + img = cv2.resize(img, (w, h)) + + mosaic[block_y:block_y + h, block_x:block_x + w, :] = img + if len(targets) > 0: + image_targets = targets[targets[:, 0] == i] + boxes = xywh2xyxy(image_targets[:, 2:6]).T + classes = 
image_targets[:, 1].astype('int') + labels = image_targets.shape[1] == 6 # labels if no conf column + conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale_factor < 1: # absolute coords need scale if image scales + boxes *= scale_factor + boxes[[0, 2]] += block_x + boxes[[1, 3]] += block_y + for j, box in enumerate(boxes.T): + cls = int(classes[j]) + color = colors[cls % len(colors)] + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) + plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) + + # Draw image filename labels + if paths: + label = Path(paths[i]).name[:40] # trim to 40 char + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, + lineType=cv2.LINE_AA) + + # Image border + cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) + + if fname: + r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size + mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) + # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save + Image.fromarray(mosaic).save(fname) # PIL save + return mosaic + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_test_txt(): # from utils.plots import *; plot_test() + # Plot test.txt histograms + x = np.loadtxt('test.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() + # Plot study.txt generated by test.py + fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) + # ax = ax.ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]: + for f in sorted(Path(path).glob('study*.txt')): + y = np.loadtxt(f, 
dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] + # for i in range(7): + # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + # ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + ax2.set_ylim(30, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + plt.savefig(str(Path(path).name) + '.png', dpi=300) + + +def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): + # plot dataset labels + print('Plotting labels... ') + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + colors = color_list() + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(names, rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + # loggers + for k, v in loggers.items() or {}: + if k == 'wandb' and v: + v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) + + +def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() + # Plot hyperparameter evolution results in evolve.txt + with open(yaml_file) as f: + hyp = yaml.load(f, Loader=yaml.SafeLoader) + x = np.loadtxt('evolve.txt', ndmin=2) + f = fitness(x) + # weights = (f - f.min()) ** 2 # for weighted results + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + for i, (k, v) in enumerate(hyp.items()): + y = x[:, i + 7] + # mu = (y * weights).sum() / weights.sum() # best weighted result + mu = y[f.argmax()] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + if i % 
5 != 0: + plt.yticks([]) + print('%15s: %.3g' % (k, mu)) + plt.savefig('evolve.png', dpi=200) + print('\nPlot saved as evolve.png') + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() + # Plot training 'results*.txt', overlaying train and val losses + s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends + t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles + for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) + ax = ax.ravel() + for i in range(5): + for j in [i, i + 5]: + y = results[j, x] + ax[i].plot(x, y, marker='.', label=s[j]) + # y_smooth = butter_lowpass_filtfilt(y) + # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) + + ax[i].set_title(t[i]) + ax[i].legend() + ax[i].set_ylabel(f) if i == 0 else None # add filename + fig.savefig(f.replace('.txt', '.png'), dpi=200) + + +def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): + # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', + 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] + if bucket: + # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] + files = ['results%g.txt' % x for x in id] + c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) + os.system(c) + else: + files = list(Path(save_dir).glob('results*.txt')) + assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + for i in range(10): + y = results[i, x] + if i in [0, 1, 2, 5, 6, 7]: + y[y == 0] = np.nan # don't show zero loss values + # y /= y[0] # normalize + label = labels[fi] if len(labels) else f.stem + ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) + ax[i].set_title(s[i]) + # if i in [5, 6, 7]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + fig.savefig(Path(save_dir) / 'results.png', dpi=200) + + +def output_to_keypoint(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf, kpts...] + targets = [] + for i, o in enumerate(output): + kpts = o[:,6:] + o = o[:,:6] + for index, (*box, conf, cls) in enumerate(o.detach().cpu().numpy()): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf, *list(kpts.detach().cpu().numpy()[index])]) + return np.array(targets) + + +def plot_skeleton_kpts(im, kpts, steps, orig_shape=None): + # Plot the skeleton and keypoints for the COCO dataset + palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], + [230, 230, 0], [255, 153, 255], [153, 204, 255], + [255, 102, 255], [255, 51, 255], [102, 178, 255], + [51, 153, 255], [255, 153, 153], [255, 102, 102], + [255, 51, 51], [153, 255, 153], [102, 255, 102], + [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], + [255, 255, 255]]) + + skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], + [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], + [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] + + pose_limb_color = palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]] + pose_kpt_color = palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]] + radius = 5 + num_kpts = len(kpts) // steps + + for kid in range(num_kpts): + r, g, b = pose_kpt_color[kid] + x_coord, y_coord = kpts[steps * kid], kpts[steps * kid + 1] + if not (x_coord % 640 == 0 or y_coord % 640 == 0): + if steps == 3: + conf = kpts[steps * kid + 2] + if conf < 0.5: + continue + cv2.circle(im, (int(x_coord), int(y_coord)), radius, (int(r), int(g), int(b)), -1) + + for sk_id, sk in enumerate(skeleton): + r, g, b = pose_limb_color[sk_id] + pos1 = (int(kpts[(sk[0]-1)*steps]), int(kpts[(sk[0]-1)*steps+1])) + pos2 = (int(kpts[(sk[1]-1)*steps]), int(kpts[(sk[1]-1)*steps+1])) + if steps == 3: + conf1 = kpts[(sk[0]-1)*steps+2] + conf2 = kpts[(sk[1]-1)*steps+2] + if conf1<0.5 or conf2<0.5: + continue + if pos1[0]%640 == 0 or pos1[1]%640==0 or pos1[0]<0 or pos1[1]<0: + continue + if pos2[0] % 640 == 0 or pos2[1] % 640 == 0 or pos2[0]<0 or pos2[1]<0: + continue + cv2.line(im, pos1, pos2, (int(r), int(g), int(b)), thickness=2) diff --git a/human_detection/yolov7 skeleton/utils/torch_utils.py b/human_detection/yolov7 skeleton/utils/torch_utils.py new file mode 100644 index 00000000..1e631b55 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/torch_utils.py @@ -0,0 +1,374 @@ +# YOLOR PyTorch utils + +import datetime +import logging +import math +import os +import platform +import subprocess +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn 
+import torch.nn.functional as F +import torchvision + +try: + import thop # for FLOPS computation +except ImportError: + thop = None +logger = logging.getLogger(__name__) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """ + Decorator to make all processes in distributed training wait for each local_master to do something. + """ + if local_rank not in [-1, 0]: + torch.distributed.barrier() + yield + if local_rank == 0: + torch.distributed.barrier() + + +def init_torch_seeds(seed=0): + # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html + torch.manual_seed(seed) + if seed == 0: # slower, more reproducible + cudnn.benchmark, cudnn.deterministic = False, True + else: # faster, less reproducible + cudnn.benchmark, cudnn.deterministic = True, False + + +def date_modified(path=__file__): + # return human-readable file modification date, i.e. '2021-3-26' + t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def git_describe(path=Path(__file__).parent): # path must be a directory + # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + s = f'git -C {path} describe --tags --long --always' + try: + return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] + except subprocess.CalledProcessError as e: + return '' # not a git repository + + +def select_device(device='', batch_size=None): + # device = 'cpu' or '0' or '0,1,2,3' + s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string + cpu = device.lower() == 'cpu' + if cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable + assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + + cuda = not cpu and torch.cuda.is_available() + if cuda: + n = torch.cuda.device_count() + if n > 1 and batch_size: # check that batch_size is compatible with device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * len(s) + for i, d in enumerate(device.split(',') if device else range(n)): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB + else: + s += 'CPU\n' + + logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + return torch.device('cuda:0' if cuda else 'cpu') + + +def time_synchronized(): + # pytorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(x, ops, n=100, device=None): + # profile a pytorch module or list of modules. 
Example usage: + # x = torch.randn(16, 3, 640, 640) # input + # m1 = lambda x: x * torch.sigmoid(x) + # m2 = nn.SiLU() + # profile(x, [m1, m2], n=100) # profile speed over 100 iterations + + device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + x = x.to(device) + x.requires_grad = True + print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') + print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type + dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + except: + flops = 0 + + for _ in range(n): + t[0] = time_synchronized() + y = m(x) + t[1] = time_synchronized() + try: + _ = y.sum().backward() + t[2] = time_synchronized() + except: # no backward method + t[2] = float('nan') + dtf += (t[1] - t[0]) * 1000 / n # ms per op forward + dtb += (t[2] - t[1]) * 1000 / n # ms per op backward + + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + + +def is_parallel(model): + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0., 0. + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... 
', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def fuse_conv_and_bn(conv, bn): + # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, img_size=640): + # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPS + from thop import profile + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 + img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float + fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + except (ImportError, Exception): + fs = '' + + logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def load_classifier(name='resnet101', n=2): + # Loads a pretrained model reshaped to n-class output + model = torchvision.models.__dict__[name](pretrained=True) + + # ResNet model properties + # input_size = [3, 224, 224] + # input_space = 'RGB' + # input_range = [0, 1] + # mean = [0.485, 0.456, 0.406] + # std = [0.229, 0.224, 0.225] + + # Reshape output to n classes + filters = model.fc.weight.shape[1] + model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) + model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) + model.fc.out_features = n + return model + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + else: + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + return F.pad(img, [0, 
w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +class ModelEMA: + """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + + def __init__(self, model, decay=0.9999, updates=0): + # Create EMA + self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + # if next(model.parameters()).device.type != 'cpu': + # self.ema.half() # FP16 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + with torch.no_grad(): + self.updates += 1 + d = self.decay(self.updates) + + msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: + v *= d + v += (1. - d) * msd[k].detach() + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) + + +class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): + def _check_input_dim(self, input): + # The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc + # is this method that is overwritten by the sub-class + # This original goal of this method was for tensor sanity checks + # If you're ok bypassing those sanity checks (eg. 
if you trust your inference + # to provide the right dimensional inputs), then you can just use this method + # for easy conversion from SyncBatchNorm + # (unfortunately, SyncBatchNorm does not store the original class - if it did + # we could return the one that was originally created) + return + +def revert_sync_batchnorm(module): + # this is very similar to the function that it is trying to revert: + # https://github.com/pytorch/pytorch/blob/c8b3686a3e4ba63dc59e5dcfe5db3430df256833/torch/nn/modules/batchnorm.py#L679 + module_output = module + if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm): + new_cls = BatchNormXd + module_output = BatchNormXd(module.num_features, + module.eps, module.momentum, + module.affine, + module.track_running_stats) + if module.affine: + with torch.no_grad(): + module_output.weight = module.weight + module_output.bias = module.bias + module_output.running_mean = module.running_mean + module_output.running_var = module.running_var + module_output.num_batches_tracked = module.num_batches_tracked + if hasattr(module, "qconfig"): + module_output.qconfig = module.qconfig + for name, child in module.named_children(): + module_output.add_module(name, revert_sync_batchnorm(child)) + del module + return module_output + + +class TracedModel(nn.Module): + + def __init__(self, model=None, device=None, img_size=(640,640)): + super(TracedModel, self).__init__() + + print(" Convert model to Traced-model... ") + self.stride = model.stride + self.names = model.names + self.model = model + + self.model = revert_sync_batchnorm(self.model) + self.model.to('cpu') + self.model.eval() + + self.detect_layer = self.model.model[-1] + self.model.traced = True + + rand_example = torch.rand(1, 3, img_size, img_size) + + traced_script_module = torch.jit.trace(self.model, rand_example, strict=False) + #traced_script_module = torch.jit.script(self.model) + traced_script_module.save("traced_model.pt") + print(" traced_script_module saved! ") + self.model = traced_script_module + self.model.to(device) + self.detect_layer.to(device) + print(" model is traced! 
\n") + + def forward(self, x, augment=False, profile=False): + out = self.model(x) + out = self.detect_layer(out) + return out \ No newline at end of file diff --git a/human_detection/yolov7 skeleton/utils/wandb_logging/__init__.py b/human_detection/yolov7 skeleton/utils/wandb_logging/__init__.py new file mode 100644 index 00000000..84952a81 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/wandb_logging/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/human_detection/yolov7 skeleton/utils/wandb_logging/log_dataset.py b/human_detection/yolov7 skeleton/utils/wandb_logging/log_dataset.py new file mode 100644 index 00000000..74cd6c6c --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/wandb_logging/log_dataset.py @@ -0,0 +1,24 @@ +import argparse + +import yaml + +from wandb_utils import WandbLogger + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def create_dataset_artifact(opt): + with open(opt.data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + parser.add_argument('--project', type=str, default='YOLOR', help='name of W&B Project') + opt = parser.parse_args() + opt.resume = False # Explicitly disallow resume check for dataset upload job + + create_dataset_artifact(opt) diff --git a/human_detection/yolov7 skeleton/utils/wandb_logging/wandb_utils.py b/human_detection/yolov7 skeleton/utils/wandb_logging/wandb_utils.py new file mode 100644 index 00000000..aec7c5f4 --- /dev/null +++ b/human_detection/yolov7 skeleton/utils/wandb_logging/wandb_utils.py @@ -0,0 +1,306 @@ +import json +import sys +from pathlib import Path + +import torch +import yaml +from tqdm import tqdm + +sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path +from utils.datasets import LoadImagesAndLabels +from utils.datasets import img2label_paths +from utils.general import colorstr, xywh2xyxy, check_dataset + +try: + import wandb + from wandb import init, finish +except ImportError: + wandb = None + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): + return from_string[len(prefix):] + + +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return run_id, project, model_artifact_name + + +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + if opt.global_rank not in [-1, 0]: # For resuming DDP runs + run_id, project, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True + return None + + +def process_wandb_config_ddp_mode(opt): + with open(opt.data) as f: + data_dict 
= yaml.load(f, Loader=yaml.SafeLoader) # data dict + train_dir, val_dir = None, None + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.dump(data_dict, f) + opt.data = ddp_data_path + + +class WandbLogger(): + def __init__(self, opt, name, run_id, data_dict, job_type='Training'): + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict + # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + run_id, project, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') + opt.resume = model_artifact_name + elif self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOR' if opt.project == 'runs/train' else Path(opt.project).stem, + name=name, + job_type=job_type, + id=run_id) if not wandb.run else wandb.run + if self.wandb_run: + if self.job_type == 'Training': + if not opt.resume: + wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict + # Info useful for resuming from artifacts + self.wandb_run.config.opt = vars(opt) + self.wandb_run.config.data_dict = wandb_data_dict + self.data_dict = self.setup_training(opt, data_dict) + if self.job_type == 'Dataset Creation': + self.data_dict = self.check_and_upload_dataset(opt) + else: + prefix = colorstr('wandb: ') + print(f"{prefix}Install Weights & Biases for YOLOR logging with 'pip install wandb' (recommended)") + + def check_and_upload_dataset(self, opt): + assert wandb, 'Install wandb to upload dataset' + check_dataset(self.data_dict) + config_path = self.log_dataset_artifact(opt.data, + opt.single_cls, + 'YOLOR' if opt.project == 'runs/train' else Path(opt.project).stem) + print("Created dataset config file ", config_path) + with open(config_path) as f: + wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) + return wandb_data_dict + + def setup_training(self, opt, data_dict): + self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( + self.weights), config.save_period, 
config.total_batch_size, config.bbox_interval, config.epochs, \ + config.opt['hyp'] + data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume + if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), + opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), + opt.artifact_alias) + self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + self.val_table = self.val_artifact.get("val") + self.map_val_table_path() + if self.val_artifact is not None: + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + return data_dict + + def download_dataset_artifact(self, path, alias): + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): + dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + return datadir, dataset_artifact + return None, None + + def download_model_artifact(self, opt): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' 
% ( + total_epochs) + return modeldir, model_artifact + return None, None + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ + 'original_url': str(path), + 'epochs_trained': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score + }) + model_artifact.add_file(str(path / 'last.pt'), name='last.pt') + wandb.log_artifact(model_artifact, + aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) + print("Saving model artifact on epoch ", epoch + 1) + + def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + with open(data_file) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['train']), names, name='train') if data.get('train') else None + self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['val']), names, name='val') if data.get('val') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + if data.get('val'): + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path + data.pop('download', None) + with open(path, 'w') as f: + yaml.dump(data, f) + + if self.job_type == 'Training': # builds correct artifact pipeline graph + self.wandb_run.use_artifact(self.val_artifact) + self.wandb_run.use_artifact(self.train_artifact) + self.val_artifact.wait() + self.val_table = self.val_artifact.get('val') + self.map_val_table_path() + else: + self.wandb_run.log_artifact(self.train_artifact) + self.wandb_run.log_artifact(self.val_artifact) + return path + + def map_val_table_path(self): + self.val_table_map = {} + print("Mapping dataset") + for i, data in enumerate(tqdm(self.val_table.data)): + self.val_table_map[data[3]] = data[0] + + def create_dataset_table(self, dataset, class_to_id, name='dataset'): + # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging + artifact = wandb.Artifact(name=name, type="dataset") + img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None + img_files = tqdm(dataset.img_files) if not img_files else img_files + for img_file in img_files: + if Path(img_file).is_dir(): + artifact.add_dir(img_file, name='data/images') + labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) + artifact.add_dir(labels_path, name='data/labels') + else: + artifact.add_file(img_file, name='data/images/' + Path(img_file).name) + label_file = Path(img2label_paths([img_file])[0]) + artifact.add_file(str(label_file), + name='data/labels/' + label_file.name) if label_file.exists() else None + table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) + for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): + height, width = shapes[0] + labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height]) + box_data, img_classes = [], {} + for cls, *xyxy in labels[:, 
1:].tolist(): + cls = int(cls) + box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls]), + "scores": {"acc": 1}, + "domain": "pixel"}) + img_classes[cls] = class_to_id[cls] + boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), + Path(paths).name) + artifact.add(table, name) + return artifact + + def log_training_progress(self, predn, path, names): + if self.val_table and self.result_table: + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + total_conf = 0 + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + box_data.append( + {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"}) + total_conf = total_conf + conf + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_map[Path(path).name] + self.result_table.add_data(self.current_epoch, + id, + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + total_conf / max(1, len(box_data)) + ) + + def log(self, log_dict): + if self.wandb_run: + for key, value in log_dict.items(): + self.log_dict[key] = value + + def end_epoch(self, best_result=False): + if self.wandb_run: + wandb.log(self.log_dict) + self.log_dict = {} + if self.result_artifact: + train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") + self.result_artifact.add(train_results, 'result') + wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + + def finish_run(self): + if self.wandb_run: + if self.log_dict: + wandb.log(self.log_dict) + wandb.run.finish() From f4cb41230e35fd0e5aeddb413dac1a5597b79eea Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 31 Jul 2023 23:15:10 +0700 Subject: [PATCH 02/67] Update readme.md --- human_detection/yolov7 skeleton/readme.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/human_detection/yolov7 skeleton/readme.md b/human_detection/yolov7 skeleton/readme.md index 4778cca2..594fd9a8 100644 --- a/human_detection/yolov7 skeleton/readme.md +++ b/human_detection/yolov7 skeleton/readme.md @@ -3,4 +3,5 @@ `models/` `utils/` `sort.py` is downloaded from https://github.com/WongKinYiu/yolov7 (with no modification) Usage: -```python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --track --show-track-lines --classes 0 --no-trace --source video.mp4 ``` \ No newline at end of file +1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq +2. 
run ```python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --track --show-track-lines --classes 0 --no-trace --source video.mp4 ``` \ No newline at end of file From 6eac8730416be4917bb8e4bf7fef7a14ebefb4ef Mon Sep 17 00:00:00 2001 From: Nathan Date: Tue, 1 Aug 2023 00:16:32 +0700 Subject: [PATCH 03/67] fixed fps display and removed unused code --- human_detection/yolov7 skeleton/main.py | 131 +++++++++--------------- 1 file changed, 51 insertions(+), 80 deletions(-) diff --git a/human_detection/yolov7 skeleton/main.py b/human_detection/yolov7 skeleton/main.py index 3c86db38..599cd350 100644 --- a/human_detection/yolov7 skeleton/main.py +++ b/human_detection/yolov7 skeleton/main.py @@ -6,16 +6,12 @@ import numpy as np from models.experimental import attempt_load -from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, \ - check_imshow, non_max_suppression, apply_classifier, \ - scale_coords, xyxy2xywh, strip_optimizer, set_logging, \ - increment_path -from utils.plots import plot_one_box -from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel +from utils.datasets import LoadImages +from utils.general import check_img_size, non_max_suppression, scale_coords, set_logging, increment_path +from utils.torch_utils import select_device, TracedModel import sort -"""Function to Draw Bounding boxes""" +"""Function to draw bounding boxes""" def draw_boxes(img, bbox, identities=None, categories=None, confidences = None, names=None, colors = None): for i, box in enumerate(bbox): x1, y1, x2, y2 = [int(i) for i in box] @@ -40,7 +36,7 @@ def draw_boxes(img, bbox, identities=None, categories=None, confidences = None, return img - +"""Function to draw tracking lines""" def draw_track_lines(im0, tracks, sort_tracker, thickness): for t, track in enumerate(tracks): # loop over tracks track_color = sort_tracker.color_list[t] # Get the color for the current track from the color_list of sort_tracker @@ -74,14 +70,7 @@ def __init__(self): # Set Dataloader source = opt.source self.vid_path, self.vid_writer = None, None - self.webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( - ('rtsp://', 'rtmp://', 'http://', 'https://')) - if self.webcam: - opt.view_img = check_imshow() - torch.backends.cudnn.benchmark = True # set True to speed up constant image size inference - self.dataset = LoadStreams(source, img_size=self.imgsize, stride=self.stride) - else: - self.dataset = LoadImages(source, img_size=self.imgsize, stride=self.stride) + torch.backends.cudnn.benchmark = True # set True to speed up constant image size inference # defining option flags self.save_img = not opt.nosave and not source.endswith('.txt') # save inference images @@ -89,70 +78,52 @@ def __init__(self): if not opt.nosave: self.save_dir.mkdir(parents=True) # make dir - # # Second-stage classifier. 
- # # This is commented because we are just detecting people, and we don't need to classify the detected stuffs - # classify = False - # if classify: - # modelc = load_classifier(name='resnet101', n=2) # initialize - # modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() + # Names and colors of the detected objects' classes self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names self.colors = [[np.random.randint(0, 255) for _ in range(3)] for _ in self.names] def process_video_file(self): - # Get names and colors - for path, img, im0s, vid_cap in self.dataset: - self.detect(img, self.imgsize, im0s, path, vid_cap) + self.dataset = LoadImages(opt.source, img_size=self.imgsize, stride=self.stride) + for path, img, img_original, vid_cap in self.dataset: + self.detect(img, self.imgsize, img_original, path, vid_cap) + + def process_frame(self, image_frame): - im0s = np.array(image_frame) - image_frame = cv2.resize(image_frame, (576,640)) - img = np.array(image_frame).transpose(2, 0, 1) - self.detect(img, self.imgsize, image_frame) - def detect(self, img, imgsize, im0s, path=None, vid_cap=False): - old_img_w = old_img_h = imgsize - old_img_b = 1 ### idk what is this . need to understand later - - startTime = 0 + img_original = np.array(image_frame) + img = cv2.resize(image_frame, (576,640)) + img = np.array(img).transpose(2, 0, 1) + return self.detect(img, self.imgsize, img_original) + + + def detect(self, img, imgsize, im0, path=None, vid_cap=False): + startTime = time.time() img = torch.from_numpy(img).to(self.device) img = img.half() if self.use_half_precision else img.float() # uint8 to FP16 or FP32 img /= 255.0 # 0~255 to 0.0~1.0 if img.ndimension() == 3: img = img.unsqueeze(0) - # Warmup. Not sure why or even if this is necessary. ### to be tested - if self.device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]): - old_img_b = img.shape[0] - old_img_h = img.shape[2] - old_img_w = img.shape[3] + # Warmup. Not sure why or even if this is necessary. ### to be tested. 
### maybe a flag should be added to avoid multiple warmups + if self.device.type != 'cpu' and (img.shape[0]!=1 or imgsize != img.shape[2] or imgsize != img.shape[3]): for i in range(3): self.model(img, augment=opt.augment)[0] # Inference - time1 = time_synchronized() pred = self.model(img, augment=opt.augment)[0] - time2 = time_synchronized() # Apply NMS pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) - time3 = time_synchronized() - - # # Apply second-stage classifier - # if classify: - # pred = apply_classifier(pred, modelc, img, im0s) #TESTING ###to be removed if len(pred)!=1: - print("\n",len(pred)) + print("\n WARNING IN YOLOV7: if len(pred)!=1: ",len(pred)) exit() # Process detections for i, det in enumerate(pred): # detections per image - if self.webcam: # batch_size >= 1 - p, output_string, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), self.dataset.count - else: - p, output_string, im0, frame = path, '', im0s, getattr(self.dataset, 'frame', 0) + output_string = '' - gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh if len(det)!=0: # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() @@ -192,17 +163,16 @@ def detect(self, img, imgsize, im0s, path=None, vid_cap=False): for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class output_string += f"{n} {self.names[int(c)]}, " # add to string + else: + bbox_xyxy=None - # Print time - print(f'{output_string}Done. ({(1E3 * (time2 - time1)):.1f}ms) Inference, ({(1E3 * (time3 - time2)):.1f}ms) NMS') - + print(f'[INFO] {output_string}') # Show result on live cv2 window view: FPS - if opt.show_fps and self.dataset.mode != 'image' : + if opt.show_fps : currentTime = time.time() fps = 1/(currentTime - startTime) - startTime = currentTime - cv2.putText(im0, "FPS: " + str(round(fps, 3)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0,255,0),2) + cv2.putText(im0, "FPS: " + str(round(fps, 4)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0,255,0),2) # Show result on live cv2 window view: image if opt.view_img: cv2.imshow("yolov7 preview", im0) @@ -210,26 +180,27 @@ def detect(self, img, imgsize, im0s, path=None, vid_cap=False): # Save results (image with detections) to local file. 
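+                # When saving is enabled and a source path is available, annotated frames are written
+                # under the auto-incremented run directory; a single cv2.VideoWriter is (re)created
+                # whenever the output path changes and is then reused for every later frame of that
+                # video, falling back to 30 FPS and the frame's own size when no video capture exists.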
- if self.save_img: - p = Path(p) # to Path - save_path = str(self.save_dir / p.name) # img.jpg - if self.dataset.mode == 'image': - cv2.imwrite(save_path, im0) - print(f" The image with the result is saved in: {save_path}") - else: # 'video' or 'stream' - if self.vid_path != save_path: # new video - self.vid_path = save_path - if isinstance(self.vid_writer, cv2.VideoWriter): - self.vid_writer.release() # release previous video writer - if vid_cap: # video - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path += '.mp4' - self.vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - self.vid_writer.write(im0) + if self.save_img and path!=None: + path = Path(path) # to Path + save_path = str(self.save_dir / path.name) # img.jpg + # if self.dataset.mode == 'image': + # cv2.imwrite(save_path, im0) + # print(f" The image with the result is saved in: {save_path}") + # else: # 'video' or 'stream' + if self.vid_path != save_path: # new video + self.vid_path = save_path + if isinstance(self.vid_writer, cv2.VideoWriter): + self.vid_writer.release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path += '.mp4' + self.vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + self.vid_writer.write(im0) + return bbox_xyxy if __name__ == '__main__': @@ -275,7 +246,7 @@ def detect(self, img, imgsize, im0s, path=None, vid_cap=False): with torch.no_grad(): #deactivate the autograd engine to save memory and speed up computations. 
On cpu, the speed is 15% faster with this yolo_sort_tracker=Yolo_sort_tracker() - if webcam:=1:#opt.source.isnumeric(): + if webcam:=opt.source.isnumeric(): mywebcam = cv2.VideoCapture(0) while 1: _, image_frame = mywebcam.read() From 5bbe8825a774bf25d182283429022b5ae0a92691 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 3 Aug 2023 17:43:12 +0700 Subject: [PATCH 04/67] added default options so command line opt is no longer required to run model also cleaned up some confusing variable names --- human_detection/yolov7 skeleton/main.py | 118 +++++++++++++++------- human_detection/yolov7 skeleton/readme.md | 4 +- 2 files changed, 85 insertions(+), 37 deletions(-) diff --git a/human_detection/yolov7 skeleton/main.py b/human_detection/yolov7 skeleton/main.py index 599cd350..af53fa52 100644 --- a/human_detection/yolov7 skeleton/main.py +++ b/human_detection/yolov7 skeleton/main.py @@ -12,10 +12,10 @@ import sort """Function to draw bounding boxes""" -def draw_boxes(img, bbox, identities=None, categories=None, confidences = None, names=None, colors = None): +def draw_boxes(img, bbox, identities=None, categories=None, confidences=None, names=None, colors=None, thickness=2, hide_bounding_box=False,hide_labels=False): for i, box in enumerate(bbox): x1, y1, x2, y2 = [int(i) for i in box] - tl = opt.thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness + tl = thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness cat = int(categories[i]) if categories is not None else 0 id = int(identities[i]) if identities is not None else 0 @@ -23,10 +23,10 @@ def draw_boxes(img, bbox, identities=None, categories=None, confidences = None, color = colors[cat] - if not opt.nobbox: + if not hide_bounding_box: cv2.rectangle(img, (x1, y1), (x2, y2), color, tl) - if not opt.nolabel: + if not hide_labels: label = str(id) + ":"+ names[cat] if identities is not None else f'{names[cat]} {confidences[i]:.2f}' tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] @@ -48,18 +48,27 @@ def draw_track_lines(im0, tracks, sort_tracker, thickness): cv2.line(im0, current_point, next_point, track_color, thickness=thickness) class Yolo_sort_tracker: - def __init__(self): + def __init__(self, + device='cpu', + weights_file='yolov7.pt', + img_size=640, + traced_model_already_exists=True, + source='webcam', + save_result=False, + output_folder='/', + output_file_name='yolov7results.mp4' + ): # Initialize set_logging() - self.device = select_device(opt.device) + self.device = select_device(device) self.use_half_precision = self.device.type != 'cpu' # enable half precision if on GPU (only supported on CUDA) # Load model - self.model = attempt_load(opt.weights_file, map_location=self.device) # load FP32 model + self.model = attempt_load(weights_file, map_location=self.device) # load FP32 model self.stride = int(self.model.stride.max()) # model stride, which is the step size or the number of units the sliding window moves when performing operations like convolution or pooling - self.imgsize = check_img_size(opt.img_size, s=self.stride) # check img_size - if not opt.no_trace: - self.model = TracedModel(self.model, self.device, opt.img_size) + self.imgsize = check_img_size(img_size, s=self.stride) # check img_size + if not traced_model_already_exists: + self.model = TracedModel(self.model, self.device, img_size) if self.use_half_precision: self.model.half() # to FP16 @@ -68,14 +77,13 @@ def __init__(self): 
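+        # Warm-up: run inference once on a dummy zero tensor so any lazy initialization
+        # (kernel setup, memory allocation) happens here rather than on the first real frame.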
self.model(torch.zeros(1, 3, self.imgsize, self.imgsize).to(self.device).type_as(next(self.model.parameters()))) # Set Dataloader - source = opt.source self.vid_path, self.vid_writer = None, None torch.backends.cudnn.benchmark = True # set True to speed up constant image size inference # defining option flags - self.save_img = not opt.nosave and not source.endswith('.txt') # save inference images - self.save_dir = Path(increment_path(Path(opt.project) / opt.name)) # increment run - if not opt.nosave: + self.save_dir = Path(increment_path(Path(output_folder) / output_file_name)) # increment run + self.save_result=save_result + if save_result: self.save_dir.mkdir(parents=True) # make dir # Names and colors of the detected objects' classes @@ -83,10 +91,24 @@ def __init__(self): self.colors = [[np.random.randint(0, 255) for _ in range(3)] for _ in self.names] - def process_video_file(self): + def process_video_file(self,opt): self.dataset = LoadImages(opt.source, img_size=self.imgsize, stride=self.stride) for path, img, img_original, vid_cap in self.dataset: - self.detect(img, self.imgsize, img_original, path, vid_cap) + self.detect(img, self.imgsize, img_original, + path=path, + vid_cap=vid_cap, + show_fps=opt.show_fps, + view_img=opt.view_img, + show_track_lines=opt.show_track_lines, + line_thickness=opt.thickness, + disable_tracking=opt.disable_tracking, + hide_bounding_box=opt.nobbox, + hide_labels=opt.nolabel, + enable_augment=opt.augment, + conf_thres=opt.conf_thres, + iou_thres=opt.iou_thres, + detection_object_classes=opt.classes, + enable_agnostic_nms=opt.agnostic_nms) def process_frame(self, image_frame): @@ -96,7 +118,23 @@ def process_frame(self, image_frame): return self.detect(img, self.imgsize, img_original) - def detect(self, img, imgsize, im0, path=None, vid_cap=False): + def detect(self, + img, imgsize, im0, + path=None, + vid_cap=False, + show_fps=True, + view_img=True, + show_track_lines=True, + line_thickness=2, + disable_tracking=False, + hide_bounding_box=False, + hide_labels=False, + enable_augment=True, + conf_thres=0.25, + iou_thres=0.45, + detection_object_classes=[0,], + enable_agnostic_nms=True + ): startTime = time.time() img = torch.from_numpy(img).to(self.device) img = img.half() if self.use_half_precision else img.float() # uint8 to FP16 or FP32 @@ -107,13 +145,13 @@ def detect(self, img, imgsize, im0, path=None, vid_cap=False): # Warmup. Not sure why or even if this is necessary. ### to be tested. 
### maybe a flag should be added to avoid multiple warmups if self.device.type != 'cpu' and (img.shape[0]!=1 or imgsize != img.shape[2] or imgsize != img.shape[3]): for i in range(3): - self.model(img, augment=opt.augment)[0] + self.model(img, augment=enable_augment)[0] # Inference - pred = self.model(img, augment=opt.augment)[0] + pred = self.model(img, augment=enable_augment)[0] # Apply NMS - pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) + pred = non_max_suppression(pred, conf_thres, iou_thres, classes=detection_object_classes, agnostic=enable_agnostic_nms) #TESTING ###to be removed if len(pred)!=1: @@ -133,7 +171,7 @@ def detect(self, img, imgsize, im0, path=None, vid_cap=False): for x1,y1,x2,y2,conf,detclass in det.cpu().detach().numpy(): dets_to_sort = np.vstack((dets_to_sort, np.array([x1, y1, x2, y2, conf, detclass]))) - if opt.track: + if not disable_tracking: tracked_dets = sort_tracker.update(dets_to_sort, unique_color=True) tracks =sort_tracker.getTrackers() if len(tracked_dets)>0: @@ -141,8 +179,8 @@ def detect(self, img, imgsize, im0, path=None, vid_cap=False): identities = tracked_dets[:, 8] categories = tracked_dets[:, 4] confidences = None - if opt.show_track_lines: - draw_track_lines(im0, tracks, sort_tracker, opt.thickness) + if show_track_lines: + draw_track_lines(im0, tracks, sort_tracker, line_thickness) else: ### not sure if this is possible bbox_xyxy = dets_to_sort[:,:4] @@ -157,7 +195,7 @@ def detect(self, img, imgsize, im0, path=None, vid_cap=False): categories = dets_to_sort[:, 5] confidences = dets_to_sort[:, 4] # draw bounding boxes for visualization - im0 = draw_boxes(im0, bbox_xyxy, identities, categories, confidences, self.names, self.colors) + im0 = draw_boxes(im0, bbox_xyxy, identities, categories, confidences, self.names, self.colors, line_thickness,hide_bounding_box,hide_labels) # prepare print results for c in det[:, -1].unique(): @@ -169,18 +207,18 @@ def detect(self, img, imgsize, im0, path=None, vid_cap=False): print(f'[INFO] {output_string}') # Show result on live cv2 window view: FPS - if opt.show_fps : + if show_fps : currentTime = time.time() fps = 1/(currentTime - startTime) cv2.putText(im0, "FPS: " + str(round(fps, 4)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0,255,0),2) # Show result on live cv2 window view: image - if opt.view_img: + if view_img: cv2.imshow("yolov7 preview", im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) to local file. - if self.save_img and path!=None: + if self.save_result and path!=None: path = Path(path) # to Path save_path = str(self.save_dir / path.name) # img.jpg # if self.dataset.mode == 'image': @@ -209,8 +247,8 @@ def detect(self, img, imgsize, im0, path=None, vid_cap=False): parser = argparse.ArgumentParser() # Files and devices: parser.add_argument('--weights-file', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)') - parser.add_argument('--no-trace', action='store_true', help='don`t trace model (if traced_model.pt already exist this can save time)') - parser.add_argument('--source', type=str, default='inference/images', help='video source to process') # mp4 file/folder, 0 for webcam + parser.add_argument('--no-trace', action='store_true', help='don`t trace model (if traced_model.pt already exist this can save time)') # Model tracing determines all the operations that are executed when a model parses input data through its linear layers. Just like downloading a model, it only needs to run once. 
Once the traced_model.pt is generated, this operation is no longer needed + parser.add_argument('--source', type=str, default='inference/images', help='video source to process') # mp4 file/folder, 'webcam' for webcam parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') # Hyperparameters: parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') @@ -227,7 +265,7 @@ def detect(self, img, imgsize, im0, path=None, vid_cap=False): parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') # SORT tracking options - parser.add_argument('--track', action='store_true', help='run tracking') + parser.add_argument('--disable-tracking', action='store_false', help='disable tracking') # Appearance option (what to display on screen or output video) parser.add_argument('--show-track-lines', action='store_true', help='show tracked path') parser.add_argument('--show-fps', action='store_true', help='show fps') @@ -236,23 +274,31 @@ def detect(self, img, imgsize, im0, path=None, vid_cap=False): parser.add_argument('--nolabel', action='store_true', help='don`t show label') opt = parser.parse_args() - print(opt) - + # define the SORT tracker sort_tracker = sort.Sort(max_age=5, min_hits=2, iou_threshold=0.2) with torch.no_grad(): #deactivate the autograd engine to save memory and speed up computations. On cpu, the speed is 15% faster with this - yolo_sort_tracker=Yolo_sort_tracker() - - if webcam:=opt.source.isnumeric(): + yolo_sort_tracker=Yolo_sort_tracker( + device=opt.device, + weights_file=opt.weights_file, + img_size=opt.img_size, + traced_model_already_exists=opt.no_trace, + source=opt.source, + save_result=not opt.nosave, + output_folder=opt.project, + output_file_name=opt.name + ) + + if opt.source=='webcam': mywebcam = cv2.VideoCapture(0) while 1: _, image_frame = mywebcam.read() bounding_boxes=yolo_sort_tracker.process_frame(image_frame) print(bounding_boxes) else: - yolo_sort_tracker.process_video_file() + yolo_sort_tracker.process_video_file(opt) ### fix up file specific stuffs \ No newline at end of file diff --git a/human_detection/yolov7 skeleton/readme.md b/human_detection/yolov7 skeleton/readme.md index 594fd9a8..7afcaa33 100644 --- a/human_detection/yolov7 skeleton/readme.md +++ b/human_detection/yolov7 skeleton/readme.md @@ -4,4 +4,6 @@ Usage: 1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq -2. run ```python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --track --show-track-lines --classes 0 --no-trace --source video.mp4 ``` \ No newline at end of file +2. 
run ```python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source video.mp4 ``` to run inference on __video.mp4__ + +to run inference on webcam, use ```python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source webcam``` From 848af703451f0852b5e94117b2ff7ec8b741e626 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 3 Aug 2023 17:45:22 +0700 Subject: [PATCH 05/67] rename main.py to yolov7.py --- human_detection/yolov7 skeleton/{main.py => yolov7.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename human_detection/yolov7 skeleton/{main.py => yolov7.py} (100%) diff --git a/human_detection/yolov7 skeleton/main.py b/human_detection/yolov7 skeleton/yolov7.py similarity index 100% rename from human_detection/yolov7 skeleton/main.py rename to human_detection/yolov7 skeleton/yolov7.py From b354b3c6bb26885f48bee472757acf2e65fc1658 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 3 Aug 2023 17:52:41 +0700 Subject: [PATCH 06/67] initialize sort tracker in the constructor --- human_detection/yolov7 skeleton/readme.md | 4 ++-- human_detection/yolov7 skeleton/yolov7.py | 12 +++++------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/human_detection/yolov7 skeleton/readme.md b/human_detection/yolov7 skeleton/readme.md index 7afcaa33..61062260 100644 --- a/human_detection/yolov7 skeleton/readme.md +++ b/human_detection/yolov7 skeleton/readme.md @@ -4,6 +4,6 @@ Usage: 1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq -2. run ```python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source video.mp4 ``` to run inference on __video.mp4__ +2. 
run ```python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source video.mp4 ``` to run inference on __video.mp4__ -to run inference on webcam, use ```python main.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source webcam``` +to run inference on webcam, use ```python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source webcam``` diff --git a/human_detection/yolov7 skeleton/yolov7.py b/human_detection/yolov7 skeleton/yolov7.py index af53fa52..f1de6e21 100644 --- a/human_detection/yolov7 skeleton/yolov7.py +++ b/human_detection/yolov7 skeleton/yolov7.py @@ -90,6 +90,8 @@ def __init__(self, self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names self.colors = [[np.random.randint(0, 255) for _ in range(3)] for _ in self.names] + # Initialize SORT tracker + self.sort_tracker = sort.Sort(max_age=5, min_hits=2, iou_threshold=0.2) def process_video_file(self,opt): self.dataset = LoadImages(opt.source, img_size=self.imgsize, stride=self.stride) @@ -172,15 +174,15 @@ def detect(self, dets_to_sort = np.vstack((dets_to_sort, np.array([x1, y1, x2, y2, conf, detclass]))) if not disable_tracking: - tracked_dets = sort_tracker.update(dets_to_sort, unique_color=True) - tracks =sort_tracker.getTrackers() + tracked_dets = self.sort_tracker.update(dets_to_sort, unique_color=True) + tracks =self.sort_tracker.getTrackers() if len(tracked_dets)>0: bbox_xyxy = tracked_dets[:,:4] identities = tracked_dets[:, 8] categories = tracked_dets[:, 4] confidences = None if show_track_lines: - draw_track_lines(im0, tracks, sort_tracker, line_thickness) + draw_track_lines(im0, tracks, self.sort_tracker, line_thickness) else: ### not sure if this is possible bbox_xyxy = dets_to_sort[:,:4] @@ -275,10 +277,6 @@ def detect(self, opt = parser.parse_args() - # define the SORT tracker - sort_tracker = sort.Sort(max_age=5, - min_hits=2, - iou_threshold=0.2) with torch.no_grad(): #deactivate the autograd engine to save memory and speed up computations. 
On cpu, the speed is 15% faster with this yolo_sort_tracker=Yolo_sort_tracker( From 9989390b2063ca26763bd038d0a2248897a5f628 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 3 Aug 2023 17:57:05 +0700 Subject: [PATCH 07/67] added more comments for the options --- human_detection/yolov7 skeleton/yolov7.py | 24 +++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/human_detection/yolov7 skeleton/yolov7.py b/human_detection/yolov7 skeleton/yolov7.py index f1de6e21..211857ff 100644 --- a/human_detection/yolov7 skeleton/yolov7.py +++ b/human_detection/yolov7 skeleton/yolov7.py @@ -113,24 +113,24 @@ def process_video_file(self,opt): enable_agnostic_nms=opt.agnostic_nms) - def process_frame(self, image_frame): + def process_frame(self, image_frame, view_img=False): img_original = np.array(image_frame) img = cv2.resize(image_frame, (576,640)) img = np.array(img).transpose(2, 0, 1) - return self.detect(img, self.imgsize, img_original) + return self.detect(img, self.imgsize, img_original,view_img=view_img) def detect(self, img, imgsize, im0, - path=None, - vid_cap=False, - show_fps=True, - view_img=True, - show_track_lines=True, - line_thickness=2, - disable_tracking=False, - hide_bounding_box=False, - hide_labels=False, + path=None, # for saving result + vid_cap=False, # for saving result + show_fps=True, # for viewing result + view_img=True, # for viewing result + show_track_lines=True, # for viewin result + line_thickness=2, # for viewing result + disable_tracking=False, + hide_bounding_box=False, # for viewing result + hide_labels=False, # for viewing result enable_augment=True, conf_thres=0.25, iou_thres=0.45, @@ -294,7 +294,7 @@ def detect(self, mywebcam = cv2.VideoCapture(0) while 1: _, image_frame = mywebcam.read() - bounding_boxes=yolo_sort_tracker.process_frame(image_frame) + bounding_boxes=yolo_sort_tracker.process_frame(image_frame,view_img=True) print(bounding_boxes) else: yolo_sort_tracker.process_video_file(opt) From a54bf7b2dd85de715e953a15ed2c309f17b43af7 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 3 Aug 2023 18:23:00 +0700 Subject: [PATCH 08/67] output tracking identities and confidences --- human_detection/yolov7 skeleton/yolov7.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/human_detection/yolov7 skeleton/yolov7.py b/human_detection/yolov7 skeleton/yolov7.py index 211857ff..c8995267 100644 --- a/human_detection/yolov7 skeleton/yolov7.py +++ b/human_detection/yolov7 skeleton/yolov7.py @@ -96,7 +96,7 @@ def __init__(self, def process_video_file(self,opt): self.dataset = LoadImages(opt.source, img_size=self.imgsize, stride=self.stride) for path, img, img_original, vid_cap in self.dataset: - self.detect(img, self.imgsize, img_original, + results = self.detect(img, self.imgsize, img_original, path=path, vid_cap=vid_cap, show_fps=opt.show_fps, @@ -111,27 +111,28 @@ def process_video_file(self,opt): iou_thres=opt.iou_thres, detection_object_classes=opt.classes, enable_agnostic_nms=opt.agnostic_nms) + print(results) def process_frame(self, image_frame, view_img=False): img_original = np.array(image_frame) img = cv2.resize(image_frame, (576,640)) img = np.array(img).transpose(2, 0, 1) - return self.detect(img, self.imgsize, img_original,view_img=view_img) + return self.detect(img, self.imgsize, img_original,view_img=view_img,show_fps=view_img) def detect(self, img, imgsize, im0, path=None, # for saving result vid_cap=False, # for saving result - show_fps=True, # for viewing result - view_img=True, # for 
viewing result + show_fps=False, # for viewing result + view_img=False, # for viewing result show_track_lines=True, # for viewin result line_thickness=2, # for viewing result disable_tracking=False, hide_bounding_box=False, # for viewing result hide_labels=False, # for viewing result - enable_augment=True, + enable_augment=False, conf_thres=0.25, iou_thres=0.45, detection_object_classes=[0,], @@ -180,7 +181,7 @@ def detect(self, bbox_xyxy = tracked_dets[:,:4] identities = tracked_dets[:, 8] categories = tracked_dets[:, 4] - confidences = None + confidences = None ###dets_to_sort[:, 4] if show_track_lines: draw_track_lines(im0, tracks, self.sort_tracker, line_thickness) else: @@ -204,7 +205,7 @@ def detect(self, n = (det[:, -1] == c).sum() # detections per class output_string += f"{n} {self.names[int(c)]}, " # add to string else: - bbox_xyxy=None + bbox_xyxy=identities=confidences=None print(f'[INFO] {output_string}') @@ -240,7 +241,7 @@ def detect(self, save_path += '.mp4' self.vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) self.vid_writer.write(im0) - return bbox_xyxy + return bbox_xyxy, identities, confidences if __name__ == '__main__': @@ -267,7 +268,7 @@ def detect(self, parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') # SORT tracking options - parser.add_argument('--disable-tracking', action='store_false', help='disable tracking') + parser.add_argument('--disable-tracking', action='store_true', help='disable tracking') # Appearance option (what to display on screen or output video) parser.add_argument('--show-track-lines', action='store_true', help='show tracked path') parser.add_argument('--show-fps', action='store_true', help='show fps') @@ -294,7 +295,7 @@ def detect(self, mywebcam = cv2.VideoCapture(0) while 1: _, image_frame = mywebcam.read() - bounding_boxes=yolo_sort_tracker.process_frame(image_frame,view_img=True) + bounding_boxes, identities, confidences=yolo_sort_tracker.process_frame(image_frame,view_img=True) print(bounding_boxes) else: yolo_sort_tracker.process_video_file(opt) From 4fe1df7bf7cbd2b72fb4d04c65943465b1eed185 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 3 Aug 2023 18:23:52 +0700 Subject: [PATCH 09/67] added example_usage.py --- .../yolov7 skeleton/example_usage.py | 22 +++++++++++++++++++ human_detection/yolov7 skeleton/readme.md | 8 +++++-- 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 human_detection/yolov7 skeleton/example_usage.py diff --git a/human_detection/yolov7 skeleton/example_usage.py b/human_detection/yolov7 skeleton/example_usage.py new file mode 100644 index 00000000..b6933fde --- /dev/null +++ b/human_detection/yolov7 skeleton/example_usage.py @@ -0,0 +1,22 @@ +''' +NOTE: +change [ view_img=False ] to [ view_img=True ] to enable live preview +''' + +import yolov7 +import torch +import cv2 + + +with torch.no_grad(): + yolo_sort_tracker=yolov7.Yolo_sort_tracker() + mywebcam = cv2.VideoCapture(0) + + +while 1: + with torch.no_grad(): + image_frame = mywebcam.read()[1] + bounding_boxes, identities, confidences=yolo_sort_tracker.process_frame(image_frame,view_img=False) + print(bounding_boxes) + print(identities) + print("\n") diff --git a/human_detection/yolov7 skeleton/readme.md b/human_detection/yolov7 skeleton/readme.md index 61062260..6774853a 100644 --- a/human_detection/yolov7 skeleton/readme.md +++ b/human_detection/yolov7 skeleton/readme.md @@ -4,6 +4,10 @@ 
Usage: 1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq -2. run ```python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source video.mp4 ``` to run inference on __video.mp4__ +2. run ``` +python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source video.mp4 +``` to run inference on __video.mp4__ -to run inference on webcam, use ```python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source webcam``` +to run inference on webcam, use ``` +python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source webcam +``` From f23b58c3ca8697e1f58ebfdbefe5306ba012ee93 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 3 Aug 2023 20:50:19 +0700 Subject: [PATCH 10/67] Create human_detection_node.py --- .../yolov7 skeleton/human_detection_node.py | 470 ++++++++++++++++++ 1 file changed, 470 insertions(+) create mode 100644 human_detection/yolov7 skeleton/human_detection_node.py diff --git a/human_detection/yolov7 skeleton/human_detection_node.py b/human_detection/yolov7 skeleton/human_detection_node.py new file mode 100644 index 00000000..f7f44b12 --- /dev/null +++ b/human_detection/yolov7 skeleton/human_detection_node.py @@ -0,0 +1,470 @@ +import argparse +from cv_bridge import CvBridge +from geometry_msgs.msg import Point +from geometry_msgs.msg import PoseStamped +import math +import numpy as np +import os +import rclpy +from rclpy.node import Node +from sensor_msgs.msg import Image, PointCloud2 +import sensor_msgs_py.point_cloud2 as pc2 +from std_msgs.msg import Bool, Float32, String +import tarfile +import tensorflow as tf +import tensorflow_hub as hub +import time +import urllib.request + + +#my own imports +from ascii_numbers import ascii_numbers + + +#config constants +MODEL_URL = "https://tfhub.dev/google/movenet/multipose/lightning/1?tf-hub-format=compressed" +SAVED_MODEL_PATH = "/home/trailbot/trail_ws/multipose_model" +people_detection_threshold = 0.4 +point_detection_threshold = 0.4 +distance_where_lidar_stops_working = 0.4 +image_width=1280 +image_height=1024 +camera_transformation_k = """ + 628.5359544 0 676.9575694 + 0 627.7249542 532.7206716 + 0 0 1 +""" +rotation_matrix = """ + -0.007495781893 -0.0006277316155 0.9999717092 + -0.9999516401 -0.006361853422 -0.007499625104 + 0.006366381192 -0.9999795662 -0.0005800141927 +""" +translation_vector = np.array([-0.06024059837, -0.08180891509, -0.3117851288]) +# if this is -1, node will publish constantly (as camera FPS) +publishing_frequency = 0.5 #Hz + + +#globals +parser_args = tuple() +model = None +debug_mode = False + + +def parse_arguments(): + """ + handle command line arguments + """ + parser = argparse.ArgumentParser(description='Example command-line parser') + parser.add_argument( + '-v', + '--verbose', + action='store_true', + help='Enable print_verbose_only outputs') + parser.add_argument( + '-d', + '--download_model', + action='store_true', + help='Flag to download the model. 
This must be ran at least once') + parser.add_argument( + '-r', + '--ros-args', + action='store_true', + help='temp fix') + return parser.parse_args() + + +def download_model(): + # Create the directory if it doesn't exist + os.makedirs(os.path.dirname(SAVED_MODEL_PATH), exist_ok=True) + + # Download the compressed model from the URL + model_path, _ = urllib.request.urlretrieve(MODEL_URL) + + # Extract the compressed model to the specified path + with tarfile.open(model_path, "r:gz") as tar: + tar.extractall(SAVED_MODEL_PATH) + + model = hub.load(SAVED_MODEL_PATH).signatures['serving_default'] + return model + + +def load_saved_model(): + """ + load the saved model from SAVED_MODEL_PATH + """ + if not os.path.exists(SAVED_MODEL_PATH): + raise FileNotFoundError(f"Model not found at {SAVED_MODEL_PATH}") + model = hub.load(SAVED_MODEL_PATH).signatures['serving_default'] + return model + + +def print_verbose_only(*args, **kwargs): + """ + print only if verbose==True + """ + if parser_args.verbose: + print(*args, **kwargs) + + +def movenet(input_image, model): + """ + movenet model: + Gets input image and outputs array of keypoints with certainty score + downloaded from #https://tfhub.dev/google/movenet/multipose/lightning/1 + """ + # SavedModel format expects tensor type of int32. + input_image = tf.cast(input_image, dtype=tf.int32) + outputs = model(input_image) # Output is a [1, 6, 56] tensor. + + # The first 17 * 3 elements are the keypoint locations and scores in the + # format: [y_0, x_0, s_0, y_1, x_1, s_1, …, y_16, x_16, s_16], where y_i, + # x_i, s_i are the yx-coordinates (normalized to image frame, e.g. range + # in [0.0, 1.0]) and confidence scores of the i-th joint correspondingly. + # The order of the 17 keypoint joints is: [nose, left eye, right eye, left + # ear, right ear, left shoulder, right shoulder, left elbow, right elbow, + # left wrist, right wrist, left hip, right hip, left knee, right knee, + # left ankle, right ankle]. 
The remaining 5 elements [ymin, xmin, ymax, + # xmax, score] represent the region of the bounding box (in normalized + # coordinates) and the confidence score of the instance + keypoints = outputs['output_0'].numpy() + + count_of_people = np.sum(keypoints[0, :, -1] > people_detection_threshold ) + # print_verbose_only("count_of_people", count_of_people) + + # there are 6 people + # there are 17 body points and therefore 3*17=51 numbers per person + return keypoints[:, :, :51].reshape((6, 17, 3))[0] + + +def is_there_person(points): + """ + return True/False of whether there is a person + """ + visible_joints = np.sum(points[:, -1] > point_detection_threshold) + return visible_joints >= 3 + + +def is_person_facing_camera(points): + """ + return True/False depending on if the person is facing camera or not + """ + LEFT_EYE = 1 + NOSE = 0 + RIGHT_EYE = 2 + visible_joints_face = np.sum(points[:5, -1] > point_detection_threshold) + facing_forward = points[LEFT_EYE][1] > points[NOSE][1] > points[RIGHT_EYE][1] + return visible_joints_face >= 3 and facing_forward + + +def get_heading_angle( + points, + fov=90, + image_width=1, + offset=0, + scaling=1): + """ + get the heading angle from the camera's perspective to the person, + in degree, relative to the center of the field of view + """ + visible_points = points[points[:, -1] > point_detection_threshold] + x_mean = np.mean(visible_points[:, 1]) + x_angle_radian = math.atan( + (x_mean - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) + return offset + scaling * math.degrees(x_angle_radian) + + +def get_x_y_coord( + points, + ): + visible_points = points[points[:, -1] > point_detection_threshold] + x_mean = np.mean(visible_points[:, 1]) + y_mean = np.mean(visible_points[:, 0]) + return x_mean * image_width, y_mean * image_height + + +def process_frame(image, person_array): + """ + process a frame. Determine keypoints and number of people and + heading angle. 
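Note: the multipose model reports up to six people per frame, but only
person_array[0] is filled in here with the first detection; the image is
padded and resized to 256x256 before it is passed to the model.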
+ """ + + input_size = 256 + input_image = tf.expand_dims(image, axis=0) + input_image = tf.image.resize_with_pad(input_image, input_size, input_size) + + # Run model inference + keypoints = movenet(input_image, model) + + person_array[0].heading_angle = get_heading_angle(keypoints) + person_array[0].x, person_array[0].y = get_x_y_coord(keypoints) + person_array[0].on_screen = is_there_person(keypoints) + + is_there_anyone = is_there_person(keypoints) + return is_there_anyone + + +class Person: + """ + struct to store information for a detected person + """ + def __init__(self): + self.x = -1.0 + self.y = -1.0 + self.z = -1.0 + self.on_screen = False + self.heading_angle = 0.0 + + +class LidarCameraSubscriber(Node): + def print_and_log(self, string): + self.get_logger().info(string) + print(string) + + + def __init__(self): + #make array of 6 person + self.person_array = [Person() for _ in range(6)] + self.is_there_anyone = False + self.cur_state = "" + + super().__init__('image_subscriber') + self.camera_subscription = self.create_subscription( + Image, + 'camera', + self.camera_callback, + 10) + self.camera_subscription + self.bridge = CvBridge() + + self.lidar_subscription = self.create_subscription( + PointCloud2, + 'velodyne_points', + self.lidar_callback, + 10) + self.lidar_subscription + + self.state_subscription = self.create_subscription( + String, + '/trailbot_state', + self.state_callback, + 10) + + + #topics to publish + self.is_person_publisher = self.create_publisher( + Bool, + 'is_person_topic', + 10) + # self.angle_publisher = self.create_publisher( + # Float32, + # 'angle_topic', + # 10) + self.pose_publisher = self.create_publisher( + PoseStamped, + 'target_location', + 10) + self.timestamp = 0 + #run the publish_message function according to publishing_frequency + self.create_timer(1/publishing_frequency, self.publish_message) + self.print_and_log('Human Detection ready...') + for num in ascii_numbers[-6:]: + self.print_and_log(f"\n{num}\n") + time.sleep(1) + + + def state_callback(self, msg): + self.cur_state = msg.data + + + def camera_callback(self, msg): + if self.cur_state!="SearchState": + return + + cv_image = self.bridge.imgmsg_to_cv2( + msg, desired_encoding='passthrough') + self.is_there_anyone = process_frame(cv_image,self.person_array) + self.timestamp = msg.header.stamp + + + def lidar_callback(self, msg): + if self.cur_state!="SearchState": + return + + if not self.is_there_anyone: + return + + # Deserialize PointCloud2 data into xyz points + point_gen = pc2.read_points( + msg, field_names=( + "x", "y", "z"), skip_nans=True) + # points = np.array(list(point_gen)) + points = [[x, y, z] for x, y, z in point_gen] + points = np.array(points) + points2d = convert_to_camera_frame(points) + + #update depth for every person + for person in self.person_array: + if not person.on_screen: + person.z = -1.0 + else: + person.z = estimate_depth(person.x, person.y, points2d) + self.timestamp = msg.header.stamp + # if this is -1, node will publish constantly (as camera FPS) + if not publishing_frequency>0: + self.publish_message("lidar") + t + def publish_message(self,source_str="timer"): + """ publish message is somebody is detected""" + if self.cur_state!="SearchState": + return + if not self.is_there_anyone: + return + #person0 for debugging purpse + person0 = self.person_array[0] + message = f"{source_str:<7}" + message += f" person: {'YES' if self.is_there_anyone else 'NO '}" + message += f" angle: {person0.heading_angle:<20}" + message += f"person_coordinate: 
{person0.x:<22} {person0.y:<22} {person0.z:22}" + print_verbose_only(message) + self.print_and_log(message) + + # Publish the message + is_person_msg = Bool() + is_person_msg.data = bool(self.is_there_anyone) + self.is_person_publisher.publish(is_person_msg) + + # angle_msg = Float32() + # angle_msg.data = angle + # self.angle_publisher.publish(angle_msg) + + pose_stamped_msg = PoseStamped() + pose_stamped_msg.header.stamp = self.timestamp + pose_stamped_msg.header.frame_id = "velodyne" + + lidar_x,lidar_y,lidar_z = convert_to_lidar_frame((person0.x,person0.y,person0.z)) + + #position + pose_stamped_msg.pose.position.x = lidar_x + pose_stamped_msg.pose.position.y = lidar_y + pose_stamped_msg.pose.position.z = lidar_z + + #orientation + yaw = math.atan2(lidar_y, lidar_x) + pose_stamped_msg.pose.orientation.x = 0.0 + pose_stamped_msg.pose.orientation.y = 0.0 + pose_stamped_msg.pose.orientation.z = math.sin(yaw/2) + pose_stamped_msg.pose.orientation.w = math.cos(yaw / 2) + + self.pose_publisher.publish(pose_stamped_msg) + +def read_space_separated_matrix(string): + """ + convert space separated matrix string to np matrix + """ + lines = string.strip().split('\n') + matrix = [] + for line in lines: + values = line.split() # Exclude the first element 'rotation_matrix' + matrix.append([float(value) for value in values]) + numpy_matrix = np.array(matrix) + return numpy_matrix + + +def parse_global_matrix(): + global rotation_matrix, translation_vector, camera_transformation_k + camera_transformation_k = read_space_separated_matrix(camera_transformation_k) + rotation_matrix = read_space_separated_matrix(rotation_matrix).T + + global inverse_camera_transformation_k, inverse_rotation_matrix + inverse_camera_transformation_k = np.linalg.inv(camera_transformation_k) + inverse_rotation_matrix = np.linalg.inv(rotation_matrix) + + +def convert_to_lidar_frame(uv_coordinate): + """ + convert 2d camera coordinate + depth into 3d lidar frame + """ + point_cloud = np.empty( (3,) , dtype=float) + point_cloud[2] = uv_coordinate[2] + point_cloud[1] = ( image_height - uv_coordinate[1] )*point_cloud[2] + point_cloud[0] = uv_coordinate[0]*point_cloud[2] + + point_cloud = inverse_camera_transformation_k @ point_cloud + point_cloud = inverse_rotation_matrix @ (point_cloud-translation_vector) + return point_cloud + + +def convert_to_camera_frame(point_cloud): + """ + convert 3d lidar data into 2d coordinate of the camera frame + depth + """ + length = point_cloud.shape[0] + translation = np.tile(translation_vector, (length, 1)).T + + point_cloud = point_cloud.T + point_cloud = rotation_matrix@point_cloud + translation + point_cloud = camera_transformation_k @ point_cloud + + uv_coordinate = np.empty_like(point_cloud) + + """ + uv = [x/z, y/z, z], and y is opposite so the minus imageheight + """ + uv_coordinate[0] = point_cloud[0] / point_cloud[2] + uv_coordinate[1] = image_height - point_cloud[1] / point_cloud[2] + uv_coordinate[2] = point_cloud[2] + + uv_depth = uv_coordinate[2, :] + filtered_uv_coordinate = uv_coordinate[:, uv_depth >= 0] + return filtered_uv_coordinate + + +def estimate_depth(x, y, np_2d_array): + """ + estimate the depth by finding points closest to x,y from thhe 2d array + """ + # Calculate the distance between each point and the target coordinates (x, y) + distances_sq = (np_2d_array[0,:] - x) ** 2 + (np_2d_array[1,:] - y) ** 2 + + # Find the indices of the k nearest points + k = 5 # Number of nearest neighbors we want + closest_indices = np.argpartition(distances_sq, k)[:k] + 
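# np.argpartition(distances_sq, k)[:k] gives the indices of the k smallest squared
# distances without fully sorting the array (their order is arbitrary). Neighbours
# farther than the pixel_distance_threshold defined below (2000 squared pixels,
# roughly a 45 px radius) are discarded before the depths are averaged.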
pixel_distance_threshold = 2000 + + valid_indices = [idx for idx in closest_indices if distances_sq[idx]<=pixel_distance_threshold] + if len(valid_indices) == 0: + # lidar points disappears usually around 0.4m + return distance_where_lidar_stops_working + + filtered_indices = np.array(valid_indices) + # Get the depth value of the closest point + closest_depths = np_2d_array[2,filtered_indices] + + return np.mean(closest_depths) + + +def main(args=None): + global parser_args + global model + + parse_global_matrix() + parser_args = parse_arguments() + if debug_mode: + parser_args.verbose = True + + if parser_args.download_model: + print('downloading model...') + model = download_model() + else: + model = load_saved_model() + # print("Human detection ready...") + rclpy.init(args=args) + subscriber = LidarCameraSubscriber() + rclpy.spin(subscriber) + subscriber.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + debug_mode = True + print("\n\nDEBUG MODE ON\n\n") + main() From c2198ba4f13f3fafb9aace463d27e98cb19a96f8 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 3 Aug 2023 20:59:24 +0700 Subject: [PATCH 11/67] changed movenet to yolov7 --- .../yolov7 skeleton/human_detection_node.py | 105 +++++++----------- 1 file changed, 42 insertions(+), 63 deletions(-) diff --git a/human_detection/yolov7 skeleton/human_detection_node.py b/human_detection/yolov7 skeleton/human_detection_node.py index f7f44b12..1355db27 100644 --- a/human_detection/yolov7 skeleton/human_detection_node.py +++ b/human_detection/yolov7 skeleton/human_detection_node.py @@ -22,7 +22,7 @@ #config constants -MODEL_URL = "https://tfhub.dev/google/movenet/multipose/lightning/1?tf-hub-format=compressed" +MODEL_URL = "https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt" SAVED_MODEL_PATH = "/home/trailbot/trail_ws/multipose_model" people_detection_threshold = 0.4 point_detection_threshold = 0.4 @@ -106,36 +106,6 @@ def print_verbose_only(*args, **kwargs): print(*args, **kwargs) -def movenet(input_image, model): - """ - movenet model: - Gets input image and outputs array of keypoints with certainty score - downloaded from #https://tfhub.dev/google/movenet/multipose/lightning/1 - """ - # SavedModel format expects tensor type of int32. - input_image = tf.cast(input_image, dtype=tf.int32) - outputs = model(input_image) # Output is a [1, 6, 56] tensor. - - # The first 17 * 3 elements are the keypoint locations and scores in the - # format: [y_0, x_0, s_0, y_1, x_1, s_1, …, y_16, x_16, s_16], where y_i, - # x_i, s_i are the yx-coordinates (normalized to image frame, e.g. range - # in [0.0, 1.0]) and confidence scores of the i-th joint correspondingly. - # The order of the 17 keypoint joints is: [nose, left eye, right eye, left - # ear, right ear, left shoulder, right shoulder, left elbow, right elbow, - # left wrist, right wrist, left hip, right hip, left knee, right knee, - # left ankle, right ankle]. 
The remaining 5 elements [ymin, xmin, ymax, - # xmax, score] represent the region of the bounding box (in normalized - # coordinates) and the confidence score of the instance - keypoints = outputs['output_0'].numpy() - - count_of_people = np.sum(keypoints[0, :, -1] > people_detection_threshold ) - # print_verbose_only("count_of_people", count_of_people) - - # there are 6 people - # there are 17 body points and therefore 3*17=51 numbers per person - return keypoints[:, :, :51].reshape((6, 17, 3))[0] - - def is_there_person(points): """ return True/False of whether there is a person @@ -156,8 +126,28 @@ def is_person_facing_camera(points): return visible_joints_face >= 3 and facing_forward -def get_heading_angle( +def get_x_y_coord( points, + ): + visible_points = points[points[:, -1] > point_detection_threshold] + x_mean = np.mean(visible_points[:, 1]) + y_mean = np.mean(visible_points[:, 0]) + return x_mean * image_width, y_mean * image_height + + +import yolov7 +import torch +import cv2 +with torch.no_grad(): + yolo_sort_tracker=yolov7.Yolo_sort_tracker() +def xyxy_to_centroid(xyxy): + x1, y1, x2, y2 = xyxy + centroid_x = (x1 + x2) / 2 + centroid_y = (y1 + y2) / 2 + return (centroid_x, centroid_y) + +def get_heading_angle( + centroid, fov=90, image_width=1, offset=0, @@ -166,41 +156,27 @@ def get_heading_angle( get the heading angle from the camera's perspective to the person, in degree, relative to the center of the field of view """ - visible_points = points[points[:, -1] > point_detection_threshold] - x_mean = np.mean(visible_points[:, 1]) + centroid_x, centroid_y = centroid x_angle_radian = math.atan( - (x_mean - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) + (centroid_x - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) return offset + scaling * math.degrees(x_angle_radian) - - -def get_x_y_coord( - points, - ): - visible_points = points[points[:, -1] > point_detection_threshold] - x_mean = np.mean(visible_points[:, 1]) - y_mean = np.mean(visible_points[:, 0]) - return x_mean * image_width, y_mean * image_height - - -def process_frame(image, person_array): +def process_frame(image): """ process a frame. Determine keypoints and number of people and heading angle. 
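With the yolov7 tracker this now returns a list of Person objects, one per
tracked bounding box (each carrying the track id assigned by SORT), instead of
filling a fixed-size person_array from pose keypoints.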
""" - - input_size = 256 - input_image = tf.expand_dims(image, axis=0) - input_image = tf.image.resize_with_pad(input_image, input_size, input_size) - # Run model inference - keypoints = movenet(input_image, model) - - person_array[0].heading_angle = get_heading_angle(keypoints) - person_array[0].x, person_array[0].y = get_x_y_coord(keypoints) - person_array[0].on_screen = is_there_person(keypoints) - - is_there_anyone = is_there_person(keypoints) - return is_there_anyone + person_array = [] + bounding_boxes, identities, confidences=yolo_sort_tracker.process_frame(image,view_img=False) + for i in range(len(bounding_boxes)): + p = Person() + centroid = xyxy_to_centroid(bounding_boxes[i]) + p.heading_angle = get_heading_angle(centroid) + p.x, p.y = centroid + p.on_screen=True + p.id = identities[i] + person_array.append(p) + return person_array class Person: @@ -213,6 +189,7 @@ def __init__(self): self.z = -1.0 self.on_screen = False self.heading_angle = 0.0 + self.id = 0 class LidarCameraSubscriber(Node): @@ -223,7 +200,7 @@ def print_and_log(self, string): def __init__(self): #make array of 6 person - self.person_array = [Person() for _ in range(6)] + self.person_array = [] self.is_there_anyone = False self.cur_state = "" @@ -282,7 +259,8 @@ def camera_callback(self, msg): cv_image = self.bridge.imgmsg_to_cv2( msg, desired_encoding='passthrough') - self.is_there_anyone = process_frame(cv_image,self.person_array) + self.person_array = process_frame(cv_image) + self.is_there_anyone = len(self.person_array)>0 self.timestamp = msg.header.stamp @@ -312,7 +290,7 @@ def lidar_callback(self, msg): # if this is -1, node will publish constantly (as camera FPS) if not publishing_frequency>0: self.publish_message("lidar") - t + def publish_message(self,source_str="timer"): """ publish message is somebody is detected""" if self.cur_state!="SearchState": @@ -337,6 +315,7 @@ def publish_message(self,source_str="timer"): # angle_msg.data = angle # self.angle_publisher.publish(angle_msg) + pose_stamped_msg = PoseStamped() pose_stamped_msg.header.stamp = self.timestamp pose_stamped_msg.header.frame_id = "velodyne" From b80b88654e3d5d3de112d8b6ff5e3e89fa1387eb Mon Sep 17 00:00:00 2001 From: Nathan <30803215+Nathan903@users.noreply.github.com> Date: Mon, 7 Aug 2023 09:27:24 -0400 Subject: [PATCH 12/67] Removing globals in human detection (#50) * remove all globals * moved all constants to yaml file * fixed typo and added ApproachState --- human_detection/configs.yaml | 17 ++ .../human_detection/human_detection_node.py | 192 ++++++++++-------- 2 files changed, 123 insertions(+), 86 deletions(-) create mode 100644 human_detection/configs.yaml diff --git a/human_detection/configs.yaml b/human_detection/configs.yaml new file mode 100644 index 00000000..dc3df24c --- /dev/null +++ b/human_detection/configs.yaml @@ -0,0 +1,17 @@ +MODEL_URL: "https://tfhub.dev/google/movenet/multipose/lightning/1?tf-hub-format=compressed" +SAVED_MODEL_PATH: "/home/trailbot/trail_ws/multipose_model" +people_detection_threshold: 0.4 +point_detection_threshold: 0.4 +distance_where_lidar_stops_working: 0.4 +image_width: 1280 +image_height: 1024 +camera_transformation_k: | + 628.5359544 0 676.9575694 + 0 627.7249542 532.7206716 + 0 0 1 +rotation_matrix: | + -0.007495781893 -0.0006277316155 0.9999717092 + -0.9999516401 -0.006361853422 -0.007499625104 + 0.006366381192 -0.9999795662 -0.0005800141927 +translation_vector: [-0.06024059837, -0.08180891509, -0.3117851288] +publishing_frequency: 0.5 diff --git 
a/human_detection/human_detection/human_detection_node.py b/human_detection/human_detection/human_detection_node.py index f7f44b12..5ca59623 100644 --- a/human_detection/human_detection/human_detection_node.py +++ b/human_detection/human_detection/human_detection_node.py @@ -15,40 +15,7 @@ import tensorflow_hub as hub import time import urllib.request - - -#my own imports -from ascii_numbers import ascii_numbers - - -#config constants -MODEL_URL = "https://tfhub.dev/google/movenet/multipose/lightning/1?tf-hub-format=compressed" -SAVED_MODEL_PATH = "/home/trailbot/trail_ws/multipose_model" -people_detection_threshold = 0.4 -point_detection_threshold = 0.4 -distance_where_lidar_stops_working = 0.4 -image_width=1280 -image_height=1024 -camera_transformation_k = """ - 628.5359544 0 676.9575694 - 0 627.7249542 532.7206716 - 0 0 1 -""" -rotation_matrix = """ - -0.007495781893 -0.0006277316155 0.9999717092 - -0.9999516401 -0.006361853422 -0.007499625104 - 0.006366381192 -0.9999795662 -0.0005800141927 -""" -translation_vector = np.array([-0.06024059837, -0.08180891509, -0.3117851288]) -# if this is -1, node will publish constantly (as camera FPS) -publishing_frequency = 0.5 #Hz - - -#globals -parser_args = tuple() -model = None -debug_mode = False - +import yaml def parse_arguments(): """ @@ -73,7 +40,7 @@ def parse_arguments(): return parser.parse_args() -def download_model(): +def download_model(SAVED_MODEL_PATH,MODEL_URL): # Create the directory if it doesn't exist os.makedirs(os.path.dirname(SAVED_MODEL_PATH), exist_ok=True) @@ -88,7 +55,7 @@ def download_model(): return model -def load_saved_model(): +def load_saved_model(SAVED_MODEL_PATH): """ load the saved model from SAVED_MODEL_PATH """ @@ -98,7 +65,7 @@ def load_saved_model(): return model -def print_verbose_only(*args, **kwargs): +def print_verbose_only(parser_args,*args, **kwargs): """ print only if verbose==True """ @@ -106,7 +73,7 @@ def print_verbose_only(*args, **kwargs): print(*args, **kwargs) -def movenet(input_image, model): +def movenet(input_image, model,configs): """ movenet model: Gets input image and outputs array of keypoints with certainty score @@ -128,7 +95,7 @@ def movenet(input_image, model): # coordinates) and the confidence score of the instance keypoints = outputs['output_0'].numpy() - count_of_people = np.sum(keypoints[0, :, -1] > people_detection_threshold ) + count_of_people = np.sum(keypoints[0, :, -1] > configs['people_detection_threshold'] ) # print_verbose_only("count_of_people", count_of_people) # there are 6 people @@ -136,28 +103,29 @@ def movenet(input_image, model): return keypoints[:, :, :51].reshape((6, 17, 3))[0] -def is_there_person(points): +def is_there_person(points, configs): """ return True/False of whether there is a person """ - visible_joints = np.sum(points[:, -1] > point_detection_threshold) + visible_joints = np.sum(points[:, -1] > configs['point_detection_threshold']) return visible_joints >= 3 -def is_person_facing_camera(points): +def is_person_facing_camera(points, configs): """ return True/False depending on if the person is facing camera or not """ LEFT_EYE = 1 NOSE = 0 RIGHT_EYE = 2 - visible_joints_face = np.sum(points[:5, -1] > point_detection_threshold) + visible_joints_face = np.sum(points[:5, -1] > configs['point_detection_threshold']) facing_forward = points[LEFT_EYE][1] > points[NOSE][1] > points[RIGHT_EYE][1] return visible_joints_face >= 3 and facing_forward def get_heading_angle( points, + configs, fov=90, image_width=1, offset=0, @@ -166,7 +134,7 @@ def 
get_heading_angle( get the heading angle from the camera's perspective to the person, in degree, relative to the center of the field of view """ - visible_points = points[points[:, -1] > point_detection_threshold] + visible_points = points[points[:, -1] > configs['point_detection_threshold']] x_mean = np.mean(visible_points[:, 1]) x_angle_radian = math.atan( (x_mean - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) @@ -175,14 +143,17 @@ def get_heading_angle( def get_x_y_coord( points, + configs ): - visible_points = points[points[:, -1] > point_detection_threshold] + image_width = configs['image_width'] + image_height = configs['image_height'] + visible_points = points[points[:, -1] > configs['point_detection_threshold']] x_mean = np.mean(visible_points[:, 1]) y_mean = np.mean(visible_points[:, 0]) return x_mean * image_width, y_mean * image_height -def process_frame(image, person_array): +def process_frame(model, image, person_array,configs): """ process a frame. Determine keypoints and number of people and heading angle. @@ -193,13 +164,12 @@ def process_frame(image, person_array): input_image = tf.image.resize_with_pad(input_image, input_size, input_size) # Run model inference - keypoints = movenet(input_image, model) - - person_array[0].heading_angle = get_heading_angle(keypoints) - person_array[0].x, person_array[0].y = get_x_y_coord(keypoints) - person_array[0].on_screen = is_there_person(keypoints) + keypoints = movenet(input_image, model,configs) - is_there_anyone = is_there_person(keypoints) + person_array[0].heading_angle = get_heading_angle(keypoints,configs) + person_array[0].x, person_array[0].y = get_x_y_coord(keypoints,configs) + person_array[0].on_screen = is_there_person(keypoints, configs) + is_there_anyone = is_there_person(keypoints, configs) return is_there_anyone @@ -221,11 +191,22 @@ def print_and_log(self, string): print(string) - def __init__(self): + def __init__(self,parser_args,model,configs): #make array of 6 person self.person_array = [Person() for _ in range(6)] self.is_there_anyone = False self.cur_state = "" + self.parser_args = parser_args + self.model = model + self.configs=configs + + camera_transformation_k = configs['camera_transformation_k'] + self.camera_transformation_k = read_space_separated_matrix(camera_transformation_k) + rotation_matrix = configs['rotation_matrix'] + self.rotation_matrix = read_space_separated_matrix(rotation_matrix).T + self.translation_vector = np.array(configs['translation_vector']) + self.inverse_camera_transformation_k = np.linalg.inv(self.camera_transformation_k) + self.inverse_rotation_matrix = np.linalg.inv(self.rotation_matrix) super().__init__('image_subscriber') self.camera_subscription = self.create_subscription( @@ -264,9 +245,40 @@ def __init__(self): 'target_location', 10) self.timestamp = 0 + + # if this is -1, node will publish constantly (as camera FPS) + self.publishing_frequency = configs['publishing_frequency'] + #run the publish_message function according to publishing_frequency - self.create_timer(1/publishing_frequency, self.publish_message) + self.create_timer(1/self.publishing_frequency, self.publish_message) self.print_and_log('Human Detection ready...') + + ascii_numbers = r""" + ____ _ _ _ ____ + |___ | | | |___ + | | \/ |___ + + ____ ____ _ _ ____ + |___ | | | | |__/ + | |__| |__| | \ + + ___ _ _ ____ ____ ____ + | |__| |__/ |___ |___ + | | | | \ |___ |___ + + ___ _ _ _ ____ + | | | | | | + | |_|_| |__| + + ____ _ _ ____ + | | |\ | |___ + |__| | \| |___ + + ____ ___ ____ 
____ ___ ____ ___ + [__ | |__| |__/ | |___ | \ | + ___] | | | | \ | |___ |__/ . + """.strip().split('\n\n') + for num in ascii_numbers[-6:]: self.print_and_log(f"\n{num}\n") time.sleep(1) @@ -277,17 +289,17 @@ def state_callback(self, msg): def camera_callback(self, msg): - if self.cur_state!="SearchState": + if self.cur_state!="SearchState" and self.cur_state!="ApproachState": return cv_image = self.bridge.imgmsg_to_cv2( msg, desired_encoding='passthrough') - self.is_there_anyone = process_frame(cv_image,self.person_array) + self.is_there_anyone = process_frame(self.model, cv_image,self.person_array, self.configs) self.timestamp = msg.header.stamp def lidar_callback(self, msg): - if self.cur_state!="SearchState": + if self.cur_state!="SearchState" and self.cur_state!="ApproachState": return if not self.is_there_anyone: @@ -300,22 +312,26 @@ def lidar_callback(self, msg): # points = np.array(list(point_gen)) points = [[x, y, z] for x, y, z in point_gen] points = np.array(points) - points2d = convert_to_camera_frame(points) + points2d = convert_to_camera_frame( + points, + self.camera_transformation_k, + self.rotation_matrix, + self.translation_vector) #update depth for every person for person in self.person_array: if not person.on_screen: person.z = -1.0 else: - person.z = estimate_depth(person.x, person.y, points2d) + person.z = estimate_depth(person.x, person.y, points2d,self.configs) self.timestamp = msg.header.stamp # if this is -1, node will publish constantly (as camera FPS) - if not publishing_frequency>0: + if not self.publishing_frequency>0: self.publish_message("lidar") - t + def publish_message(self,source_str="timer"): """ publish message is somebody is detected""" - if self.cur_state!="SearchState": + if self.cur_state!="SearchState" and self.cur_state!="ApproachState": return if not self.is_there_anyone: return @@ -325,7 +341,7 @@ def publish_message(self,source_str="timer"): message += f" person: {'YES' if self.is_there_anyone else 'NO '}" message += f" angle: {person0.heading_angle:<20}" message += f"person_coordinate: {person0.x:<22} {person0.y:<22} {person0.z:22}" - print_verbose_only(message) + print_verbose_only(self.parser_args, message) self.print_and_log(message) # Publish the message @@ -341,7 +357,11 @@ def publish_message(self,source_str="timer"): pose_stamped_msg.header.stamp = self.timestamp pose_stamped_msg.header.frame_id = "velodyne" - lidar_x,lidar_y,lidar_z = convert_to_lidar_frame((person0.x,person0.y,person0.z)) + lidar_x,lidar_y,lidar_z = convert_to_lidar_frame( + (person0.x,person0.y,person0.z), + self.inverse_camera_transformation_k, + self.inverse_rotation_matrix, + self.translation_vector) #position pose_stamped_msg.pose.position.x = lidar_x @@ -370,17 +390,11 @@ def read_space_separated_matrix(string): return numpy_matrix -def parse_global_matrix(): - global rotation_matrix, translation_vector, camera_transformation_k - camera_transformation_k = read_space_separated_matrix(camera_transformation_k) - rotation_matrix = read_space_separated_matrix(rotation_matrix).T - - global inverse_camera_transformation_k, inverse_rotation_matrix - inverse_camera_transformation_k = np.linalg.inv(camera_transformation_k) - inverse_rotation_matrix = np.linalg.inv(rotation_matrix) - - -def convert_to_lidar_frame(uv_coordinate): +def convert_to_lidar_frame( + uv_coordinate, + inverse_camera_transformation_k, + inverse_rotation_matrix, + translation_vector): """ convert 2d camera coordinate + depth into 3d lidar frame """ @@ -394,7 +408,11 @@ def 
convert_to_lidar_frame(uv_coordinate): return point_cloud -def convert_to_camera_frame(point_cloud): +def convert_to_camera_frame( + point_cloud, + camera_transformation_k, + rotation_matrix, + translation_vector): """ convert 3d lidar data into 2d coordinate of the camera frame + depth """ @@ -419,7 +437,7 @@ def convert_to_camera_frame(point_cloud): return filtered_uv_coordinate -def estimate_depth(x, y, np_2d_array): +def estimate_depth(x, y, np_2d_array,configs): """ estimate the depth by finding points closest to x,y from thhe 2d array """ @@ -434,6 +452,7 @@ def estimate_depth(x, y, np_2d_array): valid_indices = [idx for idx in closest_indices if distances_sq[idx]<=pixel_distance_threshold] if len(valid_indices) == 0: # lidar points disappears usually around 0.4m + distance_where_lidar_stops_working = configs['distance_where_lidar_stops_working'] return distance_where_lidar_stops_working filtered_indices = np.array(valid_indices) @@ -443,28 +462,29 @@ def estimate_depth(x, y, np_2d_array): return np.mean(closest_depths) -def main(args=None): - global parser_args - global model +def main(args=None, debug_mode=False): + + with open('configs.yaml', 'r') as file: + configs = yaml.safe_load(file) + MODEL_URL = configs['MODEL_URL'] + SAVED_MODEL_PATH = configs['SAVED_MODEL_PATH'] - parse_global_matrix() parser_args = parse_arguments() if debug_mode: parser_args.verbose = True if parser_args.download_model: print('downloading model...') - model = download_model() + model = download_model(SAVED_MODEL_PATH,MODEL_URL) else: - model = load_saved_model() + model = load_saved_model(SAVED_MODEL_PATH) # print("Human detection ready...") rclpy.init(args=args) - subscriber = LidarCameraSubscriber() + subscriber = LidarCameraSubscriber(parser_args,model,configs) rclpy.spin(subscriber) subscriber.destroy_node() rclpy.shutdown() if __name__ == '__main__': - debug_mode = True print("\n\nDEBUG MODE ON\n\n") - main() + main(debug_mode=True) From bea10062a462f5f760c96f31d3b3ccdae9df3ae5 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 21:29:26 +0800 Subject: [PATCH 13/67] Update example_usage.py --- human_detection/yolov7 skeleton/example_usage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/human_detection/yolov7 skeleton/example_usage.py b/human_detection/yolov7 skeleton/example_usage.py index b6933fde..3a899078 100644 --- a/human_detection/yolov7 skeleton/example_usage.py +++ b/human_detection/yolov7 skeleton/example_usage.py @@ -17,6 +17,6 @@ with torch.no_grad(): image_frame = mywebcam.read()[1] bounding_boxes, identities, confidences=yolo_sort_tracker.process_frame(image_frame,view_img=False) - print(bounding_boxes) - print(identities) + print("bounding_boxes:",bounding_boxes) + print("identities:",identities) print("\n") From 375b5f30ae9775b1d01120675bcac775522acd86 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 21:31:10 +0800 Subject: [PATCH 14/67] Delete human_detection_node.py --- .../yolov7 skeleton/human_detection_node.py | 449 ------------------ 1 file changed, 449 deletions(-) delete mode 100644 human_detection/yolov7 skeleton/human_detection_node.py diff --git a/human_detection/yolov7 skeleton/human_detection_node.py b/human_detection/yolov7 skeleton/human_detection_node.py deleted file mode 100644 index 1355db27..00000000 --- a/human_detection/yolov7 skeleton/human_detection_node.py +++ /dev/null @@ -1,449 +0,0 @@ -import argparse -from cv_bridge import CvBridge -from geometry_msgs.msg import Point -from geometry_msgs.msg import PoseStamped 
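# PoseStamped is the node's main output: the tracked person's position in the
# velodyne frame, with a yaw-only orientation, published on 'target_location'.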
-import math -import numpy as np -import os -import rclpy -from rclpy.node import Node -from sensor_msgs.msg import Image, PointCloud2 -import sensor_msgs_py.point_cloud2 as pc2 -from std_msgs.msg import Bool, Float32, String -import tarfile -import tensorflow as tf -import tensorflow_hub as hub -import time -import urllib.request - - -#my own imports -from ascii_numbers import ascii_numbers - - -#config constants -MODEL_URL = "https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt" -SAVED_MODEL_PATH = "/home/trailbot/trail_ws/multipose_model" -people_detection_threshold = 0.4 -point_detection_threshold = 0.4 -distance_where_lidar_stops_working = 0.4 -image_width=1280 -image_height=1024 -camera_transformation_k = """ - 628.5359544 0 676.9575694 - 0 627.7249542 532.7206716 - 0 0 1 -""" -rotation_matrix = """ - -0.007495781893 -0.0006277316155 0.9999717092 - -0.9999516401 -0.006361853422 -0.007499625104 - 0.006366381192 -0.9999795662 -0.0005800141927 -""" -translation_vector = np.array([-0.06024059837, -0.08180891509, -0.3117851288]) -# if this is -1, node will publish constantly (as camera FPS) -publishing_frequency = 0.5 #Hz - - -#globals -parser_args = tuple() -model = None -debug_mode = False - - -def parse_arguments(): - """ - handle command line arguments - """ - parser = argparse.ArgumentParser(description='Example command-line parser') - parser.add_argument( - '-v', - '--verbose', - action='store_true', - help='Enable print_verbose_only outputs') - parser.add_argument( - '-d', - '--download_model', - action='store_true', - help='Flag to download the model. This must be ran at least once') - parser.add_argument( - '-r', - '--ros-args', - action='store_true', - help='temp fix') - return parser.parse_args() - - -def download_model(): - # Create the directory if it doesn't exist - os.makedirs(os.path.dirname(SAVED_MODEL_PATH), exist_ok=True) - - # Download the compressed model from the URL - model_path, _ = urllib.request.urlretrieve(MODEL_URL) - - # Extract the compressed model to the specified path - with tarfile.open(model_path, "r:gz") as tar: - tar.extractall(SAVED_MODEL_PATH) - - model = hub.load(SAVED_MODEL_PATH).signatures['serving_default'] - return model - - -def load_saved_model(): - """ - load the saved model from SAVED_MODEL_PATH - """ - if not os.path.exists(SAVED_MODEL_PATH): - raise FileNotFoundError(f"Model not found at {SAVED_MODEL_PATH}") - model = hub.load(SAVED_MODEL_PATH).signatures['serving_default'] - return model - - -def print_verbose_only(*args, **kwargs): - """ - print only if verbose==True - """ - if parser_args.verbose: - print(*args, **kwargs) - - -def is_there_person(points): - """ - return True/False of whether there is a person - """ - visible_joints = np.sum(points[:, -1] > point_detection_threshold) - return visible_joints >= 3 - - -def is_person_facing_camera(points): - """ - return True/False depending on if the person is facing camera or not - """ - LEFT_EYE = 1 - NOSE = 0 - RIGHT_EYE = 2 - visible_joints_face = np.sum(points[:5, -1] > point_detection_threshold) - facing_forward = points[LEFT_EYE][1] > points[NOSE][1] > points[RIGHT_EYE][1] - return visible_joints_face >= 3 and facing_forward - - -def get_x_y_coord( - points, - ): - visible_points = points[points[:, -1] > point_detection_threshold] - x_mean = np.mean(visible_points[:, 1]) - y_mean = np.mean(visible_points[:, 0]) - return x_mean * image_width, y_mean * image_height - - -import yolov7 -import torch -import cv2 -with torch.no_grad(): - 
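# gradient tracking is disabled while the yolov7 weights are loaded; the wrapper is
# then used the same way as in example_usage.py:
#   bounding_boxes, identities, confidences = yolo_sort_tracker.process_frame(image_frame, view_img=False)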
yolo_sort_tracker=yolov7.Yolo_sort_tracker() -def xyxy_to_centroid(xyxy): - x1, y1, x2, y2 = xyxy - centroid_x = (x1 + x2) / 2 - centroid_y = (y1 + y2) / 2 - return (centroid_x, centroid_y) - -def get_heading_angle( - centroid, - fov=90, - image_width=1, - offset=0, - scaling=1): - """ - get the heading angle from the camera's perspective to the person, - in degree, relative to the center of the field of view - """ - centroid_x, centroid_y = centroid - x_angle_radian = math.atan( - (centroid_x - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) - return offset + scaling * math.degrees(x_angle_radian) -def process_frame(image): - """ - process a frame. Determine keypoints and number of people and - heading angle. - """ - # Run model inference - person_array = [] - bounding_boxes, identities, confidences=yolo_sort_tracker.process_frame(image,view_img=False) - for i in range(len(bounding_boxes)): - p = Person() - centroid = xyxy_to_centroid(bounding_boxes[i]) - p.heading_angle = get_heading_angle(centroid) - p.x, p.y = centroid - p.on_screen=True - p.id = identities[i] - person_array.append(p) - return person_array - - -class Person: - """ - struct to store information for a detected person - """ - def __init__(self): - self.x = -1.0 - self.y = -1.0 - self.z = -1.0 - self.on_screen = False - self.heading_angle = 0.0 - self.id = 0 - - -class LidarCameraSubscriber(Node): - def print_and_log(self, string): - self.get_logger().info(string) - print(string) - - - def __init__(self): - #make array of 6 person - self.person_array = [] - self.is_there_anyone = False - self.cur_state = "" - - super().__init__('image_subscriber') - self.camera_subscription = self.create_subscription( - Image, - 'camera', - self.camera_callback, - 10) - self.camera_subscription - self.bridge = CvBridge() - - self.lidar_subscription = self.create_subscription( - PointCloud2, - 'velodyne_points', - self.lidar_callback, - 10) - self.lidar_subscription - - self.state_subscription = self.create_subscription( - String, - '/trailbot_state', - self.state_callback, - 10) - - - #topics to publish - self.is_person_publisher = self.create_publisher( - Bool, - 'is_person_topic', - 10) - # self.angle_publisher = self.create_publisher( - # Float32, - # 'angle_topic', - # 10) - self.pose_publisher = self.create_publisher( - PoseStamped, - 'target_location', - 10) - self.timestamp = 0 - #run the publish_message function according to publishing_frequency - self.create_timer(1/publishing_frequency, self.publish_message) - self.print_and_log('Human Detection ready...') - for num in ascii_numbers[-6:]: - self.print_and_log(f"\n{num}\n") - time.sleep(1) - - - def state_callback(self, msg): - self.cur_state = msg.data - - - def camera_callback(self, msg): - if self.cur_state!="SearchState": - return - - cv_image = self.bridge.imgmsg_to_cv2( - msg, desired_encoding='passthrough') - self.person_array = process_frame(cv_image) - self.is_there_anyone = len(self.person_array)>0 - self.timestamp = msg.header.stamp - - - def lidar_callback(self, msg): - if self.cur_state!="SearchState": - return - - if not self.is_there_anyone: - return - - # Deserialize PointCloud2 data into xyz points - point_gen = pc2.read_points( - msg, field_names=( - "x", "y", "z"), skip_nans=True) - # points = np.array(list(point_gen)) - points = [[x, y, z] for x, y, z in point_gen] - points = np.array(points) - points2d = convert_to_camera_frame(points) - - #update depth for every person - for person in self.person_array: - if not person.on_screen: - 
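# keep the -1.0 sentinel depth for anyone who is not currently visible in the image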
person.z = -1.0 - else: - person.z = estimate_depth(person.x, person.y, points2d) - self.timestamp = msg.header.stamp - # if this is -1, node will publish constantly (as camera FPS) - if not publishing_frequency>0: - self.publish_message("lidar") - - def publish_message(self,source_str="timer"): - """ publish message is somebody is detected""" - if self.cur_state!="SearchState": - return - if not self.is_there_anyone: - return - #person0 for debugging purpse - person0 = self.person_array[0] - message = f"{source_str:<7}" - message += f" person: {'YES' if self.is_there_anyone else 'NO '}" - message += f" angle: {person0.heading_angle:<20}" - message += f"person_coordinate: {person0.x:<22} {person0.y:<22} {person0.z:22}" - print_verbose_only(message) - self.print_and_log(message) - - # Publish the message - is_person_msg = Bool() - is_person_msg.data = bool(self.is_there_anyone) - self.is_person_publisher.publish(is_person_msg) - - # angle_msg = Float32() - # angle_msg.data = angle - # self.angle_publisher.publish(angle_msg) - - - pose_stamped_msg = PoseStamped() - pose_stamped_msg.header.stamp = self.timestamp - pose_stamped_msg.header.frame_id = "velodyne" - - lidar_x,lidar_y,lidar_z = convert_to_lidar_frame((person0.x,person0.y,person0.z)) - - #position - pose_stamped_msg.pose.position.x = lidar_x - pose_stamped_msg.pose.position.y = lidar_y - pose_stamped_msg.pose.position.z = lidar_z - - #orientation - yaw = math.atan2(lidar_y, lidar_x) - pose_stamped_msg.pose.orientation.x = 0.0 - pose_stamped_msg.pose.orientation.y = 0.0 - pose_stamped_msg.pose.orientation.z = math.sin(yaw/2) - pose_stamped_msg.pose.orientation.w = math.cos(yaw / 2) - - self.pose_publisher.publish(pose_stamped_msg) - -def read_space_separated_matrix(string): - """ - convert space separated matrix string to np matrix - """ - lines = string.strip().split('\n') - matrix = [] - for line in lines: - values = line.split() # Exclude the first element 'rotation_matrix' - matrix.append([float(value) for value in values]) - numpy_matrix = np.array(matrix) - return numpy_matrix - - -def parse_global_matrix(): - global rotation_matrix, translation_vector, camera_transformation_k - camera_transformation_k = read_space_separated_matrix(camera_transformation_k) - rotation_matrix = read_space_separated_matrix(rotation_matrix).T - - global inverse_camera_transformation_k, inverse_rotation_matrix - inverse_camera_transformation_k = np.linalg.inv(camera_transformation_k) - inverse_rotation_matrix = np.linalg.inv(rotation_matrix) - - -def convert_to_lidar_frame(uv_coordinate): - """ - convert 2d camera coordinate + depth into 3d lidar frame - """ - point_cloud = np.empty( (3,) , dtype=float) - point_cloud[2] = uv_coordinate[2] - point_cloud[1] = ( image_height - uv_coordinate[1] )*point_cloud[2] - point_cloud[0] = uv_coordinate[0]*point_cloud[2] - - point_cloud = inverse_camera_transformation_k @ point_cloud - point_cloud = inverse_rotation_matrix @ (point_cloud-translation_vector) - return point_cloud - - -def convert_to_camera_frame(point_cloud): - """ - convert 3d lidar data into 2d coordinate of the camera frame + depth - """ - length = point_cloud.shape[0] - translation = np.tile(translation_vector, (length, 1)).T - - point_cloud = point_cloud.T - point_cloud = rotation_matrix@point_cloud + translation - point_cloud = camera_transformation_k @ point_cloud - - uv_coordinate = np.empty_like(point_cloud) - - """ - uv = [x/z, y/z, z], and y is opposite so the minus imageheight - """ - uv_coordinate[0] = point_cloud[0] / point_cloud[2] - 
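# perspective division: u = x/z and v = y/z; v is measured down from image_height
# because the camera y axis points opposite to the image row direction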
uv_coordinate[1] = image_height - point_cloud[1] / point_cloud[2] - uv_coordinate[2] = point_cloud[2] - - uv_depth = uv_coordinate[2, :] - filtered_uv_coordinate = uv_coordinate[:, uv_depth >= 0] - return filtered_uv_coordinate - - -def estimate_depth(x, y, np_2d_array): - """ - estimate the depth by finding points closest to x,y from thhe 2d array - """ - # Calculate the distance between each point and the target coordinates (x, y) - distances_sq = (np_2d_array[0,:] - x) ** 2 + (np_2d_array[1,:] - y) ** 2 - - # Find the indices of the k nearest points - k = 5 # Number of nearest neighbors we want - closest_indices = np.argpartition(distances_sq, k)[:k] - pixel_distance_threshold = 2000 - - valid_indices = [idx for idx in closest_indices if distances_sq[idx]<=pixel_distance_threshold] - if len(valid_indices) == 0: - # lidar points disappears usually around 0.4m - return distance_where_lidar_stops_working - - filtered_indices = np.array(valid_indices) - # Get the depth value of the closest point - closest_depths = np_2d_array[2,filtered_indices] - - return np.mean(closest_depths) - - -def main(args=None): - global parser_args - global model - - parse_global_matrix() - parser_args = parse_arguments() - if debug_mode: - parser_args.verbose = True - - if parser_args.download_model: - print('downloading model...') - model = download_model() - else: - model = load_saved_model() - # print("Human detection ready...") - rclpy.init(args=args) - subscriber = LidarCameraSubscriber() - rclpy.spin(subscriber) - subscriber.destroy_node() - rclpy.shutdown() - -if __name__ == '__main__': - debug_mode = True - print("\n\nDEBUG MODE ON\n\n") - main() From 3b2e80ef14dcd8f5fd7f5bb712b32f5632654480 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 21:36:24 +0800 Subject: [PATCH 15/67] rename folder to remove space --- human_detection/{yolov7 skeleton => yolov7_skeleton}/.gitignore | 0 human_detection/{yolov7 skeleton => yolov7_skeleton}/commands.txt | 0 .../{yolov7 skeleton => yolov7_skeleton}/example_usage.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/models/__init__.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/models/common.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/models/experimental.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/models/yolo.py | 0 human_detection/{yolov7 skeleton => yolov7_skeleton}/readme.md | 0 human_detection/{yolov7 skeleton => yolov7_skeleton}/sort.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/__init__.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/activations.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/add_nms.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/autoanchor.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/aws/__init__.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/aws/mime.sh | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/aws/resume.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/aws/userdata.sh | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/datasets.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/general.py | 0 .../utils/google_app_engine/Dockerfile | 0 .../utils/google_app_engine/additional_requirements.txt | 0 .../utils/google_app_engine/app.yaml | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/google_utils.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/loss.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/metrics.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/plots.py | 0 .../{yolov7 skeleton => yolov7_skeleton}/utils/torch_utils.py | 0 
.../utils/wandb_logging/__init__.py | 0 .../utils/wandb_logging/log_dataset.py | 0 .../utils/wandb_logging/wandb_utils.py | 0 human_detection/{yolov7 skeleton => yolov7_skeleton}/yolov7.py | 0 31 files changed, 0 insertions(+), 0 deletions(-) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/.gitignore (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/commands.txt (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/example_usage.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/models/__init__.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/models/common.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/models/experimental.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/models/yolo.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/readme.md (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/sort.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/__init__.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/activations.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/add_nms.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/autoanchor.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/aws/__init__.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/aws/mime.sh (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/aws/resume.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/aws/userdata.sh (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/datasets.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/general.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/google_app_engine/Dockerfile (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/google_app_engine/additional_requirements.txt (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/google_app_engine/app.yaml (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/google_utils.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/loss.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/metrics.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/plots.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/torch_utils.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/wandb_logging/__init__.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/wandb_logging/log_dataset.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/utils/wandb_logging/wandb_utils.py (100%) rename human_detection/{yolov7 skeleton => yolov7_skeleton}/yolov7.py (100%) diff --git a/human_detection/yolov7 skeleton/.gitignore b/human_detection/yolov7_skeleton/.gitignore similarity index 100% rename from human_detection/yolov7 skeleton/.gitignore rename to human_detection/yolov7_skeleton/.gitignore diff --git a/human_detection/yolov7 skeleton/commands.txt b/human_detection/yolov7_skeleton/commands.txt similarity index 100% rename from human_detection/yolov7 skeleton/commands.txt rename to human_detection/yolov7_skeleton/commands.txt diff --git a/human_detection/yolov7 skeleton/example_usage.py b/human_detection/yolov7_skeleton/example_usage.py 
similarity index 100% rename from human_detection/yolov7 skeleton/example_usage.py rename to human_detection/yolov7_skeleton/example_usage.py diff --git a/human_detection/yolov7 skeleton/models/__init__.py b/human_detection/yolov7_skeleton/models/__init__.py similarity index 100% rename from human_detection/yolov7 skeleton/models/__init__.py rename to human_detection/yolov7_skeleton/models/__init__.py diff --git a/human_detection/yolov7 skeleton/models/common.py b/human_detection/yolov7_skeleton/models/common.py similarity index 100% rename from human_detection/yolov7 skeleton/models/common.py rename to human_detection/yolov7_skeleton/models/common.py diff --git a/human_detection/yolov7 skeleton/models/experimental.py b/human_detection/yolov7_skeleton/models/experimental.py similarity index 100% rename from human_detection/yolov7 skeleton/models/experimental.py rename to human_detection/yolov7_skeleton/models/experimental.py diff --git a/human_detection/yolov7 skeleton/models/yolo.py b/human_detection/yolov7_skeleton/models/yolo.py similarity index 100% rename from human_detection/yolov7 skeleton/models/yolo.py rename to human_detection/yolov7_skeleton/models/yolo.py diff --git a/human_detection/yolov7 skeleton/readme.md b/human_detection/yolov7_skeleton/readme.md similarity index 100% rename from human_detection/yolov7 skeleton/readme.md rename to human_detection/yolov7_skeleton/readme.md diff --git a/human_detection/yolov7 skeleton/sort.py b/human_detection/yolov7_skeleton/sort.py similarity index 100% rename from human_detection/yolov7 skeleton/sort.py rename to human_detection/yolov7_skeleton/sort.py diff --git a/human_detection/yolov7 skeleton/utils/__init__.py b/human_detection/yolov7_skeleton/utils/__init__.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/__init__.py rename to human_detection/yolov7_skeleton/utils/__init__.py diff --git a/human_detection/yolov7 skeleton/utils/activations.py b/human_detection/yolov7_skeleton/utils/activations.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/activations.py rename to human_detection/yolov7_skeleton/utils/activations.py diff --git a/human_detection/yolov7 skeleton/utils/add_nms.py b/human_detection/yolov7_skeleton/utils/add_nms.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/add_nms.py rename to human_detection/yolov7_skeleton/utils/add_nms.py diff --git a/human_detection/yolov7 skeleton/utils/autoanchor.py b/human_detection/yolov7_skeleton/utils/autoanchor.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/autoanchor.py rename to human_detection/yolov7_skeleton/utils/autoanchor.py diff --git a/human_detection/yolov7 skeleton/utils/aws/__init__.py b/human_detection/yolov7_skeleton/utils/aws/__init__.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/aws/__init__.py rename to human_detection/yolov7_skeleton/utils/aws/__init__.py diff --git a/human_detection/yolov7 skeleton/utils/aws/mime.sh b/human_detection/yolov7_skeleton/utils/aws/mime.sh similarity index 100% rename from human_detection/yolov7 skeleton/utils/aws/mime.sh rename to human_detection/yolov7_skeleton/utils/aws/mime.sh diff --git a/human_detection/yolov7 skeleton/utils/aws/resume.py b/human_detection/yolov7_skeleton/utils/aws/resume.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/aws/resume.py rename to human_detection/yolov7_skeleton/utils/aws/resume.py diff --git a/human_detection/yolov7 skeleton/utils/aws/userdata.sh 
b/human_detection/yolov7_skeleton/utils/aws/userdata.sh similarity index 100% rename from human_detection/yolov7 skeleton/utils/aws/userdata.sh rename to human_detection/yolov7_skeleton/utils/aws/userdata.sh diff --git a/human_detection/yolov7 skeleton/utils/datasets.py b/human_detection/yolov7_skeleton/utils/datasets.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/datasets.py rename to human_detection/yolov7_skeleton/utils/datasets.py diff --git a/human_detection/yolov7 skeleton/utils/general.py b/human_detection/yolov7_skeleton/utils/general.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/general.py rename to human_detection/yolov7_skeleton/utils/general.py diff --git a/human_detection/yolov7 skeleton/utils/google_app_engine/Dockerfile b/human_detection/yolov7_skeleton/utils/google_app_engine/Dockerfile similarity index 100% rename from human_detection/yolov7 skeleton/utils/google_app_engine/Dockerfile rename to human_detection/yolov7_skeleton/utils/google_app_engine/Dockerfile diff --git a/human_detection/yolov7 skeleton/utils/google_app_engine/additional_requirements.txt b/human_detection/yolov7_skeleton/utils/google_app_engine/additional_requirements.txt similarity index 100% rename from human_detection/yolov7 skeleton/utils/google_app_engine/additional_requirements.txt rename to human_detection/yolov7_skeleton/utils/google_app_engine/additional_requirements.txt diff --git a/human_detection/yolov7 skeleton/utils/google_app_engine/app.yaml b/human_detection/yolov7_skeleton/utils/google_app_engine/app.yaml similarity index 100% rename from human_detection/yolov7 skeleton/utils/google_app_engine/app.yaml rename to human_detection/yolov7_skeleton/utils/google_app_engine/app.yaml diff --git a/human_detection/yolov7 skeleton/utils/google_utils.py b/human_detection/yolov7_skeleton/utils/google_utils.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/google_utils.py rename to human_detection/yolov7_skeleton/utils/google_utils.py diff --git a/human_detection/yolov7 skeleton/utils/loss.py b/human_detection/yolov7_skeleton/utils/loss.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/loss.py rename to human_detection/yolov7_skeleton/utils/loss.py diff --git a/human_detection/yolov7 skeleton/utils/metrics.py b/human_detection/yolov7_skeleton/utils/metrics.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/metrics.py rename to human_detection/yolov7_skeleton/utils/metrics.py diff --git a/human_detection/yolov7 skeleton/utils/plots.py b/human_detection/yolov7_skeleton/utils/plots.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/plots.py rename to human_detection/yolov7_skeleton/utils/plots.py diff --git a/human_detection/yolov7 skeleton/utils/torch_utils.py b/human_detection/yolov7_skeleton/utils/torch_utils.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/torch_utils.py rename to human_detection/yolov7_skeleton/utils/torch_utils.py diff --git a/human_detection/yolov7 skeleton/utils/wandb_logging/__init__.py b/human_detection/yolov7_skeleton/utils/wandb_logging/__init__.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/wandb_logging/__init__.py rename to human_detection/yolov7_skeleton/utils/wandb_logging/__init__.py diff --git a/human_detection/yolov7 skeleton/utils/wandb_logging/log_dataset.py b/human_detection/yolov7_skeleton/utils/wandb_logging/log_dataset.py similarity index 100% rename from 
human_detection/yolov7 skeleton/utils/wandb_logging/log_dataset.py rename to human_detection/yolov7_skeleton/utils/wandb_logging/log_dataset.py diff --git a/human_detection/yolov7 skeleton/utils/wandb_logging/wandb_utils.py b/human_detection/yolov7_skeleton/utils/wandb_logging/wandb_utils.py similarity index 100% rename from human_detection/yolov7 skeleton/utils/wandb_logging/wandb_utils.py rename to human_detection/yolov7_skeleton/utils/wandb_logging/wandb_utils.py diff --git a/human_detection/yolov7 skeleton/yolov7.py b/human_detection/yolov7_skeleton/yolov7.py similarity index 100% rename from human_detection/yolov7 skeleton/yolov7.py rename to human_detection/yolov7_skeleton/yolov7.py From 5d111a4b18a6ea1f4b7a39ad9cf5bf0284846f1e Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 21:37:46 +0800 Subject: [PATCH 16/67] Update readme.md --- human_detection/yolov7_skeleton/readme.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/human_detection/yolov7_skeleton/readme.md b/human_detection/yolov7_skeleton/readme.md index 6774853a..67373ccf 100644 --- a/human_detection/yolov7_skeleton/readme.md +++ b/human_detection/yolov7_skeleton/readme.md @@ -1,6 +1,6 @@ -`main.py ` is taken from https://github.com/haroonshakeel/yolov7-object-tracking/blob/main/detect_or_track.py and modified +`yolov7.py ` is the yolov7 class for human tracking. It is taken from https://github.com/haroonshakeel/yolov7-object-tracking/blob/main/detect_or_track.py and modified -`models/` `utils/` `sort.py` is downloaded from https://github.com/WongKinYiu/yolov7 (with no modification) +`models/`, `utils/`, `sort.py` are yolov7 helper files, downloaded from https://github.com/WongKinYiu/yolov7 (with no modification) Usage: 1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq From 9d385e60dd5efed4c06046184340e8e418a4f8c1 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 21:38:12 +0800 Subject: [PATCH 17/67] Update readme.md --- human_detection/yolov7_skeleton/readme.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/human_detection/yolov7_skeleton/readme.md b/human_detection/yolov7_skeleton/readme.md index 67373ccf..61b7e31f 100644 --- a/human_detection/yolov7_skeleton/readme.md +++ b/human_detection/yolov7_skeleton/readme.md @@ -2,6 +2,8 @@ `models/`, `utils/`, `sort.py` are yolov7 helper files, downloaded from https://github.com/WongKinYiu/yolov7 (with no modification) +`example_usage.py` is an example that uses the yolov7 class. + Usage: 1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq 2. 
run ``` From c41c6282aec31354972fba0c5aa7149cbff66c1e Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 22:00:33 +0800 Subject: [PATCH 18/67] fixed global var error --- .../human_detection/human_detection_node.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/human_detection/human_detection/human_detection_node.py b/human_detection/human_detection/human_detection_node.py index 5ca59623..07bb4ace 100644 --- a/human_detection/human_detection/human_detection_node.py +++ b/human_detection/human_detection/human_detection_node.py @@ -195,7 +195,7 @@ def __init__(self,parser_args,model,configs): #make array of 6 person self.person_array = [Person() for _ in range(6)] self.is_there_anyone = False - self.cur_state = "" + self.cur_state = "SearchState" # initial state self.parser_args = parser_args self.model = model self.configs=configs @@ -316,7 +316,8 @@ def lidar_callback(self, msg): points, self.camera_transformation_k, self.rotation_matrix, - self.translation_vector) + self.translation_vector, + self.configs) #update depth for every person for person in self.person_array: @@ -361,7 +362,8 @@ def publish_message(self,source_str="timer"): (person0.x,person0.y,person0.z), self.inverse_camera_transformation_k, self.inverse_rotation_matrix, - self.translation_vector) + self.translation_vector, + self.configs) #position pose_stamped_msg.pose.position.x = lidar_x @@ -394,10 +396,13 @@ def convert_to_lidar_frame( uv_coordinate, inverse_camera_transformation_k, inverse_rotation_matrix, - translation_vector): + translation_vector, + configs): """ convert 2d camera coordinate + depth into 3d lidar frame """ + image_height = configs['image_height'] + point_cloud = np.empty( (3,) , dtype=float) point_cloud[2] = uv_coordinate[2] point_cloud[1] = ( image_height - uv_coordinate[1] )*point_cloud[2] @@ -412,10 +417,13 @@ def convert_to_camera_frame( point_cloud, camera_transformation_k, rotation_matrix, - translation_vector): + translation_vector, + configs): """ convert 3d lidar data into 2d coordinate of the camera frame + depth """ + image_height = configs['image_height'] + length = point_cloud.shape[0] translation = np.tile(translation_vector, (length, 1)).T From 82ed90f150220bfdc506e151826f1a67050f2089 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 22:02:07 +0800 Subject: [PATCH 19/67] copy human_detection_node to yolov7 --- .../yolov7_skeleton/ascii_numbers.py | 32 ++ .../yolov7_skeleton/human_detection_node.py | 498 ++++++++++++++++++ 2 files changed, 530 insertions(+) create mode 100644 human_detection/yolov7_skeleton/ascii_numbers.py create mode 100644 human_detection/yolov7_skeleton/human_detection_node.py diff --git a/human_detection/yolov7_skeleton/ascii_numbers.py b/human_detection/yolov7_skeleton/ascii_numbers.py new file mode 100644 index 00000000..42272f0b --- /dev/null +++ b/human_detection/yolov7_skeleton/ascii_numbers.py @@ -0,0 +1,32 @@ +#taken from https://patorjk.com/software/taag/#p=display&f=Cybermedium + +ascii_numbers = r""" +____ _ _ _ ____ +|___ | | | |___ +| | \/ |___ + +____ ____ _ _ ____ +|___ | | | | |__/ +| |__| |__| | \ + +___ _ _ ____ ____ ____ + | |__| |__/ |___ |___ + | | | | \ |___ |___ + +___ _ _ _ ____ + | | | | | | + | |_|_| |__| + +____ _ _ ____ +| | |\ | |___ +|__| | \| |___ + +____ ___ ____ ____ ___ ____ ___ +[__ | |__| |__/ | |___ | \ | +___] | | | | \ | |___ |__/ . 
+""".strip().split('\n\n') + +if __name__ == '__main__': + print(ascii_numbers) + for num in ascii_numbers: + print(num,'\n\n\n') diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py new file mode 100644 index 00000000..07bb4ace --- /dev/null +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -0,0 +1,498 @@ +import argparse +from cv_bridge import CvBridge +from geometry_msgs.msg import Point +from geometry_msgs.msg import PoseStamped +import math +import numpy as np +import os +import rclpy +from rclpy.node import Node +from sensor_msgs.msg import Image, PointCloud2 +import sensor_msgs_py.point_cloud2 as pc2 +from std_msgs.msg import Bool, Float32, String +import tarfile +import tensorflow as tf +import tensorflow_hub as hub +import time +import urllib.request +import yaml + +def parse_arguments(): + """ + handle command line arguments + """ + parser = argparse.ArgumentParser(description='Example command-line parser') + parser.add_argument( + '-v', + '--verbose', + action='store_true', + help='Enable print_verbose_only outputs') + parser.add_argument( + '-d', + '--download_model', + action='store_true', + help='Flag to download the model. This must be ran at least once') + parser.add_argument( + '-r', + '--ros-args', + action='store_true', + help='temp fix') + return parser.parse_args() + + +def download_model(SAVED_MODEL_PATH,MODEL_URL): + # Create the directory if it doesn't exist + os.makedirs(os.path.dirname(SAVED_MODEL_PATH), exist_ok=True) + + # Download the compressed model from the URL + model_path, _ = urllib.request.urlretrieve(MODEL_URL) + + # Extract the compressed model to the specified path + with tarfile.open(model_path, "r:gz") as tar: + tar.extractall(SAVED_MODEL_PATH) + + model = hub.load(SAVED_MODEL_PATH).signatures['serving_default'] + return model + + +def load_saved_model(SAVED_MODEL_PATH): + """ + load the saved model from SAVED_MODEL_PATH + """ + if not os.path.exists(SAVED_MODEL_PATH): + raise FileNotFoundError(f"Model not found at {SAVED_MODEL_PATH}") + model = hub.load(SAVED_MODEL_PATH).signatures['serving_default'] + return model + + +def print_verbose_only(parser_args,*args, **kwargs): + """ + print only if verbose==True + """ + if parser_args.verbose: + print(*args, **kwargs) + + +def movenet(input_image, model,configs): + """ + movenet model: + Gets input image and outputs array of keypoints with certainty score + downloaded from #https://tfhub.dev/google/movenet/multipose/lightning/1 + """ + # SavedModel format expects tensor type of int32. + input_image = tf.cast(input_image, dtype=tf.int32) + outputs = model(input_image) # Output is a [1, 6, 56] tensor. + + # The first 17 * 3 elements are the keypoint locations and scores in the + # format: [y_0, x_0, s_0, y_1, x_1, s_1, …, y_16, x_16, s_16], where y_i, + # x_i, s_i are the yx-coordinates (normalized to image frame, e.g. range + # in [0.0, 1.0]) and confidence scores of the i-th joint correspondingly. + # The order of the 17 keypoint joints is: [nose, left eye, right eye, left + # ear, right ear, left shoulder, right shoulder, left elbow, right elbow, + # left wrist, right wrist, left hip, right hip, left knee, right knee, + # left ankle, right ankle]. 
The remaining 5 elements [ymin, xmin, ymax, + # xmax, score] represent the region of the bounding box (in normalized + # coordinates) and the confidence score of the instance + keypoints = outputs['output_0'].numpy() + + count_of_people = np.sum(keypoints[0, :, -1] > configs['people_detection_threshold'] ) + # print_verbose_only("count_of_people", count_of_people) + + # there are 6 people + # there are 17 body points and therefore 3*17=51 numbers per person + return keypoints[:, :, :51].reshape((6, 17, 3))[0] + + +def is_there_person(points, configs): + """ + return True/False of whether there is a person + """ + visible_joints = np.sum(points[:, -1] > configs['point_detection_threshold']) + return visible_joints >= 3 + + +def is_person_facing_camera(points, configs): + """ + return True/False depending on if the person is facing camera or not + """ + LEFT_EYE = 1 + NOSE = 0 + RIGHT_EYE = 2 + visible_joints_face = np.sum(points[:5, -1] > configs['point_detection_threshold']) + facing_forward = points[LEFT_EYE][1] > points[NOSE][1] > points[RIGHT_EYE][1] + return visible_joints_face >= 3 and facing_forward + + +def get_heading_angle( + points, + configs, + fov=90, + image_width=1, + offset=0, + scaling=1): + """ + get the heading angle from the camera's perspective to the person, + in degree, relative to the center of the field of view + """ + visible_points = points[points[:, -1] > configs['point_detection_threshold']] + x_mean = np.mean(visible_points[:, 1]) + x_angle_radian = math.atan( + (x_mean - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) + return offset + scaling * math.degrees(x_angle_radian) + + +def get_x_y_coord( + points, + configs + ): + image_width = configs['image_width'] + image_height = configs['image_height'] + visible_points = points[points[:, -1] > configs['point_detection_threshold']] + x_mean = np.mean(visible_points[:, 1]) + y_mean = np.mean(visible_points[:, 0]) + return x_mean * image_width, y_mean * image_height + + +def process_frame(model, image, person_array,configs): + """ + process a frame. Determine keypoints and number of people and + heading angle. 
+ """ + + input_size = 256 + input_image = tf.expand_dims(image, axis=0) + input_image = tf.image.resize_with_pad(input_image, input_size, input_size) + + # Run model inference + keypoints = movenet(input_image, model,configs) + + person_array[0].heading_angle = get_heading_angle(keypoints,configs) + person_array[0].x, person_array[0].y = get_x_y_coord(keypoints,configs) + person_array[0].on_screen = is_there_person(keypoints, configs) + is_there_anyone = is_there_person(keypoints, configs) + return is_there_anyone + + +class Person: + """ + struct to store information for a detected person + """ + def __init__(self): + self.x = -1.0 + self.y = -1.0 + self.z = -1.0 + self.on_screen = False + self.heading_angle = 0.0 + + +class LidarCameraSubscriber(Node): + def print_and_log(self, string): + self.get_logger().info(string) + print(string) + + + def __init__(self,parser_args,model,configs): + #make array of 6 person + self.person_array = [Person() for _ in range(6)] + self.is_there_anyone = False + self.cur_state = "SearchState" # initial state + self.parser_args = parser_args + self.model = model + self.configs=configs + + camera_transformation_k = configs['camera_transformation_k'] + self.camera_transformation_k = read_space_separated_matrix(camera_transformation_k) + rotation_matrix = configs['rotation_matrix'] + self.rotation_matrix = read_space_separated_matrix(rotation_matrix).T + self.translation_vector = np.array(configs['translation_vector']) + self.inverse_camera_transformation_k = np.linalg.inv(self.camera_transformation_k) + self.inverse_rotation_matrix = np.linalg.inv(self.rotation_matrix) + + super().__init__('image_subscriber') + self.camera_subscription = self.create_subscription( + Image, + 'camera', + self.camera_callback, + 10) + self.camera_subscription + self.bridge = CvBridge() + + self.lidar_subscription = self.create_subscription( + PointCloud2, + 'velodyne_points', + self.lidar_callback, + 10) + self.lidar_subscription + + self.state_subscription = self.create_subscription( + String, + '/trailbot_state', + self.state_callback, + 10) + + + #topics to publish + self.is_person_publisher = self.create_publisher( + Bool, + 'is_person_topic', + 10) + # self.angle_publisher = self.create_publisher( + # Float32, + # 'angle_topic', + # 10) + self.pose_publisher = self.create_publisher( + PoseStamped, + 'target_location', + 10) + self.timestamp = 0 + + # if this is -1, node will publish constantly (as camera FPS) + self.publishing_frequency = configs['publishing_frequency'] + + #run the publish_message function according to publishing_frequency + self.create_timer(1/self.publishing_frequency, self.publish_message) + self.print_and_log('Human Detection ready...') + + ascii_numbers = r""" + ____ _ _ _ ____ + |___ | | | |___ + | | \/ |___ + + ____ ____ _ _ ____ + |___ | | | | |__/ + | |__| |__| | \ + + ___ _ _ ____ ____ ____ + | |__| |__/ |___ |___ + | | | | \ |___ |___ + + ___ _ _ _ ____ + | | | | | | + | |_|_| |__| + + ____ _ _ ____ + | | |\ | |___ + |__| | \| |___ + + ____ ___ ____ ____ ___ ____ ___ + [__ | |__| |__/ | |___ | \ | + ___] | | | | \ | |___ |__/ . 
+ """.strip().split('\n\n') + + for num in ascii_numbers[-6:]: + self.print_and_log(f"\n{num}\n") + time.sleep(1) + + + def state_callback(self, msg): + self.cur_state = msg.data + + + def camera_callback(self, msg): + if self.cur_state!="SearchState" and self.cur_state!="ApproachState": + return + + cv_image = self.bridge.imgmsg_to_cv2( + msg, desired_encoding='passthrough') + self.is_there_anyone = process_frame(self.model, cv_image,self.person_array, self.configs) + self.timestamp = msg.header.stamp + + + def lidar_callback(self, msg): + if self.cur_state!="SearchState" and self.cur_state!="ApproachState": + return + + if not self.is_there_anyone: + return + + # Deserialize PointCloud2 data into xyz points + point_gen = pc2.read_points( + msg, field_names=( + "x", "y", "z"), skip_nans=True) + # points = np.array(list(point_gen)) + points = [[x, y, z] for x, y, z in point_gen] + points = np.array(points) + points2d = convert_to_camera_frame( + points, + self.camera_transformation_k, + self.rotation_matrix, + self.translation_vector, + self.configs) + + #update depth for every person + for person in self.person_array: + if not person.on_screen: + person.z = -1.0 + else: + person.z = estimate_depth(person.x, person.y, points2d,self.configs) + self.timestamp = msg.header.stamp + # if this is -1, node will publish constantly (as camera FPS) + if not self.publishing_frequency>0: + self.publish_message("lidar") + + def publish_message(self,source_str="timer"): + """ publish message is somebody is detected""" + if self.cur_state!="SearchState" and self.cur_state!="ApproachState": + return + if not self.is_there_anyone: + return + #person0 for debugging purpse + person0 = self.person_array[0] + message = f"{source_str:<7}" + message += f" person: {'YES' if self.is_there_anyone else 'NO '}" + message += f" angle: {person0.heading_angle:<20}" + message += f"person_coordinate: {person0.x:<22} {person0.y:<22} {person0.z:22}" + print_verbose_only(self.parser_args, message) + self.print_and_log(message) + + # Publish the message + is_person_msg = Bool() + is_person_msg.data = bool(self.is_there_anyone) + self.is_person_publisher.publish(is_person_msg) + + # angle_msg = Float32() + # angle_msg.data = angle + # self.angle_publisher.publish(angle_msg) + + pose_stamped_msg = PoseStamped() + pose_stamped_msg.header.stamp = self.timestamp + pose_stamped_msg.header.frame_id = "velodyne" + + lidar_x,lidar_y,lidar_z = convert_to_lidar_frame( + (person0.x,person0.y,person0.z), + self.inverse_camera_transformation_k, + self.inverse_rotation_matrix, + self.translation_vector, + self.configs) + + #position + pose_stamped_msg.pose.position.x = lidar_x + pose_stamped_msg.pose.position.y = lidar_y + pose_stamped_msg.pose.position.z = lidar_z + + #orientation + yaw = math.atan2(lidar_y, lidar_x) + pose_stamped_msg.pose.orientation.x = 0.0 + pose_stamped_msg.pose.orientation.y = 0.0 + pose_stamped_msg.pose.orientation.z = math.sin(yaw/2) + pose_stamped_msg.pose.orientation.w = math.cos(yaw / 2) + + self.pose_publisher.publish(pose_stamped_msg) + +def read_space_separated_matrix(string): + """ + convert space separated matrix string to np matrix + """ + lines = string.strip().split('\n') + matrix = [] + for line in lines: + values = line.split() # Exclude the first element 'rotation_matrix' + matrix.append([float(value) for value in values]) + numpy_matrix = np.array(matrix) + return numpy_matrix + + +def convert_to_lidar_frame( + uv_coordinate, + inverse_camera_transformation_k, + inverse_rotation_matrix, + 
translation_vector, + configs): + """ + convert 2d camera coordinate + depth into 3d lidar frame + """ + image_height = configs['image_height'] + + point_cloud = np.empty( (3,) , dtype=float) + point_cloud[2] = uv_coordinate[2] + point_cloud[1] = ( image_height - uv_coordinate[1] )*point_cloud[2] + point_cloud[0] = uv_coordinate[0]*point_cloud[2] + + point_cloud = inverse_camera_transformation_k @ point_cloud + point_cloud = inverse_rotation_matrix @ (point_cloud-translation_vector) + return point_cloud + + +def convert_to_camera_frame( + point_cloud, + camera_transformation_k, + rotation_matrix, + translation_vector, + configs): + """ + convert 3d lidar data into 2d coordinate of the camera frame + depth + """ + image_height = configs['image_height'] + + length = point_cloud.shape[0] + translation = np.tile(translation_vector, (length, 1)).T + + point_cloud = point_cloud.T + point_cloud = rotation_matrix@point_cloud + translation + point_cloud = camera_transformation_k @ point_cloud + + uv_coordinate = np.empty_like(point_cloud) + + """ + uv = [x/z, y/z, z], and y is opposite so the minus imageheight + """ + uv_coordinate[0] = point_cloud[0] / point_cloud[2] + uv_coordinate[1] = image_height - point_cloud[1] / point_cloud[2] + uv_coordinate[2] = point_cloud[2] + + uv_depth = uv_coordinate[2, :] + filtered_uv_coordinate = uv_coordinate[:, uv_depth >= 0] + return filtered_uv_coordinate + + +def estimate_depth(x, y, np_2d_array,configs): + """ + estimate the depth by finding points closest to x,y from thhe 2d array + """ + # Calculate the distance between each point and the target coordinates (x, y) + distances_sq = (np_2d_array[0,:] - x) ** 2 + (np_2d_array[1,:] - y) ** 2 + + # Find the indices of the k nearest points + k = 5 # Number of nearest neighbors we want + closest_indices = np.argpartition(distances_sq, k)[:k] + pixel_distance_threshold = 2000 + + valid_indices = [idx for idx in closest_indices if distances_sq[idx]<=pixel_distance_threshold] + if len(valid_indices) == 0: + # lidar points disappears usually around 0.4m + distance_where_lidar_stops_working = configs['distance_where_lidar_stops_working'] + return distance_where_lidar_stops_working + + filtered_indices = np.array(valid_indices) + # Get the depth value of the closest point + closest_depths = np_2d_array[2,filtered_indices] + + return np.mean(closest_depths) + + +def main(args=None, debug_mode=False): + + with open('configs.yaml', 'r') as file: + configs = yaml.safe_load(file) + MODEL_URL = configs['MODEL_URL'] + SAVED_MODEL_PATH = configs['SAVED_MODEL_PATH'] + + parser_args = parse_arguments() + if debug_mode: + parser_args.verbose = True + + if parser_args.download_model: + print('downloading model...') + model = download_model(SAVED_MODEL_PATH,MODEL_URL) + else: + model = load_saved_model(SAVED_MODEL_PATH) + # print("Human detection ready...") + rclpy.init(args=args) + subscriber = LidarCameraSubscriber(parser_args,model,configs) + rclpy.spin(subscriber) + subscriber.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + print("\n\nDEBUG MODE ON\n\n") + main(debug_mode=True) From 87883b2c230d2ac4c35807174279baf1758156cf Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 22:16:00 +0800 Subject: [PATCH 20/67] change movenet to yolov7 --- .../yolov7_skeleton/human_detection_node.py | 175 ++++++++---------- 1 file changed, 80 insertions(+), 95 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 
07bb4ace..b7107381 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -4,19 +4,19 @@ from geometry_msgs.msg import PoseStamped import math import numpy as np -import os import rclpy from rclpy.node import Node from sensor_msgs.msg import Image, PointCloud2 import sensor_msgs_py.point_cloud2 as pc2 from std_msgs.msg import Bool, Float32, String -import tarfile -import tensorflow as tf -import tensorflow_hub as hub +# import tensorflow as tf +# import tensorflow_hub as hub import time -import urllib.request import yaml +import yolov7 +import torch + def parse_arguments(): """ handle command line arguments @@ -40,29 +40,11 @@ def parse_arguments(): return parser.parse_args() -def download_model(SAVED_MODEL_PATH,MODEL_URL): - # Create the directory if it doesn't exist - os.makedirs(os.path.dirname(SAVED_MODEL_PATH), exist_ok=True) - - # Download the compressed model from the URL - model_path, _ = urllib.request.urlretrieve(MODEL_URL) - - # Extract the compressed model to the specified path - with tarfile.open(model_path, "r:gz") as tar: - tar.extractall(SAVED_MODEL_PATH) +# def download_model(SAVED_MODEL_PATH,MODEL_URL): +# # Create the directory if it doesn't exist +# os.makedirs(os.path.dirname(SAVED_MODEL_PATH), exist_ok=True) - model = hub.load(SAVED_MODEL_PATH).signatures['serving_default'] - return model - - -def load_saved_model(SAVED_MODEL_PATH): - """ - load the saved model from SAVED_MODEL_PATH - """ - if not os.path.exists(SAVED_MODEL_PATH): - raise FileNotFoundError(f"Model not found at {SAVED_MODEL_PATH}") - model = hub.load(SAVED_MODEL_PATH).signatures['serving_default'] - return model +# ### to be finished def print_verbose_only(parser_args,*args, **kwargs): @@ -73,34 +55,34 @@ def print_verbose_only(parser_args,*args, **kwargs): print(*args, **kwargs) -def movenet(input_image, model,configs): - """ - movenet model: - Gets input image and outputs array of keypoints with certainty score - downloaded from #https://tfhub.dev/google/movenet/multipose/lightning/1 - """ - # SavedModel format expects tensor type of int32. - input_image = tf.cast(input_image, dtype=tf.int32) - outputs = model(input_image) # Output is a [1, 6, 56] tensor. - - # The first 17 * 3 elements are the keypoint locations and scores in the - # format: [y_0, x_0, s_0, y_1, x_1, s_1, …, y_16, x_16, s_16], where y_i, - # x_i, s_i are the yx-coordinates (normalized to image frame, e.g. range - # in [0.0, 1.0]) and confidence scores of the i-th joint correspondingly. - # The order of the 17 keypoint joints is: [nose, left eye, right eye, left - # ear, right ear, left shoulder, right shoulder, left elbow, right elbow, - # left wrist, right wrist, left hip, right hip, left knee, right knee, - # left ankle, right ankle]. 
The remaining 5 elements [ymin, xmin, ymax, - # xmax, score] represent the region of the bounding box (in normalized - # coordinates) and the confidence score of the instance - keypoints = outputs['output_0'].numpy() - - count_of_people = np.sum(keypoints[0, :, -1] > configs['people_detection_threshold'] ) - # print_verbose_only("count_of_people", count_of_people) - - # there are 6 people - # there are 17 body points and therefore 3*17=51 numbers per person - return keypoints[:, :, :51].reshape((6, 17, 3))[0] +# def movenet(input_image, model,configs): +# """ +# movenet model: +# Gets input image and outputs array of keypoints with certainty score +# downloaded from #https://tfhub.dev/google/movenet/multipose/lightning/1 +# """ +# # SavedModel format expects tensor type of int32. +# input_image = tf.cast(input_image, dtype=tf.int32) +# outputs = model(input_image) # Output is a [1, 6, 56] tensor. + +# # The first 17 * 3 elements are the keypoint locations and scores in the +# # format: [y_0, x_0, s_0, y_1, x_1, s_1, …, y_16, x_16, s_16], where y_i, +# # x_i, s_i are the yx-coordinates (normalized to image frame, e.g. range +# # in [0.0, 1.0]) and confidence scores of the i-th joint correspondingly. +# # The order of the 17 keypoint joints is: [nose, left eye, right eye, left +# # ear, right ear, left shoulder, right shoulder, left elbow, right elbow, +# # left wrist, right wrist, left hip, right hip, left knee, right knee, +# # left ankle, right ankle]. The remaining 5 elements [ymin, xmin, ymax, +# # xmax, score] represent the region of the bounding box (in normalized +# # coordinates) and the confidence score of the instance +# keypoints = outputs['output_0'].numpy() + +# count_of_people = np.sum(keypoints[0, :, -1] > configs['people_detection_threshold'] ) +# # print_verbose_only("count_of_people", count_of_people) + +# # there are 6 people +# # there are 17 body points and therefore 3*17=51 numbers per person +# return keypoints[:, :, :51].reshape((6, 17, 3))[0] def is_there_person(points, configs): @@ -123,24 +105,6 @@ def is_person_facing_camera(points, configs): return visible_joints_face >= 3 and facing_forward -def get_heading_angle( - points, - configs, - fov=90, - image_width=1, - offset=0, - scaling=1): - """ - get the heading angle from the camera's perspective to the person, - in degree, relative to the center of the field of view - """ - visible_points = points[points[:, -1] > configs['point_detection_threshold']] - x_mean = np.mean(visible_points[:, 1]) - x_angle_radian = math.atan( - (x_mean - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) - return offset + scaling * math.degrees(x_angle_radian) - - def get_x_y_coord( points, configs @@ -153,24 +117,42 @@ def get_x_y_coord( return x_mean * image_width, y_mean * image_height -def process_frame(model, image, person_array,configs): +def xyxy_to_centroid(xyxy): + x1, y1, x2, y2 = xyxy + centroid_x = (x1 + x2) / 2 + centroid_y = (y1 + y2) / 2 + return (centroid_x, centroid_y) +def get_heading_angle( + centroid, + fov=90, + image_width=1, + offset=0, + scaling=1): + """ + get the heading angle from the camera's perspective to the person, + in degree, relative to the center of the field of view + """ + centroid_x, centroid_y = centroid + x_angle_radian = math.atan( + (centroid_x - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) + return offset + scaling * math.degrees(x_angle_radian) +def process_frame(model,image,configs): """ process a frame. 
Determine keypoints and number of people and heading angle. """ - - input_size = 256 - input_image = tf.expand_dims(image, axis=0) - input_image = tf.image.resize_with_pad(input_image, input_size, input_size) - # Run model inference - keypoints = movenet(input_image, model,configs) - - person_array[0].heading_angle = get_heading_angle(keypoints,configs) - person_array[0].x, person_array[0].y = get_x_y_coord(keypoints,configs) - person_array[0].on_screen = is_there_person(keypoints, configs) - is_there_anyone = is_there_person(keypoints, configs) - return is_there_anyone + person_array = [] + bounding_boxes, identities, confidences=model.process_frame(image,view_img=False) + for i in range(len(bounding_boxes)): + person = Person() + centroid = xyxy_to_centroid(bounding_boxes[i]) + person.heading_angle = get_heading_angle(centroid) + person.x, person.y = centroid + person.on_screen=True + person.id = identities[i] + person_array.append(person) + return person_array class Person: @@ -183,6 +165,7 @@ def __init__(self): self.z = -1.0 self.on_screen = False self.heading_angle = 0.0 + self.id = 0 class LidarCameraSubscriber(Node): @@ -193,7 +176,7 @@ def print_and_log(self, string): def __init__(self,parser_args,model,configs): #make array of 6 person - self.person_array = [Person() for _ in range(6)] + self.person_array = [] self.is_there_anyone = False self.cur_state = "SearchState" # initial state self.parser_args = parser_args @@ -294,7 +277,8 @@ def camera_callback(self, msg): cv_image = self.bridge.imgmsg_to_cv2( msg, desired_encoding='passthrough') - self.is_there_anyone = process_frame(self.model, cv_image,self.person_array, self.configs) + self.person_array = process_frame(self.model, cv_image, self.configs) + self.is_there_anyone = len(self.person_array)>0 self.timestamp = msg.header.stamp @@ -481,14 +465,15 @@ def main(args=None, debug_mode=False): if debug_mode: parser_args.verbose = True - if parser_args.download_model: - print('downloading model...') - model = download_model(SAVED_MODEL_PATH,MODEL_URL) - else: - model = load_saved_model(SAVED_MODEL_PATH) - # print("Human detection ready...") + # if parser_args.download_model: + # print('downloading model...') + # download_model(SAVED_MODEL_PATH,MODEL_URL) + + with torch.no_grad(): + yolo_sort_tracker=yolov7.Yolo_sort_tracker(save_result=False) + rclpy.init(args=args) - subscriber = LidarCameraSubscriber(parser_args,model,configs) + subscriber = LidarCameraSubscriber(parser_args,yolo_sort_tracker,configs) rclpy.spin(subscriber) subscriber.destroy_node() rclpy.shutdown() From 91b4ef9cecd8f8cb28abf2931a6bd287f9f35568 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 7 Aug 2023 22:30:51 +0800 Subject: [PATCH 21/67] remove extra dependencies --- .../yolov7_skeleton/models/common.py | 22 +++++++++---------- .../yolov7_skeleton/utils/datasets.py | 19 ++++++++++------ .../yolov7_skeleton/utils/general.py | 4 ++-- 3 files changed, 25 insertions(+), 20 deletions(-) diff --git a/human_detection/yolov7_skeleton/models/common.py b/human_detection/yolov7_skeleton/models/common.py index edb5edc9..acc73c28 100644 --- a/human_detection/yolov7_skeleton/models/common.py +++ b/human_detection/yolov7_skeleton/models/common.py @@ -3,12 +3,12 @@ from pathlib import Path import numpy as np -import pandas as pd +# import pandas as pd import requests import torch import torch.nn as nn import torch.nn.functional as F -from torchvision.ops import DeformConv2d +# from torchvision.ops import DeformConv2d from PIL import Image from torch.cuda import amp @@ 
-990,15 +990,15 @@ def render(self): self.display(render=True) # render results return self.imgs - def pandas(self): - # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) - new = copy(self) # return copy - ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns - cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns - for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): - a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update - setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) - return new + # def pandas(self): + # # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + # new = copy(self) # return copy + # ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + # cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + # for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + # a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + # setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + # return new def tolist(self): # return a list of Detections objects, i.e. 'for result in results.tolist():' diff --git a/human_detection/yolov7_skeleton/utils/datasets.py b/human_detection/yolov7_skeleton/utils/datasets.py index 5fe4f7bc..65f61494 100644 --- a/human_detection/yolov7_skeleton/utils/datasets.py +++ b/human_detection/yolov7_skeleton/utils/datasets.py @@ -18,13 +18,18 @@ import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset -from tqdm import tqdm - -import pickle -from copy import deepcopy -#from pycocotools import mask as maskUtils -from torchvision.utils import save_image -from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align +# from tqdm import tqdm +def tqdm(*args, **kwargs): + """ + This function does absolutely nothing and returns the first argument. 
+ """ + return args[0] if args else None + +# import pickle +# from copy import deepcopy +# #from pycocotools import mask as maskUtils +# from torchvision.utils import save_image +# from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ resample_segments, clean_str diff --git a/human_detection/yolov7_skeleton/utils/general.py b/human_detection/yolov7_skeleton/utils/general.py index decdcc64..ab6b81ca 100644 --- a/human_detection/yolov7_skeleton/utils/general.py +++ b/human_detection/yolov7_skeleton/utils/general.py @@ -13,7 +13,7 @@ import cv2 import numpy as np -import pandas as pd +# import pandas as pd import torch import torchvision import yaml @@ -25,7 +25,7 @@ # Settings torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 -pd.options.display.max_columns = 10 +# pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads From 3f4ff4efd9aa61d1e4ffbd73266f3cf4c2484212 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 17 Aug 2023 01:19:25 +0800 Subject: [PATCH 22/67] import vision_msgs --- human_detection/configs.yaml | 2 +- .../yolov7_skeleton/human_detection_node.py | 34 ++----------------- 2 files changed, 3 insertions(+), 33 deletions(-) diff --git a/human_detection/configs.yaml b/human_detection/configs.yaml index dc3df24c..036c51c8 100644 --- a/human_detection/configs.yaml +++ b/human_detection/configs.yaml @@ -1,5 +1,5 @@ MODEL_URL: "https://tfhub.dev/google/movenet/multipose/lightning/1?tf-hub-format=compressed" -SAVED_MODEL_PATH: "/home/trailbot/trail_ws/multipose_model" +SAVED_MODEL_PATH: "/mnt/c/Users/nathan/Documents/gits/TRAILBot/human_detection/multipose" people_detection_threshold: 0.4 point_detection_threshold: 0.4 distance_where_lidar_stops_working: 0.4 diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index b7107381..c63ad917 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -1,3 +1,5 @@ + +from vision_msgs.msg import Detection2D, Detection2DArray, Detection3DArray, Detection3D # sudo apt-get install ros-humble-vision-msgs import argparse from cv_bridge import CvBridge from geometry_msgs.msg import Point @@ -85,38 +87,6 @@ def print_verbose_only(parser_args,*args, **kwargs): # return keypoints[:, :, :51].reshape((6, 17, 3))[0] -def is_there_person(points, configs): - """ - return True/False of whether there is a person - """ - visible_joints = np.sum(points[:, -1] > configs['point_detection_threshold']) - return visible_joints >= 3 - - -def is_person_facing_camera(points, configs): - """ - return True/False depending on if the person is facing camera or not - """ - LEFT_EYE = 1 - NOSE = 0 - RIGHT_EYE = 2 - visible_joints_face = np.sum(points[:5, -1] > configs['point_detection_threshold']) - facing_forward = points[LEFT_EYE][1] > points[NOSE][1] > points[RIGHT_EYE][1] - return visible_joints_face >= 3 and facing_forward - - -def get_x_y_coord( - points, - configs - ): - image_width = configs['image_width'] - image_height = configs['image_height'] - visible_points = points[points[:, -1] > 
configs['point_detection_threshold']] - x_mean = np.mean(visible_points[:, 1]) - y_mean = np.mean(visible_points[:, 0]) - return x_mean * image_width, y_mean * image_height - - def xyxy_to_centroid(xyxy): x1, y1, x2, y2 = xyxy centroid_x = (x1 + x2) / 2 From 488226b1f69a5f58739d410711b2cee0d5549d34 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 17 Aug 2023 01:27:26 +0800 Subject: [PATCH 23/67] Update human_detection_node.py untested --- .../yolov7_skeleton/human_detection_node.py | 66 ++++++++++++------- 1 file changed, 44 insertions(+), 22 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index c63ad917..6501a244 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -307,31 +307,53 @@ def publish_message(self,source_str="timer"): # angle_msg = Float32() # angle_msg.data = angle # self.angle_publisher.publish(angle_msg) + detection_array = Detection3DArray() - pose_stamped_msg = PoseStamped() - pose_stamped_msg.header.stamp = self.timestamp - pose_stamped_msg.header.frame_id = "velodyne" - - lidar_x,lidar_y,lidar_z = convert_to_lidar_frame( - (person0.x,person0.y,person0.z), - self.inverse_camera_transformation_k, - self.inverse_rotation_matrix, - self.translation_vector, - self.configs) + for person in self.person_array: + new_object = Detection3D() + lidar_x,lidar_y,lidar_z = convert_to_lidar_frame( + (person.x,person.y,person.z), + self.inverse_camera_transformation_k, + self.inverse_rotation_matrix, + self.translation_vector, + self.configs) + + new_object.bbox.center.position.x = lidar_x + new_object.bbox.center.position.y = lidar_y + new_object.bbox.center.position.z = lidar_z + new_object.bbox.size.x = 0 + new_object.bbox.size.y = 0 + new_object.bbox.center.orientation.w = 0 + + detection_array.detections.append(new_object) + + self.pose_publisher.publish(detection_array) + + + # pose_stamped_msg = PoseStamped() + # pose_stamped_msg.header.stamp = self.timestamp + # pose_stamped_msg.header.frame_id = "velodyne" + + # lidar_x,lidar_y,lidar_z = convert_to_lidar_frame( + # (person0.x,person0.y,person0.z), + # self.inverse_camera_transformation_k, + # self.inverse_rotation_matrix, + # self.translation_vector, + # self.configs) - #position - pose_stamped_msg.pose.position.x = lidar_x - pose_stamped_msg.pose.position.y = lidar_y - pose_stamped_msg.pose.position.z = lidar_z - - #orientation - yaw = math.atan2(lidar_y, lidar_x) - pose_stamped_msg.pose.orientation.x = 0.0 - pose_stamped_msg.pose.orientation.y = 0.0 - pose_stamped_msg.pose.orientation.z = math.sin(yaw/2) - pose_stamped_msg.pose.orientation.w = math.cos(yaw / 2) + # #position + # pose_stamped_msg.pose.position.x = lidar_x + # pose_stamped_msg.pose.position.y = lidar_y + # pose_stamped_msg.pose.position.z = lidar_z + + # #orientation + # yaw = math.atan2(lidar_y, lidar_x) + # pose_stamped_msg.pose.orientation.x = 0.0 + # pose_stamped_msg.pose.orientation.y = 0.0 + # pose_stamped_msg.pose.orientation.z = math.sin(yaw/2) + # pose_stamped_msg.pose.orientation.w = math.cos(yaw / 2) - self.pose_publisher.publish(pose_stamped_msg) + # self.pose_publisher.publish(pose_stamped_msg) def read_space_separated_matrix(string): """ From 4469eac7be5f762c01e60d23a868bdfb0c70ec8e Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 21 Aug 2023 22:21:40 +0800 Subject: [PATCH 24/67] Update readme.md --- human_detection/yolov7_skeleton/readme.md | 4 +++- 1 file changed, 3 
insertions(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/readme.md b/human_detection/yolov7_skeleton/readme.md index 61b7e31f..8f653bd3 100644 --- a/human_detection/yolov7_skeleton/readme.md +++ b/human_detection/yolov7_skeleton/readme.md @@ -6,10 +6,12 @@ Usage: 1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq -2. run ``` +2. run `sudo apt-get install ros-humble-vision-msgs` +3. run ``` python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source video.mp4 ``` to run inference on __video.mp4__ to run inference on webcam, use ``` python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source webcam ``` + From 25aeb7da0d4ea9003c1704a691bff3ee40a81502 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 24 Aug 2023 22:28:03 +0800 Subject: [PATCH 25/67] added model file existence check --- .gitignore | 5 ++++- human_detection/yolov7_skeleton/sort.py | 2 +- human_detection/yolov7_skeleton/yolov7.py | 6 +++++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 53d738ba..d865b4e2 100644 --- a/.gitignore +++ b/.gitignore @@ -52,4 +52,7 @@ qtcreator-* CATKIN_IGNORE #vscode -.vscode \ No newline at end of file +.vscode +human_detection/multipose/saved_model.pb +human_detection/multipose/variables/variables.data-00000-of-00001 +human_detection/multipose/variables/variables.index diff --git a/human_detection/yolov7_skeleton/sort.py b/human_detection/yolov7_skeleton/sort.py index 11420268..1ca0e02c 100644 --- a/human_detection/yolov7_skeleton/sort.py +++ b/human_detection/yolov7_skeleton/sort.py @@ -3,7 +3,7 @@ import os import numpy as np import matplotlib -matplotlib.use('TkAgg') +# matplotlib.use('TkAgg') import matplotlib.pyplot as plt import matplotlib.patches as patches from skimage import io diff --git a/human_detection/yolov7_skeleton/yolov7.py b/human_detection/yolov7_skeleton/yolov7.py index c8995267..16ef62a6 100644 --- a/human_detection/yolov7_skeleton/yolov7.py +++ b/human_detection/yolov7_skeleton/yolov7.py @@ -10,6 +10,7 @@ from utils.general import check_img_size, non_max_suppression, scale_coords, set_logging, increment_path from utils.torch_utils import select_device, TracedModel import sort +import os """Function to draw bounding boxes""" def draw_boxes(img, bbox, identities=None, categories=None, confidences=None, names=None, colors=None, thickness=2, hide_bounding_box=False,hide_labels=False): @@ -64,10 +65,13 @@ def __init__(self, self.use_half_precision = self.device.type != 'cpu' # enable half precision if on GPU (only supported on CUDA) # Load model + if not os.path.exists(weights_file): + raise FileNotFoundError(f"The file '{weights_file}' does not exist.") + self.model = attempt_load(weights_file, map_location=self.device) # load FP32 model self.stride = int(self.model.stride.max()) # model stride, which is the step size or the number of units the sliding window moves when performing operations like convolution or pooling self.imgsize = check_img_size(img_size, s=self.stride) # check img_size - if not traced_model_already_exists: + if not os.path.exists("traced_model.pt") or not traced_model_already_exists: self.model = TracedModel(self.model, self.device, img_size) if self.use_half_precision: self.model.half() # to FP16 From b2879228e39a300bde9c88551f06d112f35f76d2 Mon Sep 17 00:00:00 2001 
From: Nathan Date: Thu, 24 Aug 2023 22:44:05 +0800 Subject: [PATCH 26/67] Update human_detection_node.py --- .../yolov7_skeleton/human_detection_node.py | 27 +++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 6501a244..c1550eb9 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -1,5 +1,5 @@ -from vision_msgs.msg import Detection2D, Detection2DArray, Detection3DArray, Detection3D # sudo apt-get install ros-humble-vision-msgs +from vision_msgs.msg import Detection3DArray, Detection3D # sudo apt-get install ros-humble-vision-msgs import argparse from cv_bridge import CvBridge from geometry_msgs.msg import Point @@ -470,6 +470,29 @@ def main(args=None, debug_mode=False): subscriber.destroy_node() rclpy.shutdown() + +import subprocess, threading, os +def run_shell_command(command): + with open(os.devnull, 'w') as nullfile: + process = subprocess.Popen(command, shell=True, stdout=nullfile, stderr=subprocess.STDOUT) + process.communicate() + +command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" +command2 = "ros2 bag play /home/trailbot/bags/2023-07-13-17:03" + if __name__ == '__main__': print("\n\nDEBUG MODE ON\n\n") - main(debug_mode=True) + + # Create threads for each shell command and main function + thread1 = threading.Thread(target=run_shell_command, args=(command1,)) + thread2 = threading.Thread(target=run_shell_command, args=(command2,)) + thread_main = threading.Thread(target=main, args=(None,True)) + + thread1.start() + thread2.start() + thread_main.start() + + # Wait for the threads to finish + thread1.join() + thread2.join() + thread_main.join() \ No newline at end of file From c1738851fce9bf939c2f14666d4011757b89e2f6 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:15:57 +0800 Subject: [PATCH 27/67] Update human_detection_node.py --- .../yolov7_skeleton/human_detection_node.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index c1550eb9..08376135 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -318,12 +318,12 @@ def publish_message(self,source_str="timer"): self.translation_vector, self.configs) - new_object.bbox.center.position.x = lidar_x - new_object.bbox.center.position.y = lidar_y - new_object.bbox.center.position.z = lidar_z - new_object.bbox.size.x = 0 - new_object.bbox.size.y = 0 - new_object.bbox.center.orientation.w = 0 + new_object.bbox.center.position.x = float(lidar_x) + new_object.bbox.center.position.y = float(lidar_y) + new_object.bbox.center.position.z = float(lidar_z) + new_object.bbox.size.x = float(0) + new_object.bbox.size.y = float(0) + new_object.bbox.center.orientation.w = float(0) detection_array.detections.append(new_object) From c36dd05821d7412bf12e9669ac2d7808a0c68b1f Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:25:13 +0800 Subject: [PATCH 28/67] Update human_detection_node.py --- .../yolov7_skeleton/human_detection_node.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py 
b/human_detection/yolov7_skeleton/human_detection_node.py index 08376135..bf14dc6b 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -2,7 +2,6 @@ from vision_msgs.msg import Detection3DArray, Detection3D # sudo apt-get install ros-humble-vision-msgs import argparse from cv_bridge import CvBridge -from geometry_msgs.msg import Point from geometry_msgs.msg import PoseStamped import math import numpy as np @@ -189,14 +188,15 @@ def __init__(self,parser_args,model,configs): Bool, 'is_person_topic', 10) - # self.angle_publisher = self.create_publisher( - # Float32, - # 'angle_topic', + # self.pose_publisher = self.create_publisher( + # PoseStamped, + # 'target_location', # 10) - self.pose_publisher = self.create_publisher( - PoseStamped, - 'target_location', + self.detection3DArray_publisher = self.create_publisher( + Detection3DArray, + 'detection_location', 10) + self.timestamp = 0 # if this is -1, node will publish constantly (as camera FPS) @@ -321,13 +321,13 @@ def publish_message(self,source_str="timer"): new_object.bbox.center.position.x = float(lidar_x) new_object.bbox.center.position.y = float(lidar_y) new_object.bbox.center.position.z = float(lidar_z) - new_object.bbox.size.x = float(0) - new_object.bbox.size.y = float(0) - new_object.bbox.center.orientation.w = float(0) + # new_object.bbox.size.x = float(0) + # new_object.bbox.size.y = float(0) + # new_object.bbox.center.orientation.w = float(0) detection_array.detections.append(new_object) - self.pose_publisher.publish(detection_array) + self.detection3DArray_publisher.publish(detection_array) # pose_stamped_msg = PoseStamped() From b2fe592533e03c21af30acc4eef293c7361713ba Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:30:55 +0800 Subject: [PATCH 29/67] d --- human_detection/configs.yaml | 2 -- human_detection/readme.md | 7 +++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/human_detection/configs.yaml b/human_detection/configs.yaml index 036c51c8..a95057ae 100644 --- a/human_detection/configs.yaml +++ b/human_detection/configs.yaml @@ -1,5 +1,3 @@ -MODEL_URL: "https://tfhub.dev/google/movenet/multipose/lightning/1?tf-hub-format=compressed" -SAVED_MODEL_PATH: "/mnt/c/Users/nathan/Documents/gits/TRAILBot/human_detection/multipose" people_detection_threshold: 0.4 point_detection_threshold: 0.4 distance_where_lidar_stops_working: 0.4 diff --git a/human_detection/readme.md b/human_detection/readme.md index 02a22fc2..182b7b02 100644 --- a/human_detection/readme.md +++ b/human_detection/readme.md @@ -1,3 +1,10 @@ +## Configs (see `configs.yaml`) +- camera_transformation_k: +- rotation_matrix: +- translation_vector: +- publishing_frequency: frequency to publish the detection location messages. 
If this is -1, the node will publish constantly (as camera FPS) + + ## Arguments From 47f82d56cebc0e81894ce2e4e4b9754daa842091 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:33:44 +0800 Subject: [PATCH 30/67] Update human_detection_node.py --- .../yolov7_skeleton/human_detection_node.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index bf14dc6b..27f3d827 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -242,9 +242,10 @@ def state_callback(self, msg): def camera_callback(self, msg): + if self.cur_state!="SearchState" and self.cur_state!="ApproachState": return - + print("\nCAMERA!\n") cv_image = self.bridge.imgmsg_to_cv2( msg, desired_encoding='passthrough') self.person_array = process_frame(self.model, cv_image, self.configs) @@ -290,6 +291,7 @@ def publish_message(self,source_str="timer"): return if not self.is_there_anyone: return + #person0 for debugging purpse person0 = self.person_array[0] message = f"{source_str:<7}" @@ -304,9 +306,6 @@ def publish_message(self,source_str="timer"): is_person_msg.data = bool(self.is_there_anyone) self.is_person_publisher.publish(is_person_msg) - # angle_msg = Float32() - # angle_msg.data = angle - # self.angle_publisher.publish(angle_msg) detection_array = Detection3DArray() for person in self.person_array: @@ -450,8 +449,6 @@ def main(args=None, debug_mode=False): with open('configs.yaml', 'r') as file: configs = yaml.safe_load(file) - MODEL_URL = configs['MODEL_URL'] - SAVED_MODEL_PATH = configs['SAVED_MODEL_PATH'] parser_args = parse_arguments() if debug_mode: @@ -477,11 +474,11 @@ def run_shell_command(command): process = subprocess.Popen(command, shell=True, stdout=nullfile, stderr=subprocess.STDOUT) process.communicate() -command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" -command2 = "ros2 bag play /home/trailbot/bags/2023-07-13-17:03" if __name__ == '__main__': print("\n\nDEBUG MODE ON\n\n") + command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" + command2 = "ros2 bag play /home/trailbot/bags/2023-07-13-17:03" # Create threads for each shell command and main function thread1 = threading.Thread(target=run_shell_command, args=(command1,)) From 2e25e2a35e2aabbf7fd4a7ac7e00fce71ce10ec5 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:37:05 +0800 Subject: [PATCH 31/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 27f3d827..afa3f6cf 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -2,6 +2,7 @@ from vision_msgs.msg import Detection3DArray, Detection3D # sudo apt-get install ros-humble-vision-msgs import argparse from cv_bridge import CvBridge +import cv2 from geometry_msgs.msg import PoseStamped import math import numpy as np @@ -245,12 +246,17 @@ def camera_callback(self, msg): if self.cur_state!="SearchState" and self.cur_state!="ApproachState": return - print("\nCAMERA!\n") + cv_image = 
self.bridge.imgmsg_to_cv2( msg, desired_encoding='passthrough') self.person_array = process_frame(self.model, cv_image, self.configs) self.is_there_anyone = len(self.person_array)>0 self.timestamp = msg.header.stamp + if show_image_window:=True: + cv2.imshow("Camera Image", cv_image) + cv2.waitKey(0) # Wait for a key press + cv2.destroyAllWindows() # Close all OpenCV windows + def lidar_callback(self, msg): From 5e98062419e85d565084051db98dd89b1ac575d6 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:39:33 +0800 Subject: [PATCH 32/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 1 + 1 file changed, 1 insertion(+) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index afa3f6cf..1e3fda25 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -253,6 +253,7 @@ def camera_callback(self, msg): self.is_there_anyone = len(self.person_array)>0 self.timestamp = msg.header.stamp if show_image_window:=True: + print("CAMERA!") cv2.imshow("Camera Image", cv_image) cv2.waitKey(0) # Wait for a key press cv2.destroyAllWindows() # Close all OpenCV windows From eeeba2c9d5fae9a7caea5d2887b1290f9a7431e9 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:48:05 +0800 Subject: [PATCH 33/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 1e3fda25..f643816f 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -485,7 +485,7 @@ def run_shell_command(command): if __name__ == '__main__': print("\n\nDEBUG MODE ON\n\n") command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" - command2 = "ros2 bag play /home/trailbot/bags/2023-07-13-17:03" + command2 = "ros2 bag play /home/trailbot/bags/2023-07-27-16:51" # Create threads for each shell command and main function thread1 = threading.Thread(target=run_shell_command, args=(command1,)) From a2f3d385aa134bba1149203df298139ca565260f Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:50:25 +0800 Subject: [PATCH 34/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index f643816f..0cd91a76 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -485,7 +485,7 @@ def run_shell_command(command): if __name__ == '__main__': print("\n\nDEBUG MODE ON\n\n") command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" - command2 = "ros2 bag play /home/trailbot/bags/2023-07-27-16:51" + command2 = ""#"ros2 bag play /home/trailbot/bags/2023-07-13-17:03" # Create threads for each shell command and main function thread1 = threading.Thread(target=run_shell_command, args=(command1,)) From a70be909aa966aca05c388561dfaf214abeb6437 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:54:26 
+0800 Subject: [PATCH 35/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 0cd91a76..0b99f7b3 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -485,7 +485,7 @@ def run_shell_command(command): if __name__ == '__main__': print("\n\nDEBUG MODE ON\n\n") command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" - command2 = ""#"ros2 bag play /home/trailbot/bags/2023-07-13-17:03" + command2 = "ros2 bag play /home/trailbot/bags/human_tracking/" # Create threads for each shell command and main function thread1 = threading.Thread(target=run_shell_command, args=(command1,)) From e301862aedc3ab7a06dce5d92a4c6e2e58ebd0db Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 20:54:45 +0800 Subject: [PATCH 36/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 0b99f7b3..6ac5f572 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -252,7 +252,7 @@ def camera_callback(self, msg): self.person_array = process_frame(self.model, cv_image, self.configs) self.is_there_anyone = len(self.person_array)>0 self.timestamp = msg.header.stamp - if show_image_window:=True: + if show_image_window:=False: print("CAMERA!") cv2.imshow("Camera Image", cv_image) cv2.waitKey(0) # Wait for a key press From 75f86b35fed09f24e32c6ff24608f191ebecf9b2 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:00:32 +0800 Subject: [PATCH 37/67] Update human_detection_node.py --- .../yolov7_skeleton/human_detection_node.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 6ac5f572..502fd493 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -114,6 +114,8 @@ def process_frame(model,image,configs): # Run model inference person_array = [] bounding_boxes, identities, confidences=model.process_frame(image,view_img=False) + if identities==None: + return [] for i in range(len(bounding_boxes)): person = Person() centroid = xyxy_to_centroid(bounding_boxes[i]) @@ -299,15 +301,6 @@ def publish_message(self,source_str="timer"): if not self.is_there_anyone: return - #person0 for debugging purpse - person0 = self.person_array[0] - message = f"{source_str:<7}" - message += f" person: {'YES' if self.is_there_anyone else 'NO '}" - message += f" angle: {person0.heading_angle:<20}" - message += f"person_coordinate: {person0.x:<22} {person0.y:<22} {person0.z:22}" - print_verbose_only(self.parser_args, message) - self.print_and_log(message) - # Publish the message is_person_msg = Bool() is_person_msg.data = bool(self.is_there_anyone) @@ -316,6 +309,12 @@ def publish_message(self,source_str="timer"): detection_array = Detection3DArray() for person in self.person_array: + + message = f" angle: 
{round(person.heading_angle,2)}" + message += f"coord: {round(person.x,2)},{round(person.y,2)},{round(person.z,2)}" + # print_verbose_only(self.parser_args, message) + self.print_and_log(message) + new_object = Detection3D() lidar_x,lidar_y,lidar_z = convert_to_lidar_frame( (person.x,person.y,person.z), From 9bd48b951e3621136261d9c374be5d9aad156b42 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:01:26 +0800 Subject: [PATCH 38/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 502fd493..0e8b41b7 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -114,7 +114,7 @@ def process_frame(model,image,configs): # Run model inference person_array = [] bounding_boxes, identities, confidences=model.process_frame(image,view_img=False) - if identities==None: + if identities is None: return [] for i in range(len(bounding_boxes)): person = Person() From 8f59051a36437c82020419ef6084027a01136b8d Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:05:41 +0800 Subject: [PATCH 39/67] Update human_detection_node.py --- .../yolov7_skeleton/human_detection_node.py | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 0e8b41b7..dd9eee15 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -238,7 +238,15 @@ def __init__(self,parser_args,model,configs): for num in ascii_numbers[-6:]: self.print_and_log(f"\n{num}\n") time.sleep(1) - + def visualize_camera(self,show_image_window=True): + if show_image_window: + print("CAMERA!") + try: + cv2.imshow("Camera Image", self.cv_image) + cv2.waitKey(0) # Wait for a key press + cv2.destroyAllWindows() # Close all OpenCV windows + except: + pass def state_callback(self, msg): self.cur_state = msg.data @@ -249,16 +257,11 @@ def camera_callback(self, msg): if self.cur_state!="SearchState" and self.cur_state!="ApproachState": return - cv_image = self.bridge.imgmsg_to_cv2( + self.cv_image = self.bridge.imgmsg_to_cv2( msg, desired_encoding='passthrough') - self.person_array = process_frame(self.model, cv_image, self.configs) + self.person_array = process_frame(self.model, self.cv_image, self.configs) self.is_there_anyone = len(self.person_array)>0 self.timestamp = msg.header.stamp - if show_image_window:=False: - print("CAMERA!") - cv2.imshow("Camera Image", cv_image) - cv2.waitKey(0) # Wait for a key press - cv2.destroyAllWindows() # Close all OpenCV windows @@ -331,7 +334,8 @@ def publish_message(self,source_str="timer"): # new_object.bbox.center.orientation.w = float(0) detection_array.detections.append(new_object) - + if len(self.person_array)>=3: + self.visualize_camera() self.detection3DArray_publisher.publish(detection_array) From 167ab96150a4f36fc5a0dd9241117a5148aec17e Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:11:16 +0800 Subject: [PATCH 40/67] Update human_detection_node.py --- .../yolov7_skeleton/human_detection_node.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py 
b/human_detection/yolov7_skeleton/human_detection_node.py index dd9eee15..22892f0f 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -143,7 +143,7 @@ def __init__(self): class LidarCameraSubscriber(Node): def print_and_log(self, string): self.get_logger().info(string) - print(string) + print_verbose_only(string) def __init__(self,parser_args,model,configs): @@ -240,9 +240,15 @@ def __init__(self,parser_args,model,configs): time.sleep(1) def visualize_camera(self,show_image_window=True): if show_image_window: - print("CAMERA!") try: - cv2.imshow("Camera Image", self.cv_image) + # Create a copy of the image to draw the red dots on + image_with_dots = self.cv_image.copy() + + # Draw red dots on the image at specified xy coordinates + for person in self.person_array: + cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red circle at (x, y) + + cv2.imshow("Camera Image", image_with_dots) cv2.waitKey(0) # Wait for a key press cv2.destroyAllWindows() # Close all OpenCV windows except: @@ -313,8 +319,7 @@ def publish_message(self,source_str="timer"): for person in self.person_array: - message = f" angle: {round(person.heading_angle,2)}" - message += f"coord: {round(person.x,2)},{round(person.y,2)},{round(person.z,2)}" + message = f"coord: {round(person.x,2)},{round(person.y,2)},{round(person.z,2)}" # print_verbose_only(self.parser_args, message) self.print_and_log(message) From f6fa74903096ce237f48250be8247074474dfad5 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:19:13 +0800 Subject: [PATCH 41/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 22892f0f..4aad6219 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -143,7 +143,7 @@ def __init__(self): class LidarCameraSubscriber(Node): def print_and_log(self, string): self.get_logger().info(string) - print_verbose_only(string) + print_verbose_only(self.parser_args,string) def __init__(self,parser_args,model,configs): From 932de36b5a4d6e4cd7197d51e25765dd50d38872 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:21:21 +0800 Subject: [PATCH 42/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 4aad6219..ebabb098 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -147,7 +147,6 @@ def print_and_log(self, string): def __init__(self,parser_args,model,configs): - #make array of 6 person self.person_array = [] self.is_there_anyone = False self.cur_state = "SearchState" # initial state @@ -249,7 +248,6 @@ def visualize_camera(self,show_image_window=True): cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red circle at (x, y) cv2.imshow("Camera Image", image_with_dots) - cv2.waitKey(0) # Wait for a key press cv2.destroyAllWindows() # Close all OpenCV windows except: pass From 15208f738fecaff0f42ee286e5dcbcb4a130df50 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 
2023 21:30:35 +0800 Subject: [PATCH 43/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index ebabb098..d22955d1 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -248,7 +248,7 @@ def visualize_camera(self,show_image_window=True): cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red circle at (x, y) cv2.imshow("Camera Image", image_with_dots) - cv2.destroyAllWindows() # Close all OpenCV windows + cv2.waitKey(1) except: pass @@ -489,6 +489,10 @@ def run_shell_command(command): if __name__ == '__main__': + show_image_window = True + if show_image_window: + cv2.namedWindow("Camera Image", cv2.WINDOW_NORMAL) + print("\n\nDEBUG MODE ON\n\n") command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" command2 = "ros2 bag play /home/trailbot/bags/human_tracking/" From 4c5dac26762c2ef20efefa5e3e3c4967edba1e97 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:33:53 +0800 Subject: [PATCH 44/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index d22955d1..2aee6793 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -337,8 +337,7 @@ def publish_message(self,source_str="timer"): # new_object.bbox.center.orientation.w = float(0) detection_array.detections.append(new_object) - if len(self.person_array)>=3: - self.visualize_camera() + self.visualize_camera() self.detection3DArray_publisher.publish(detection_array) From 1ec1cd709f35ba3d62cbc318797d7b0f21d2ecb0 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:35:36 +0800 Subject: [PATCH 45/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 1 + 1 file changed, 1 insertion(+) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 2aee6793..77ae4846 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -239,6 +239,7 @@ def __init__(self,parser_args,model,configs): time.sleep(1) def visualize_camera(self,show_image_window=True): if show_image_window: + print("CAMERA") try: # Create a copy of the image to draw the red dots on image_with_dots = self.cv_image.copy() From 20ecff4b92d132ae854a0749fd50ac82ff5e7719 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:37:26 +0800 Subject: [PATCH 46/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 1 + 1 file changed, 1 insertion(+) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 77ae4846..89a51e68 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -241,6 +241,7 @@ def visualize_camera(self,show_image_window=True): if show_image_window: 
print("CAMERA") try: + cv2.imshow("Camera Image", self.cv_image) # Create a copy of the image to draw the red dots on image_with_dots = self.cv_image.copy() From 9fa7d5cee9b756fa5a784f7b89c18afec8d6d13b Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:39:59 +0800 Subject: [PATCH 47/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 89a51e68..eead9b3c 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -251,6 +251,7 @@ def visualize_camera(self,show_image_window=True): cv2.imshow("Camera Image", image_with_dots) cv2.waitKey(1) + cv2.destroyAllWindows() except: pass @@ -492,7 +493,8 @@ def run_shell_command(command): if __name__ == '__main__': show_image_window = True if show_image_window: - cv2.namedWindow("Camera Image", cv2.WINDOW_NORMAL) + pass + # cv2.namedWindow(cv2.WINDOW_NORMAL) print("\n\nDEBUG MODE ON\n\n") command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" From e718cbb7e0b57fe0c14c3dc144f5a1f508712493 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:43:18 +0800 Subject: [PATCH 48/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index eead9b3c..74cce82b 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -241,7 +241,6 @@ def visualize_camera(self,show_image_window=True): if show_image_window: print("CAMERA") try: - cv2.imshow("Camera Image", self.cv_image) # Create a copy of the image to draw the red dots on image_with_dots = self.cv_image.copy() @@ -250,8 +249,7 @@ def visualize_camera(self,show_image_window=True): cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red circle at (x, y) cv2.imshow("Camera Image", image_with_dots) - cv2.waitKey(1) - cv2.destroyAllWindows() + cv2.waitKey(0) except: pass @@ -493,8 +491,7 @@ def run_shell_command(command): if __name__ == '__main__': show_image_window = True if show_image_window: - pass - # cv2.namedWindow(cv2.WINDOW_NORMAL) + cv2.namedWindow("Camera Image", cv2.WINDOW_NORMAL) print("\n\nDEBUG MODE ON\n\n") command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" From 2eecd878021fcf592ce579949b28ca6f80652480 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:45:57 +0800 Subject: [PATCH 49/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 74cce82b..77ae4846 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -249,7 +249,7 @@ def visualize_camera(self,show_image_window=True): cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red 
circle at (x, y) cv2.imshow("Camera Image", image_with_dots) - cv2.waitKey(0) + cv2.waitKey(1) except: pass From 0aefbd1ada6247a2dadd0bd244ed6f1789d36578 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:49:02 +0800 Subject: [PATCH 50/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 77ae4846..ed7f4796 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -249,7 +249,9 @@ def visualize_camera(self,show_image_window=True): cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red circle at (x, y) cv2.imshow("Camera Image", image_with_dots) - cv2.waitKey(1) + # Check for the 'q' key press to exit the loop + if cv2.waitKey(1) & 0xFF == ord('q'): + return except: pass From 9bd5386e80d9ec9641c47bfe5c5a32e92ca0e638 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:50:56 +0800 Subject: [PATCH 51/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index ed7f4796..3e0c4f65 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -237,6 +237,10 @@ def __init__(self,parser_args,model,configs): for num in ascii_numbers[-6:]: self.print_and_log(f"\n{num}\n") time.sleep(1) + show_image_window = True + if show_image_window: + cv2.namedWindow("Camera Image", cv2.WINDOW_NORMAL) + def visualize_camera(self,show_image_window=True): if show_image_window: print("CAMERA") @@ -491,9 +495,6 @@ def run_shell_command(command): if __name__ == '__main__': - show_image_window = True - if show_image_window: - cv2.namedWindow("Camera Image", cv2.WINDOW_NORMAL) print("\n\nDEBUG MODE ON\n\n") command1 = "ros2 run image_transport republish compressed raw --ros-args --remap in/compressed:=/camera/compressed --remap out:=/camera" From 383209643faf6ecce33ff6a1c59ae634c170bfdd Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 21:55:15 +0800 Subject: [PATCH 52/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 1 + 1 file changed, 1 insertion(+) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 3e0c4f65..38f2182b 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -240,6 +240,7 @@ def __init__(self,parser_args,model,configs): show_image_window = True if show_image_window: cv2.namedWindow("Camera Image", cv2.WINDOW_NORMAL) + cv2.setWindowProperty("Camera Image", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) def visualize_camera(self,show_image_window=True): if show_image_window: From 1fa98bf3740dd814aa1f5dc6054655a7e7ca0475 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 22:08:54 +0800 Subject: [PATCH 53/67] a --- human_detection/configs.yaml | 2 +- .../yolov7_skeleton/human_detection_node.py | 22 +++++++++++-------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/human_detection/configs.yaml 
b/human_detection/configs.yaml index a95057ae..8a39402c 100644 --- a/human_detection/configs.yaml +++ b/human_detection/configs.yaml @@ -12,4 +12,4 @@ rotation_matrix: | -0.9999516401 -0.006361853422 -0.007499625104 0.006366381192 -0.9999795662 -0.0005800141927 translation_vector: [-0.06024059837, -0.08180891509, -0.3117851288] -publishing_frequency: 0.5 +publishing_frequency: 2.0 diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 38f2182b..1ba930e2 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -3,7 +3,7 @@ import argparse from cv_bridge import CvBridge import cv2 -from geometry_msgs.msg import PoseStamped +# from geometry_msgs.msg import PoseStamped import math import numpy as np import rclpy @@ -249,9 +249,12 @@ def visualize_camera(self,show_image_window=True): # Create a copy of the image to draw the red dots on image_with_dots = self.cv_image.copy() + # Draw red dots on the image at specified xy coordinates for person in self.person_array: cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red circle at (x, y) + cv2.putText(image_with_dots, str(person.id), (person.x,person.y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) + cv2.imshow("Camera Image", image_with_dots) # Check for the 'q' key press to exit the loop @@ -329,7 +332,7 @@ def publish_message(self,source_str="timer"): # print_verbose_only(self.parser_args, message) self.print_and_log(message) - new_object = Detection3D() + detection3d = Detection3D() lidar_x,lidar_y,lidar_z = convert_to_lidar_frame( (person.x,person.y,person.z), self.inverse_camera_transformation_k, @@ -337,14 +340,15 @@ def publish_message(self,source_str="timer"): self.translation_vector, self.configs) - new_object.bbox.center.position.x = float(lidar_x) - new_object.bbox.center.position.y = float(lidar_y) - new_object.bbox.center.position.z = float(lidar_z) - # new_object.bbox.size.x = float(0) - # new_object.bbox.size.y = float(0) - # new_object.bbox.center.orientation.w = float(0) + detection3d.bbox.center.position.x = float(lidar_x) + detection3d.bbox.center.position.y = float(lidar_y) + detection3d.bbox.center.position.z = float(lidar_z) + detection3d.id = str(person.id) + # detection3d.bbox.size.x = float(0) + # detection3d.bbox.size.y = float(0) + # detection3d.bbox.center.orientation.w = float(0) - detection_array.detections.append(new_object) + detection_array.detections.append(detection3d) self.visualize_camera() self.detection3DArray_publisher.publish(detection_array) From 500d042f0f9832d5a9eece4dc3fdf4a594e8fb34 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 22:14:26 +0800 Subject: [PATCH 54/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 1ba930e2..0545c407 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -240,7 +240,7 @@ def __init__(self,parser_args,model,configs): show_image_window = True if show_image_window: cv2.namedWindow("Camera Image", cv2.WINDOW_NORMAL) - cv2.setWindowProperty("Camera Image", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) + # cv2.setWindowProperty("Camera Image", 
cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) def visualize_camera(self,show_image_window=True): if show_image_window: @@ -328,7 +328,7 @@ def publish_message(self,source_str="timer"): for person in self.person_array: - message = f"coord: {round(person.x,2)},{round(person.y,2)},{round(person.z,2)}" + message = f"id {person.id} coord: {round(person.x,2)},{round(person.y,2)},{round(person.z,2)}" # print_verbose_only(self.parser_args, message) self.print_and_log(message) From b98d1228336301992041c038ae581fa79b00eac1 Mon Sep 17 00:00:00 2001 From: Nathan Date: Mon, 28 Aug 2023 22:17:32 +0800 Subject: [PATCH 55/67] Update human_detection_node.py --- human_detection/yolov7_skeleton/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 0545c407..169571dd 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -253,7 +253,7 @@ def visualize_camera(self,show_image_window=True): # Draw red dots on the image at specified xy coordinates for person in self.person_array: cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red circle at (x, y) - cv2.putText(image_with_dots, str(person.id), (person.x,person.y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) + cv2.putText(image_with_dots, str(person.id), (int(person.x), int(person.y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) cv2.imshow("Camera Image", image_with_dots) From 04f42b0d19d4d04adf1dfd4a59c64756181492e5 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 09:59:40 -0500 Subject: [PATCH 56/67] updated readme --- human_detection/configs.yaml | 2 +- human_detection/readme.md | 15 +++---- .../yolov7_skeleton/human_detection_node.py | 42 +++++++++++++++++++ human_detection/yolov7_skeleton/readme.md | 16 ++++++- 4 files changed, 62 insertions(+), 13 deletions(-) diff --git a/human_detection/configs.yaml b/human_detection/configs.yaml index 8a39402c..e487952c 100644 --- a/human_detection/configs.yaml +++ b/human_detection/configs.yaml @@ -12,4 +12,4 @@ rotation_matrix: | -0.9999516401 -0.006361853422 -0.007499625104 0.006366381192 -0.9999795662 -0.0005800141927 translation_vector: [-0.06024059837, -0.08180891509, -0.3117851288] -publishing_frequency: 2.0 +publishing_frequency: 1.0 diff --git a/human_detection/readme.md b/human_detection/readme.md index 182b7b02..57d6ce2b 100644 --- a/human_detection/readme.md +++ b/human_detection/readme.md @@ -1,22 +1,21 @@ ## Configs (see `configs.yaml`) +- distance_where_lidar_stops_working: the minimum distance that the lidar can detect. This is around 0.4 for the current velodyne (2023 Aug) - camera_transformation_k: - rotation_matrix: - translation_vector: - publishing_frequency: frequency to publish the detection location messages. If this is -1, the node will publish constantly (as camera FPS) - ## Arguments ### `-v` or `--verbose` - Description: Enable print statements -### `-d` or `--download_model` - -- Description: Download model from internet and save it into ./multipose_model +## Usage (new code using yolo_v7): +please see folder `yolov7_skeleton/` -## Usage +## Usage (old code using movenet) To use the application, run the script and provide the desired arguments as command-line options. 
Here's an example: ``` @@ -31,8 +30,4 @@ ros2 bag play testbag cd ~/trail_ws .install/setup.bash ros2 launch husky_base base.launch.py -``` - -```bash -python LidarCameraSubscriber.py -v -k -i -``` +``` \ No newline at end of file diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/yolov7_skeleton/human_detection_node.py index 169571dd..571d99a0 100644 --- a/human_detection/yolov7_skeleton/human_detection_node.py +++ b/human_detection/yolov7_skeleton/human_detection_node.py @@ -140,6 +140,48 @@ def __init__(self): self.id = 0 +class internalState: + human_max_speed = 2.8 # m/s + fps = 10 + buffer_ratio = 1.5 # allow fluctuation of up to 1.5 times + max_movement_per_frame = human_max_speed / fps * buffer_ratio + moving_average_weights = [10,5,3,2,1] + + def __init__(self,depth_history_length): + # Instance attributes (unique to each instance) + self.depth_history = [] + self.discarded_depth_history = [] + self.missing_frame_count = 0 + self.depth_history_length =depth_history_length + + def weighted_moving_average(self,data, weights): + num_points = min(len(data), len(weights)) + weights_sum = 0 + weighted_data_sum = 0 + for i in range(num_points): + weighted_data_sum += weights[i]*data[-1-i] + weights_sum += weights[i] + return weighted_data_sum/weights_sum + + def get_average(self): + return self.weighted_moving_average(self.depth_history,internalState.moving_average_weights) + + + def append(self, new_depth): + if len(self.depth_history) < self.depth_history_length: + self.depth_history.append(new_depth) + return 0 + + avg = self.get_average() + if abs(avg-new_depth) < internalState.max_movement_per_frame*self.missing_frame_count: + self.depth_history.pop(0) + self.depth_history.append(new_depth) + self.missing_frame_count = 0 + return 0 + self.missing_frame_count +=1 + self.discarded_depth_history.append(new_depth) + return 1 + class LidarCameraSubscriber(Node): def print_and_log(self, string): self.get_logger().info(string) diff --git a/human_detection/yolov7_skeleton/readme.md b/human_detection/yolov7_skeleton/readme.md index 8f653bd3..d2be6bc4 100644 --- a/human_detection/yolov7_skeleton/readme.md +++ b/human_detection/yolov7_skeleton/readme.md @@ -1,11 +1,23 @@ +## File structure explanation `yolov7.py ` is the yolov7 class for human tracking. It is taken from https://github.com/haroonshakeel/yolov7-object-tracking/blob/main/detect_or_track.py and modified `models/`, `utils/`, `sort.py` are yolov7 helper files, downloaded from https://github.com/WongKinYiu/yolov7 (with no modification) `example_usage.py` is an example that uses the yolov7 class. -Usage: -1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq +`human_detection_node.py` is the ros node that uses yolov7 to detect people with camera and lidar data. + + + +## Usage (human detection node): + +1. download the model here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq (both `.pt` files) +2. run `sudo apt-get install ros-humble-vision-msgs` +3. run ```python3 human_detection_node.py``` + +## Usage (yolov7 model only): + +1. download the model and test video here: https://utoronto-my.sharepoint.com/:f:/r/personal/nathan_hung_mail_utoronto_ca/Documents/yolov7?csf=1&web=1&e=pSSspq. 2. run `sudo apt-get install ros-humble-vision-msgs` 3. 
run ``` python yolov7.py --weights-file yolov7.pt --nosave --view-img --show-fps --show-track-lines --classes 0 --no-trace --source video.mp4 From fd1566cc6d5b154339b624dc8b3f5ef9d686e114 Mon Sep 17 00:00:00 2001 From: trailbot Date: Thu, 31 Aug 2023 15:37:43 -0400 Subject: [PATCH 57/67] moved yolo to gpu, fixed memory leak, added init.py to yolov7_skeleton --- human_detection/yolov7_skeleton/__init__.py | 0 human_detection/yolov7_skeleton/yolov7.py | 7 +++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 human_detection/yolov7_skeleton/__init__.py diff --git a/human_detection/yolov7_skeleton/__init__.py b/human_detection/yolov7_skeleton/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/human_detection/yolov7_skeleton/yolov7.py b/human_detection/yolov7_skeleton/yolov7.py index 16ef62a6..442c2539 100644 --- a/human_detection/yolov7_skeleton/yolov7.py +++ b/human_detection/yolov7_skeleton/yolov7.py @@ -50,8 +50,10 @@ def draw_track_lines(im0, tracks, sort_tracker, thickness): class Yolo_sort_tracker: def __init__(self, - device='cpu', + # device='cpu', + device='0', weights_file='yolov7.pt', + # weights_file='/home/trailbot/trail_ws/src/TRAILBot/human_detection/yolov7_skeleton/yolov7.pt', img_size=640, traced_model_already_exists=True, source='webcam', @@ -158,7 +160,8 @@ def detect(self, pred = self.model(img, augment=enable_augment)[0] # Apply NMS - pred = non_max_suppression(pred, conf_thres, iou_thres, classes=detection_object_classes, agnostic=enable_agnostic_nms) + with torch.no_grad(): + pred = non_max_suppression(pred, conf_thres, iou_thres, classes=detection_object_classes, agnostic=enable_agnostic_nms) #TESTING ###to be removed if len(pred)!=1: From 91b5e364028bdeaa08720cc9904db10d0e6a98cd Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 15:44:54 -0400 Subject: [PATCH 58/67] remove old code --- .../human_detection/{ => .old_movenet}/ascii_numbers.py | 0 .../human_detection/{ => .old_movenet}/human_detection_node.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename human_detection/human_detection/{ => .old_movenet}/ascii_numbers.py (100%) rename human_detection/human_detection/{ => .old_movenet}/human_detection_node.py (100%) diff --git a/human_detection/human_detection/ascii_numbers.py b/human_detection/human_detection/.old_movenet/ascii_numbers.py similarity index 100% rename from human_detection/human_detection/ascii_numbers.py rename to human_detection/human_detection/.old_movenet/ascii_numbers.py diff --git a/human_detection/human_detection/human_detection_node.py b/human_detection/human_detection/.old_movenet/human_detection_node.py similarity index 100% rename from human_detection/human_detection/human_detection_node.py rename to human_detection/human_detection/.old_movenet/human_detection_node.py From 021c5b4ac8da086be680f4deeecdc3eeb9637712 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 15:46:44 -0400 Subject: [PATCH 59/67] move yolov7 version to human_detection node --- human_detection/{yolov7_skeleton => human_detection}/.gitignore | 0 .../{yolov7_skeleton/readme.md => human_detection/README.md} | 0 .../{yolov7_skeleton => human_detection}/ascii_numbers.py | 0 human_detection/{yolov7_skeleton => human_detection}/commands.txt | 0 .../{yolov7_skeleton => human_detection}/example_usage.py | 0 .../{yolov7_skeleton => human_detection}/human_detection_node.py | 0 .../{yolov7_skeleton => human_detection}/models/__init__.py | 0 .../{yolov7_skeleton => human_detection}/models/common.py | 0 
.../{yolov7_skeleton => human_detection}/models/experimental.py | 0 .../{yolov7_skeleton => human_detection}/models/yolo.py | 0 human_detection/{yolov7_skeleton => human_detection}/sort.py | 0 .../{yolov7_skeleton => human_detection}/utils/__init__.py | 0 .../{yolov7_skeleton => human_detection}/utils/activations.py | 0 .../{yolov7_skeleton => human_detection}/utils/add_nms.py | 0 .../{yolov7_skeleton => human_detection}/utils/autoanchor.py | 0 .../{yolov7_skeleton => human_detection}/utils/aws/__init__.py | 0 .../{yolov7_skeleton => human_detection}/utils/aws/mime.sh | 0 .../{yolov7_skeleton => human_detection}/utils/aws/resume.py | 0 .../{yolov7_skeleton => human_detection}/utils/aws/userdata.sh | 0 .../{yolov7_skeleton => human_detection}/utils/datasets.py | 0 .../{yolov7_skeleton => human_detection}/utils/general.py | 0 .../utils/google_app_engine/Dockerfile | 0 .../utils/google_app_engine/additional_requirements.txt | 0 .../utils/google_app_engine/app.yaml | 0 .../{yolov7_skeleton => human_detection}/utils/google_utils.py | 0 .../{yolov7_skeleton => human_detection}/utils/loss.py | 0 .../{yolov7_skeleton => human_detection}/utils/metrics.py | 0 .../{yolov7_skeleton => human_detection}/utils/plots.py | 0 .../{yolov7_skeleton => human_detection}/utils/torch_utils.py | 0 .../utils/wandb_logging/__init__.py | 0 .../utils/wandb_logging/log_dataset.py | 0 .../utils/wandb_logging/wandb_utils.py | 0 human_detection/{yolov7_skeleton => human_detection}/yolov7.py | 0 human_detection/yolov7_skeleton/__init__.py | 0 34 files changed, 0 insertions(+), 0 deletions(-) rename human_detection/{yolov7_skeleton => human_detection}/.gitignore (100%) rename human_detection/{yolov7_skeleton/readme.md => human_detection/README.md} (100%) rename human_detection/{yolov7_skeleton => human_detection}/ascii_numbers.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/commands.txt (100%) rename human_detection/{yolov7_skeleton => human_detection}/example_usage.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/human_detection_node.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/models/__init__.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/models/common.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/models/experimental.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/models/yolo.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/sort.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/__init__.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/activations.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/add_nms.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/autoanchor.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/aws/__init__.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/aws/mime.sh (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/aws/resume.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/aws/userdata.sh (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/datasets.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/general.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/google_app_engine/Dockerfile (100%) rename human_detection/{yolov7_skeleton => 
human_detection}/utils/google_app_engine/additional_requirements.txt (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/google_app_engine/app.yaml (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/google_utils.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/loss.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/metrics.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/plots.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/torch_utils.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/wandb_logging/__init__.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/wandb_logging/log_dataset.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/utils/wandb_logging/wandb_utils.py (100%) rename human_detection/{yolov7_skeleton => human_detection}/yolov7.py (100%) delete mode 100644 human_detection/yolov7_skeleton/__init__.py diff --git a/human_detection/yolov7_skeleton/.gitignore b/human_detection/human_detection/.gitignore similarity index 100% rename from human_detection/yolov7_skeleton/.gitignore rename to human_detection/human_detection/.gitignore diff --git a/human_detection/yolov7_skeleton/readme.md b/human_detection/human_detection/README.md similarity index 100% rename from human_detection/yolov7_skeleton/readme.md rename to human_detection/human_detection/README.md diff --git a/human_detection/yolov7_skeleton/ascii_numbers.py b/human_detection/human_detection/ascii_numbers.py similarity index 100% rename from human_detection/yolov7_skeleton/ascii_numbers.py rename to human_detection/human_detection/ascii_numbers.py diff --git a/human_detection/yolov7_skeleton/commands.txt b/human_detection/human_detection/commands.txt similarity index 100% rename from human_detection/yolov7_skeleton/commands.txt rename to human_detection/human_detection/commands.txt diff --git a/human_detection/yolov7_skeleton/example_usage.py b/human_detection/human_detection/example_usage.py similarity index 100% rename from human_detection/yolov7_skeleton/example_usage.py rename to human_detection/human_detection/example_usage.py diff --git a/human_detection/yolov7_skeleton/human_detection_node.py b/human_detection/human_detection/human_detection_node.py similarity index 100% rename from human_detection/yolov7_skeleton/human_detection_node.py rename to human_detection/human_detection/human_detection_node.py diff --git a/human_detection/yolov7_skeleton/models/__init__.py b/human_detection/human_detection/models/__init__.py similarity index 100% rename from human_detection/yolov7_skeleton/models/__init__.py rename to human_detection/human_detection/models/__init__.py diff --git a/human_detection/yolov7_skeleton/models/common.py b/human_detection/human_detection/models/common.py similarity index 100% rename from human_detection/yolov7_skeleton/models/common.py rename to human_detection/human_detection/models/common.py diff --git a/human_detection/yolov7_skeleton/models/experimental.py b/human_detection/human_detection/models/experimental.py similarity index 100% rename from human_detection/yolov7_skeleton/models/experimental.py rename to human_detection/human_detection/models/experimental.py diff --git a/human_detection/yolov7_skeleton/models/yolo.py b/human_detection/human_detection/models/yolo.py similarity index 100% rename from human_detection/yolov7_skeleton/models/yolo.py rename to 
human_detection/human_detection/models/yolo.py diff --git a/human_detection/yolov7_skeleton/sort.py b/human_detection/human_detection/sort.py similarity index 100% rename from human_detection/yolov7_skeleton/sort.py rename to human_detection/human_detection/sort.py diff --git a/human_detection/yolov7_skeleton/utils/__init__.py b/human_detection/human_detection/utils/__init__.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/__init__.py rename to human_detection/human_detection/utils/__init__.py diff --git a/human_detection/yolov7_skeleton/utils/activations.py b/human_detection/human_detection/utils/activations.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/activations.py rename to human_detection/human_detection/utils/activations.py diff --git a/human_detection/yolov7_skeleton/utils/add_nms.py b/human_detection/human_detection/utils/add_nms.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/add_nms.py rename to human_detection/human_detection/utils/add_nms.py diff --git a/human_detection/yolov7_skeleton/utils/autoanchor.py b/human_detection/human_detection/utils/autoanchor.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/autoanchor.py rename to human_detection/human_detection/utils/autoanchor.py diff --git a/human_detection/yolov7_skeleton/utils/aws/__init__.py b/human_detection/human_detection/utils/aws/__init__.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/aws/__init__.py rename to human_detection/human_detection/utils/aws/__init__.py diff --git a/human_detection/yolov7_skeleton/utils/aws/mime.sh b/human_detection/human_detection/utils/aws/mime.sh similarity index 100% rename from human_detection/yolov7_skeleton/utils/aws/mime.sh rename to human_detection/human_detection/utils/aws/mime.sh diff --git a/human_detection/yolov7_skeleton/utils/aws/resume.py b/human_detection/human_detection/utils/aws/resume.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/aws/resume.py rename to human_detection/human_detection/utils/aws/resume.py diff --git a/human_detection/yolov7_skeleton/utils/aws/userdata.sh b/human_detection/human_detection/utils/aws/userdata.sh similarity index 100% rename from human_detection/yolov7_skeleton/utils/aws/userdata.sh rename to human_detection/human_detection/utils/aws/userdata.sh diff --git a/human_detection/yolov7_skeleton/utils/datasets.py b/human_detection/human_detection/utils/datasets.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/datasets.py rename to human_detection/human_detection/utils/datasets.py diff --git a/human_detection/yolov7_skeleton/utils/general.py b/human_detection/human_detection/utils/general.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/general.py rename to human_detection/human_detection/utils/general.py diff --git a/human_detection/yolov7_skeleton/utils/google_app_engine/Dockerfile b/human_detection/human_detection/utils/google_app_engine/Dockerfile similarity index 100% rename from human_detection/yolov7_skeleton/utils/google_app_engine/Dockerfile rename to human_detection/human_detection/utils/google_app_engine/Dockerfile diff --git a/human_detection/yolov7_skeleton/utils/google_app_engine/additional_requirements.txt b/human_detection/human_detection/utils/google_app_engine/additional_requirements.txt similarity index 100% rename from human_detection/yolov7_skeleton/utils/google_app_engine/additional_requirements.txt rename to 
human_detection/human_detection/utils/google_app_engine/additional_requirements.txt diff --git a/human_detection/yolov7_skeleton/utils/google_app_engine/app.yaml b/human_detection/human_detection/utils/google_app_engine/app.yaml similarity index 100% rename from human_detection/yolov7_skeleton/utils/google_app_engine/app.yaml rename to human_detection/human_detection/utils/google_app_engine/app.yaml diff --git a/human_detection/yolov7_skeleton/utils/google_utils.py b/human_detection/human_detection/utils/google_utils.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/google_utils.py rename to human_detection/human_detection/utils/google_utils.py diff --git a/human_detection/yolov7_skeleton/utils/loss.py b/human_detection/human_detection/utils/loss.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/loss.py rename to human_detection/human_detection/utils/loss.py diff --git a/human_detection/yolov7_skeleton/utils/metrics.py b/human_detection/human_detection/utils/metrics.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/metrics.py rename to human_detection/human_detection/utils/metrics.py diff --git a/human_detection/yolov7_skeleton/utils/plots.py b/human_detection/human_detection/utils/plots.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/plots.py rename to human_detection/human_detection/utils/plots.py diff --git a/human_detection/yolov7_skeleton/utils/torch_utils.py b/human_detection/human_detection/utils/torch_utils.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/torch_utils.py rename to human_detection/human_detection/utils/torch_utils.py diff --git a/human_detection/yolov7_skeleton/utils/wandb_logging/__init__.py b/human_detection/human_detection/utils/wandb_logging/__init__.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/wandb_logging/__init__.py rename to human_detection/human_detection/utils/wandb_logging/__init__.py diff --git a/human_detection/yolov7_skeleton/utils/wandb_logging/log_dataset.py b/human_detection/human_detection/utils/wandb_logging/log_dataset.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/wandb_logging/log_dataset.py rename to human_detection/human_detection/utils/wandb_logging/log_dataset.py diff --git a/human_detection/yolov7_skeleton/utils/wandb_logging/wandb_utils.py b/human_detection/human_detection/utils/wandb_logging/wandb_utils.py similarity index 100% rename from human_detection/yolov7_skeleton/utils/wandb_logging/wandb_utils.py rename to human_detection/human_detection/utils/wandb_logging/wandb_utils.py diff --git a/human_detection/yolov7_skeleton/yolov7.py b/human_detection/human_detection/yolov7.py similarity index 100% rename from human_detection/yolov7_skeleton/yolov7.py rename to human_detection/human_detection/yolov7.py diff --git a/human_detection/yolov7_skeleton/__init__.py b/human_detection/yolov7_skeleton/__init__.py deleted file mode 100644 index e69de29b..00000000 From 3409a8e37545eff72acf9336c8a75e21b324d52d Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 16:04:52 -0400 Subject: [PATCH 60/67] change import to relative --- .../human_detection/human_detection_node.py | 10 +++++++++- human_detection/human_detection/yolov7.py | 9 +++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/human_detection/human_detection/human_detection_node.py b/human_detection/human_detection/human_detection_node.py index 571d99a0..3130ab27 100644 --- 
a/human_detection/human_detection/human_detection_node.py +++ b/human_detection/human_detection/human_detection_node.py @@ -16,9 +16,17 @@ import time import yaml -import yolov7 import torch +import sys +import os +# Get the current directory of the main.py file +current_directory = os.path.dirname(os.path.abspath(__file__)) +# Add the current directory to the Python path +sys.path.append(current_directory) + +import yolov7 + def parse_arguments(): """ handle command line arguments diff --git a/human_detection/human_detection/yolov7.py b/human_detection/human_detection/yolov7.py index 442c2539..8a1487f8 100644 --- a/human_detection/human_detection/yolov7.py +++ b/human_detection/human_detection/yolov7.py @@ -1,3 +1,5 @@ + + import argparse import time from pathlib import Path @@ -5,6 +7,13 @@ import torch import numpy as np +import sys +import os +# Get the current directory of the main.py file +current_directory = os.path.dirname(os.path.abspath(__file__)) +# Add the current directory to the Python path +sys.path.append(current_directory) + from models.experimental import attempt_load from utils.datasets import LoadImages from utils.general import check_img_size, non_max_suppression, scale_coords, set_logging, increment_path From a9a6fba7df2837c40baf73305480cb2ba81cbb3b Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 16:07:45 -0400 Subject: [PATCH 61/67] Update human_detection_node.py --- human_detection/human_detection/human_detection_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/human_detection/human_detection_node.py b/human_detection/human_detection/human_detection_node.py index 3130ab27..1be47ff8 100644 --- a/human_detection/human_detection/human_detection_node.py +++ b/human_detection/human_detection/human_detection_node.py @@ -521,7 +521,7 @@ def estimate_depth(x, y, np_2d_array,configs): def main(args=None, debug_mode=False): - with open('configs.yaml', 'r') as file: + with open('/home/trailbot/trail_ws/src/TRAILBot/human_detection/configs.yaml', 'r') as file: configs = yaml.safe_load(file) parser_args = parse_arguments() From 921e83460f34856c64bdd0859953aabd56b5c463 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 16:16:17 -0400 Subject: [PATCH 62/67] Update yolov7.py --- human_detection/human_detection/yolov7.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human_detection/human_detection/yolov7.py b/human_detection/human_detection/yolov7.py index 8a1487f8..08d67df9 100644 --- a/human_detection/human_detection/yolov7.py +++ b/human_detection/human_detection/yolov7.py @@ -61,7 +61,7 @@ class Yolo_sort_tracker: def __init__(self, # device='cpu', device='0', - weights_file='yolov7.pt', + weights_file='/home/trailbot/trail_ws/src/TRAILBot/human_detection/human_detection/yolov7.pt', # weights_file='/home/trailbot/trail_ws/src/TRAILBot/human_detection/yolov7_skeleton/yolov7.pt', img_size=640, traced_model_already_exists=True, From 54d9f546fda2a869f8db0d87d11859067338774b Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 16:24:14 -0400 Subject: [PATCH 63/67] Update yolov7.py --- human_detection/human_detection/yolov7.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/human_detection/human_detection/yolov7.py b/human_detection/human_detection/yolov7.py index 08d67df9..71d91754 100644 --- a/human_detection/human_detection/yolov7.py +++ b/human_detection/human_detection/yolov7.py @@ -61,7 +61,7 @@ class Yolo_sort_tracker: def __init__(self, # device='cpu', device='0', 
- weights_file='/home/trailbot/trail_ws/src/TRAILBot/human_detection/human_detection/yolov7.pt', + weights_file='yolov7.pt', # weights_file='/home/trailbot/trail_ws/src/TRAILBot/human_detection/yolov7_skeleton/yolov7.pt', img_size=640, traced_model_already_exists=True, @@ -265,7 +265,7 @@ def detect(self, parser = argparse.ArgumentParser() # Files and devices: - parser.add_argument('--weights-file', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)') + parser.add_argument('--weights-file', nargs='+', type=str, default='/home/trailbot/trail_ws/src/TRAILBot/human_detection/human_detection/yolov7.pt', help='model.pt path(s)') parser.add_argument('--no-trace', action='store_true', help='don`t trace model (if traced_model.pt already exist this can save time)') # Model tracing determines all the operations that are executed when a model parses input data through its linear layers. Just like downloading a model, it only needs to run once. Once the traced_model.pt is generated, this operation is no longer needed parser.add_argument('--source', type=str, default='inference/images', help='video source to process') # mp4 file/folder, 'webcam' for webcam parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') From 85f114a0a5952cb5c5b7b06bae647acbe46363de Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 16:25:19 -0400 Subject: [PATCH 64/67] Update yolov7.py --- human_detection/human_detection/yolov7.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/human_detection/human_detection/yolov7.py b/human_detection/human_detection/yolov7.py index 71d91754..08d67df9 100644 --- a/human_detection/human_detection/yolov7.py +++ b/human_detection/human_detection/yolov7.py @@ -61,7 +61,7 @@ class Yolo_sort_tracker: def __init__(self, # device='cpu', device='0', - weights_file='yolov7.pt', + weights_file='/home/trailbot/trail_ws/src/TRAILBot/human_detection/human_detection/yolov7.pt', # weights_file='/home/trailbot/trail_ws/src/TRAILBot/human_detection/yolov7_skeleton/yolov7.pt', img_size=640, traced_model_already_exists=True, @@ -265,7 +265,7 @@ def detect(self, parser = argparse.ArgumentParser() # Files and devices: - parser.add_argument('--weights-file', nargs='+', type=str, default='/home/trailbot/trail_ws/src/TRAILBot/human_detection/human_detection/yolov7.pt', help='model.pt path(s)') + parser.add_argument('--weights-file', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)') parser.add_argument('--no-trace', action='store_true', help='don`t trace model (if traced_model.pt already exist this can save time)') # Model tracing determines all the operations that are executed when a model parses input data through its linear layers. Just like downloading a model, it only needs to run once. Once the traced_model.pt is generated, this operation is no longer needed parser.add_argument('--source', type=str, default='inference/images', help='video source to process') # mp4 file/folder, 'webcam' for webcam parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') From 178419fe95997fcd7e14493ae7deff8d47a17e4a Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 31 Aug 2023 16:28:42 -0400 Subject: [PATCH 65/67] Update google_utils.py --- human_detection/human_detection/utils/google_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/human_detection/human_detection/utils/google_utils.py b/human_detection/human_detection/utils/google_utils.py index f363408e..3238b207 100644 --- a/human_detection/human_detection/utils/google_utils.py +++ b/human_detection/human_detection/utils/google_utils.py @@ -18,8 +18,8 @@ def gsutil_getsize(url=''): def attempt_download(file, repo='WongKinYiu/yolov7'): # Attempt file download if does not exist - file = Path(str(file).strip().replace("'", '').lower()) - + file = Path(str(file).strip().replace("'", '')) + print(file) if not file.exists(): try: response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api From b2c6e0cba57c8949990dd74ef88298b2dff46ae1 Mon Sep 17 00:00:00 2001 From: trailbot Date: Thu, 31 Aug 2023 16:54:53 -0400 Subject: [PATCH 66/67] new launch file --- trailbot_bringup/launch/nav_camera.launch.py | 55 ++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 trailbot_bringup/launch/nav_camera.launch.py diff --git a/trailbot_bringup/launch/nav_camera.launch.py b/trailbot_bringup/launch/nav_camera.launch.py new file mode 100644 index 00000000..296114d9 --- /dev/null +++ b/trailbot_bringup/launch/nav_camera.launch.py @@ -0,0 +1,55 @@ +#File Launches Everything for Nav2 stack and includes SLAM and Driving + +import os +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +from launch.actions import IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch_ros.actions import Node + + + +def generate_launch_description(): + + # Launching SLAM (also includes driving and all other needed nodes) + slam_launch_path = os.path.join(get_package_share_directory('trailbot_bringup'),'launch','slam_3D.launch.py') + slam_launch = IncludeLaunchDescription(PythonLaunchDescriptionSource([slam_launch_path])) + + #the nav configs + package_name = 'nav' + + # Nav node + nav_launch_path = os.path.join(get_package_share_directory(package_name),'launch','navigation_launch.py') + nav_params_path = os.path.join(get_package_share_directory(package_name),'config','nav2_params_points.yaml') + nav_node = IncludeLaunchDescription(PythonLaunchDescriptionSource([nav_launch_path]), + launch_arguments={'namespace': '', + # 'use_sim_time': 'true', + 'autostart': 'true', + 'params_file': nav_params_path, + # 'use_lifecycle_mgr': 'false', + #'map_subscribe_transient_local': 'true' + } + .items()) + + + + #Ximea Camera Node (commented because no current implementation) + ld = LaunchDescription() + config = os.path.join( + get_package_share_directory("ximea_driver"), + "config", + "params.yaml", + ) + ximea_node = Node( + package="ximea_driver", + executable="ximea_driver_node", + name="ximea_driver_node", + parameters=[config] + ) + + ld = LaunchDescription() + ld.add_action(ximea_node) + ld.add_action(slam_launch) + ld.add_action(nav_node) + + return ld \ No newline at end of file From b3767af122ca09da2eabc4737eca504510735885 Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 7 Sep 2023 14:41:38 -0400 Subject: [PATCH 67/67] Update human_detection_node.py --- .../human_detection/human_detection_node.py | 53 +++++++++---------- 1 file 
changed, 25 insertions(+), 28 deletions(-) diff --git a/human_detection/human_detection/human_detection_node.py b/human_detection/human_detection/human_detection_node.py index 1be47ff8..66d74e03 100644 --- a/human_detection/human_detection/human_detection_node.py +++ b/human_detection/human_detection/human_detection_node.py @@ -114,25 +114,23 @@ def get_heading_angle( x_angle_radian = math.atan( (centroid_x - (image_width / 2)) / (image_width / 2) * math.tan(math.radians(fov / 2))) return offset + scaling * math.degrees(x_angle_radian) -def process_frame(model,image,configs): +def process_frame(person_array_by_id,model,image,configs): """ process a frame. Determine keypoints and number of people and heading angle. """ # Run model inference - person_array = [] bounding_boxes, identities, confidences=model.process_frame(image,view_img=False) if identities is None: return [] for i in range(len(bounding_boxes)): - person = Person() + if identities[i] not in person_array_by_id: + person_array_by_id[ identities[i] ] = Person() centroid = xyxy_to_centroid(bounding_boxes[i]) - person.heading_angle = get_heading_angle(centroid) - person.x, person.y = centroid - person.on_screen=True - person.id = identities[i] - person_array.append(person) - return person_array + person_array_by_id[ identities[i] ].heading_angle = get_heading_angle(centroid) + person_array_by_id[ identities[i] ].x, person_array_by_id[ identities[i] ].y = centroid + person_array_by_id[ identities[i] ].on_screen=True + return person_array_by_id class Person: @@ -142,13 +140,11 @@ class Person: def __init__(self): self.x = -1.0 self.y = -1.0 - self.z = -1.0 + self.z = movingAverage(5) self.on_screen = False self.heading_angle = 0.0 - self.id = 0 - -class internalState: +class movingAverage: human_max_speed = 2.8 # m/s fps = 10 buffer_ratio = 1.5 # allow fluctuation of up to 1.5 times @@ -163,6 +159,8 @@ def __init__(self,depth_history_length): self.depth_history_length =depth_history_length def weighted_moving_average(self,data, weights): + if len(data)==0 or len(weights)==0: + return -1.0 num_points = min(len(data), len(weights)) weights_sum = 0 weighted_data_sum = 0 @@ -172,7 +170,7 @@ def weighted_moving_average(self,data, weights): return weighted_data_sum/weights_sum def get_average(self): - return self.weighted_moving_average(self.depth_history,internalState.moving_average_weights) + return self.weighted_moving_average(self.depth_history,movingAverage.moving_average_weights) def append(self, new_depth): @@ -181,7 +179,7 @@ def append(self, new_depth): return 0 avg = self.get_average() - if abs(avg-new_depth) < internalState.max_movement_per_frame*self.missing_frame_count: + if abs(avg-new_depth) < movingAverage.max_movement_per_frame*self.missing_frame_count: self.depth_history.pop(0) self.depth_history.append(new_depth) self.missing_frame_count = 0 @@ -197,7 +195,7 @@ def print_and_log(self, string): def __init__(self,parser_args,model,configs): - self.person_array = [] + self.person_array_by_id = dict() self.is_there_anyone = False self.cur_state = "SearchState" # initial state self.parser_args = parser_args @@ -301,9 +299,9 @@ def visualize_camera(self,show_image_window=True): # Draw red dots on the image at specified xy coordinates - for person in self.person_array: + for person_id, person in self.person_array_by_id.items(): cv2.circle(image_with_dots, (int(person.x), int(person.y)), 5, (0, 0, 255), -1) # Draw a red circle at (x, y) - cv2.putText(image_with_dots, str(person.id), (int(person.x), int(person.y)), 
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) + cv2.putText(image_with_dots, str(person_id), (int(person.x), int(person.y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) cv2.imshow("Camera Image", image_with_dots) @@ -324,8 +322,8 @@ def camera_callback(self, msg): self.cv_image = self.bridge.imgmsg_to_cv2( msg, desired_encoding='passthrough') - self.person_array = process_frame(self.model, self.cv_image, self.configs) - self.is_there_anyone = len(self.person_array)>0 + process_frame(self.person_array_by_id, self.model, self.cv_image, self.configs) + self.is_there_anyone = len(self.person_array_by_id)>0 self.timestamp = msg.header.stamp @@ -352,11 +350,11 @@ def lidar_callback(self, msg): self.configs) #update depth for every person - for person in self.person_array: + for person_id, person in self.person_array_by_id.items(): if not person.on_screen: - person.z = -1.0 + person.z.append(-1.0) else: - person.z = estimate_depth(person.x, person.y, points2d,self.configs) + person.z.append(estimate_depth(person.x, person.y, points2d,self.configs)) self.timestamp = msg.header.stamp # if this is -1, node will publish constantly (as camera FPS) if not self.publishing_frequency>0: @@ -376,15 +374,14 @@ def publish_message(self,source_str="timer"): detection_array = Detection3DArray() - for person in self.person_array: - - message = f"id {person.id} coord: {round(person.x,2)},{round(person.y,2)},{round(person.z,2)}" + for person_id, person in self.person_array_by_id.items(): + message = f"id {person_id} coord: {round(person.x,2)},{round(person.y,2)},{round(person.z.get_average(),2)}" # print_verbose_only(self.parser_args, message) self.print_and_log(message) detection3d = Detection3D() lidar_x,lidar_y,lidar_z = convert_to_lidar_frame( - (person.x,person.y,person.z), + (person.x,person.y,person.z.get_average()), self.inverse_camera_transformation_k, self.inverse_rotation_matrix, self.translation_vector, @@ -393,7 +390,7 @@ def publish_message(self,source_str="timer"): detection3d.bbox.center.position.x = float(lidar_x) detection3d.bbox.center.position.y = float(lidar_y) detection3d.bbox.center.position.z = float(lidar_z) - detection3d.id = str(person.id) + detection3d.id = str(person_id) # detection3d.bbox.size.x = float(0) # detection3d.bbox.size.y = float(0) # detection3d.bbox.center.orientation.w = float(0)
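
The `get_heading_angle` helper that appears as unchanged context in this last patch turns a detection's pixel column into a bearing with a pinhole model: the centroid's normalized offset from the image centre is scaled by tan(FOV/2) and passed through atan. Below is a minimal sketch of that formula with made-up numbers; the image width, FOV, offset and scaling defaults are illustrative assumptions, not the project's calibrated values.

```python
import math

def heading_angle_from_centroid(centroid_x, image_width=640.0, fov=90.0,
                                offset=0.0, scaling=1.0):
    """Pinhole bearing estimate: normalized pixel offset from the image
    centre is scaled by tan(fov/2), then converted back to an angle."""
    normalized = (centroid_x - image_width / 2) / (image_width / 2)
    x_angle_radian = math.atan(normalized * math.tan(math.radians(fov / 2)))
    return offset + scaling * math.degrees(x_angle_radian)

# A person detected halfway between the centre and the right edge of a
# 640-px-wide frame with a 90-degree FOV sits at roughly +26.6 degrees.
print(round(heading_angle_from_centroid(480), 1))  # ~26.6
```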
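
Patches 56 and 67 together add the depth smoothing that `Person.z` now carries: a recency-weighted moving average of recent lidar depths, gated so that a new reading is accepted only if it lies within the distance a person could plausibly have moved between frames (human_max_speed / fps * buffer_ratio, about 0.42 m with the constants in the patch). The snippet below is a simplified, self-contained sketch of that filtering idea, not the node's exact `movingAverage` class, and the sample depth readings are made up.

```python
HUMAN_MAX_SPEED = 2.8   # m/s, constant from the patch
FPS = 10.0
BUFFER_RATIO = 1.5
MAX_STEP = HUMAN_MAX_SPEED / FPS * BUFFER_RATIO   # ~0.42 m between frames
WEIGHTS = [10, 5, 3, 2, 1]                        # newest sample weighted most

def weighted_moving_average(history, weights=WEIGHTS):
    """Average the most recent samples, newest first, by the given weights."""
    n = min(len(history), len(weights))
    if n == 0:
        return -1.0
    num = sum(weights[i] * history[-1 - i] for i in range(n))
    den = sum(weights[:n])
    return num / den

def filtered_depth(history, new_depth, max_step=MAX_STEP):
    """Accept new_depth only if it stays within max_step of the running
    average; otherwise keep the old estimate (outlier rejected)."""
    if not history:
        history.append(new_depth)
        return new_depth
    avg = weighted_moving_average(history)
    if abs(new_depth - avg) <= max_step:
        history.append(new_depth)
        if len(history) > len(WEIGHTS):
            history.pop(0)
    return weighted_moving_average(history)

# Made-up lidar depths for one tracked person: the 3.9 m spike (e.g. a
# reading off the background) is rejected, the rest are smoothed.
history = []
for z in [2.00, 2.05, 1.98, 3.90, 2.10]:
    print(round(filtered_depth(history, z), 2))
```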