From 2210a9e2f3705560c9f989c42b0903a802b64b50 Mon Sep 17 00:00:00 2001
From: Yu-Zhewen <yuzhewen0108@163.com>
Date: Thu, 21 Sep 2023 11:12:27 +0100
Subject: [PATCH] Add YOLOv8 COCO evaluation support

---
 .gitignore        |  2 ++
 coco_main.py      | 28 ++++++++++++++++++++++++++++
 imagenet_main.py  |  3 ++-
 sparsity_utils.py | 15 +++++++++------
 4 files changed, 41 insertions(+), 7 deletions(-)
 create mode 100644 coco_main.py

diff --git a/.gitignore b/.gitignore
index 1c47bf9..f9c6736 100644
--- a/.gitignore
+++ b/.gitignore
@@ -136,3 +136,5 @@
 runlog/
 
 *.csv
+output/
+
diff --git a/coco_main.py b/coco_main.py
new file mode 100644
index 0000000..e7ce6bb
--- /dev/null
+++ b/coco_main.py
@@ -0,0 +1,28 @@
+import argparse
+import os
+import pathlib
+import random
+import torch
+
+from sparsity_utils import *
+from ultralytics import YOLO  # install this fork: https://github.com/Yu-Zhewen/ultralytics
+
+parser = argparse.ArgumentParser(description='ultralytics COCO')
+parser.add_argument('-a', '--arch', default='yolov8n')
+parser.add_argument('--output_path', default=None, type=str,
+                    help='output path')
+args = parser.parse_args()
+if args.output_path is None:
+    args.output_path = os.getcwd() + "/output"
+pathlib.Path(args.output_path).mkdir(parents=True, exist_ok=True)
+print(args)
+
+random.seed(0)
+torch.manual_seed(0)
+
+# load the pretrained checkpoint and swap in the statistics-collecting convolution
+model = YOLO(args.arch + ".pt")
+replace_with_vanilla_convolution(model)
+results = model.val(plots=False, batch=1, data="coco128.yaml")  # data="coco128.yaml" (128-image subset) or "coco.yaml" (full dataset)
+output_sparsity_to_csv(args.arch, model, args.output_path)
+print(results)
diff --git a/imagenet_main.py b/imagenet_main.py
index e3f7428..8a2621c 100644
--- a/imagenet_main.py
+++ b/imagenet_main.py
@@ -1,5 +1,6 @@
 import argparse
 import os
+import pathlib
 import random
 
 import torch
@@ -43,7 +44,7 @@ def imagenet_main():
 
     if args.output_path == None:
         args.output_path = os.getcwd() + "/output"
-
+    pathlib.Path(args.output_path).mkdir(parents=True, exist_ok=True)
     print(args)
 
     random.seed(0)
diff --git a/sparsity_utils.py b/sparsity_utils.py
index 59aa123..696492e 100644
--- a/sparsity_utils.py
+++ b/sparsity_utils.py
@@ -36,7 +36,7 @@ def output_sparsity_to_csv(model_name, model, output_dir):
             np.save(os.path.join(output_dir,"{}_{}_mean.npy".format(model_name, name)), module.statistics.mean.cpu().numpy())
             np.save(os.path.join(output_dir,"{}_{}_var.npy".format(model_name, name)), module.statistics.var.cpu().numpy())
             np.save(os.path.join(output_dir,"{}_{}_correlation.npy".format(model_name, name)), module.statistics.cor.cpu().numpy())
-            np.save(os.path.join(output_dir,"{}_{}_sparsity.npy".format(model_name, name)), module.statistics.sparsity)
+            #np.save(os.path.join(output_dir,"{}_{}_sparsity.npy".format(model_name, name)), module.statistics.sparsity)
             # np.savetxt(os.path.join(output_dir,"{}_{}_mean.csv".format(model_name, name)), module.statistics.mean.cpu().numpy(), delimiter=",")
             # np.savetxt(os.path.join(output_dir,"{}_{}_var.csv".format(model_name, name)), module.statistics.var.cpu().numpy(), delimiter=",")
             # np.savetxt(os.path.join(output_dir,"{}_{}_correlation.csv".format(model_name, name)), module.statistics.cor.cpu().numpy(), delimiter=",")
@@ -58,7 +58,7 @@ def __init__(self, stream_num):
         self.var = torch.zeros(stream_num)
         self.cov = torch.zeros(stream_num, stream_num)
         self.cor = torch.zeros(stream_num, stream_num)
-        self.sparsity = np.empty(shape=[0,stream_num])
+        #self.sparsity = np.empty(shape=[0,stream_num])
 
         if torch.cuda.is_available():
             self.mean = self.mean.cuda()
@@ -71,7 +71,7 @@ def update(self, newValues):
 
         self.var = self.var * self.count
         self.cov = self.cov * (self.count - 1)
-        self.sparsity = np.vstack((self.sparsity, newValues.clone().cpu().numpy()))
+        #self.sparsity = np.vstack((self.sparsity, newValues.clone().cpu().numpy()))
 
         assert newValues.size()[1] == self.stream_num
         self.count += newValues.size()[0]
@@ -104,9 +104,11 @@ def __init__(self, conv_module):
         self.kk = np.prod(self.conv_module.kernel_size)
 
     def forward(self, x):
+        # Compared with the MASE implementation, the differences are:
+        # 1) torch.nn.Unfold is used, 2) patches are randomly sampled
 
-        with open(f"input.dat", 'w') as f:
-            f.write("\n".join([ str(i) for i in x.clone().cpu().numpy().reshape(-1).tolist() ]))
+        #with open(f"input.dat", 'w') as f:
+        #    f.write("\n".join([ str(i) for i in x.clone().cpu().numpy().reshape(-1).tolist() ]))
 
         # https://discuss.pytorch.org/t/make-custom-conv2d-layer-efficient-wrt-speed-and-memory/70175
         assert self.conv_module.padding_mode == 'zeros'
@@ -132,9 +134,10 @@ def forward(self, x):
 
         # roll the loop to reduce memory
        self.roll_factor = 7
-        assert h_windows == w_windows
         if h_windows % self.roll_factor != 0:
             self.roll_factor = get_factors(h_windows)[1]
+        if w_windows % self.roll_factor != 0:
+            self.roll_factor = 1
 
         for hi, wi in np.ndindex(self.roll_factor, self.roll_factor):
             hstart = hi * (h_windows // self.roll_factor)
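
A minimal sketch of how the per-layer statistics written by output_sparsity_to_csv could be inspected after running coco_main.py; it assumes the default ./output directory and the default yolov8n arch, and the layer name below is hypothetical and should be read off the generated .npy file names:

import numpy as np

# Files follow the "{model_name}_{layer_name}_{stat}.npy" pattern used by output_sparsity_to_csv.
layer = "model.0.conv"  # hypothetical layer name; check the actual file names under ./output
mean = np.load(f"output/yolov8n_{layer}_mean.npy")
var = np.load(f"output/yolov8n_{layer}_var.npy")
cor = np.load(f"output/yolov8n_{layer}_correlation.npy")
print(mean.shape, var.shape, cor.shape)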