From f0e4df0df2e5113106d2852841223e1b90422dbe Mon Sep 17 00:00:00 2001 From: DataXujing <274762204@qq.com> Date: Thu, 21 Nov 2019 21:16:05 +0800 Subject: [PATCH] :bug: --- README.md | 72 ++++++++++++++++++++--------------- args.py | 4 +- data/coco.names | 3 +- data/yolo_anchors.txt | 2 +- data_pro.py | 86 +++++++++++++++++++++--------------------- docs/kmeans.png | Bin 0 -> 9004 bytes get_kmeans.py | 6 +-- utils/data_utils.py | 1 + 8 files changed, 95 insertions(+), 79 deletions(-) create mode 100644 docs/kmeans.png diff --git a/README.md b/README.md index beda92c..9597162 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ ### 1. 📣 数据介绍 -确定了业务场景之后,需要手机大量的数据(之前参加过一个安全帽识别检测的比赛,但是数据在比赛平台无法下载为己用),一般来说包含两大来源,一部分是网络数据,可以通过百度、Google图片爬虫拿到,另一部分是用户场景的视频录像,后一部分相对来说数据量更大,但出于商业因素几乎不会开放。本文开源的安全帽检测数据集([SafetyHelmetWearing-Dataset, SHWD](https://github.com/njvisionpower/Safety-Helmet-Wearing-Dataset))主要通过爬虫拿到,总共有7581张图像,包含9044个佩戴安全帽的bounding box(正类),以及111514个未佩戴安全帽的bounding box(负类),所有的图像用labelimg标注出目标区域及类别。其中每个bounding box的标签:hat”表示佩戴安全帽,“person”表示普通未佩戴的行人头部区域的bounding box。另外本数据集中person标签的数据大多数来源于[SCUT-HEAD](https://github.com/HCIILAB/SCUT-HEAD-Dataset-Release)数据集,用于判断是未佩戴安全帽的人。大致说一下数据集构造的过程: +确定了业务场景之后,需要收集大量的数据(之前参加过一个安全帽识别检测的比赛,但是数据在比赛平台无法下载为己用),一般来说包含两大来源,一部分是网络数据,可以通过百度、Google图片爬虫拿到,另一部分是用户场景的视频录像,后一部分相对来说数据量更大,但出于商业因素几乎不会开放。本文开源的安全帽检测数据集([SafetyHelmetWearing-Dataset, SHWD](https://github.com/njvisionpower/Safety-Helmet-Wearing-Dataset))主要通过爬虫拿到,总共有7581张图像,包含9044个佩戴安全帽的bounding box(正类),以及111514个未佩戴安全帽的bounding box(负类),所有的图像用labelimg标注出目标区域及类别。其中每个bounding box的标签:“hat”表示佩戴安全帽,“person”表示普通未佩戴的行人头部区域的bounding box。另外本数据集中person标签的数据大多数来源于[SCUT-HEAD](https://github.com/HCIILAB/SCUT-HEAD-Dataset-Release)数据集,用于判断是未佩戴安全帽的人。大致说一下数据集构造的过程: 1.数据爬取 @@ -47,7 +47,7 @@ Packages: - opencv-python - tqdm -将预训练的darknet的权重下载,下载地址:,并将该weight文件拷贝大`./data/darknet_weights/`下,因为这是darknet版本的预训练权重,需要转化为Tensorflow可用的版本,运行如下代码可以实现: +将预训练的darknet的权重下载,下载地址:,并将该weight文件拷贝到`./data/darknet_weights/`下,因为这是darknet版本的预训练权重,需要转化为Tensorflow可用的版本,运行如下代码可以实现: ```shell python convert_weight.py @@ -55,7 +55,7 @@ python convert_weight.py 这样转化后的Tensorflow checkpoint文件被存放在:`./data/darknet_weights/`目录。你也可以下载已经转化好的模型: -[Google云盘]((https://drive.google.com/drive/folders/1mXbNgNxyXPi7JNsnBaxEv1-nWr7SVoQt?usp=sharing) [GitHub Release](https://github.com/wizyoung/YOLOv3_TensorFlow/releases/) +[Google云盘](https://drive.google.com/drive/folders/1mXbNgNxyXPi7JNsnBaxEv1-nWr7SVoQt?usp=sharing) [GitHub Release](https://github.com/wizyoung/YOLOv3_TensorFlow/releases/) ### 3.🔰 训练数据构建 @@ -67,17 +67,19 @@ python convert_weight.py ```shell python data_pro.py ``` -分割训练集,验证集,测试集并在`./data/my_data/`下生成`train.txt/val.txt/test.txt`,对于一张图像对应一行数据,包括`image_index`,`image_absolute_path`,`box_1`,`box_2`,...,`box_n`,每个字段中间是用空格分隔的,其中: +分割训练集,验证集,测试集并在`./data/my_data/`下生成`train.txt/val.txt/test.txt`,对于一张图像对应一行数据,包括`image_index`,`image_absolute_path`, `img_width`, `img_height`,`box_1`,`box_2`,...,`box_n`,每个字段中间是用空格分隔的,其中: + `image_index`文本的行号 ++ `image_absolute_path` 一定是绝对路径 ++ `img_width`, `img_height`,`box_1`,`box_2`,...,`box_n`中涉及数值的取值一定取int型 + `box_x`的形式为:`label_index, x_min,y_min,x_max,y_max`(注意坐标原点在图像的左上角) + `label_index`是label对应的index(取值为0-class_num-1),这里要注意YOLO系列的模型训练与SSD不同,label不包含background 例子: ``` -0 xxx/xxx/a.jpg 0 453 369 473 391 1 588 245 608 268 -1 xxx/xxx/b.jpg 1 466 403 485 422 2 793 300 809 320 +0 xxx/xxx/a.jpg 1920,1080,0 453 369 473 391 1 588 245 608 268 +1 xxx/xxx/b.jpg 1920,1080,1 466 403 485 422 2 793 300 809 320 ... 
``` @@ -98,6 +100,8 @@ person python get_kmeans.py ``` +![](docs/kmeans.png) + 可以得到9个anchors和平均的IOU,把anchors保存在文本文件:`./data/yolo_anchors.txt`, **注意: Kmeans计算出的YOLO Anchors是在调整大小后的图像尺度上的,默认的调整大小方法是保持图像的纵横比。** @@ -112,8 +116,8 @@ python get_kmeans.py 修改args.py

 ### Some paths
-train_file = './data/my_data/train.txt'  # The path of the training txt file.
-val_file = './data/my_data/val.txt'  # The path of the validation txt file.
+train_file = './data/my_data/label/train.txt'  # The path of the training txt file.
+val_file = './data/my_data/label/val.txt'  # The path of the validation txt file.
 restore_path = './data/darknet_weights/yolov3.ckpt'  # The path of the weights to restore.
 save_dir = './checkpoint/'  # The directory of the weights to save.
 log_dir = './data/logs/'  # The directory to store the tensorboard log files.
@@ -121,11 +125,12 @@ progress_log_path = './data/progress.log'  # The path to record the training pro
 anchor_path = './data/yolo_anchors.txt'  # The path of the anchor txt file.
 class_name_path = './data/coco.names'  # The path of the class names.
 ### Training releated numbers
-batch_size = 2  # 需要调整为自己的类别数
+batch_size = 32  # or 6 (adjust to GPU memory)
 img_size = [416, 416]  # Images will be resized to `img_size` and fed to the network, size format: [width, height]
-total_epoches = 500  # 训练周期调整
-train_evaluation_step = 50  # Evaluate on the training batch after some steps.
-val_evaluation_epoch = 1  # Evaluate on the whole validation dataset after some steps. Set to None to evaluate every epoch.
+letterbox_resize = True  # Whether to use the letterbox resize, i.e., keep the original aspect ratio in the resized image.
+total_epoches = 500
+train_evaluation_step = 100  # Evaluate on the training batch after some steps.
+val_evaluation_epoch = 50  # Evaluate on the whole validation dataset after some epochs. Set to None to evaluate every epoch.
 save_epoch = 10  # Save the model after some epochs.
 batch_norm_decay = 0.99  # decay in bn ops
 weight_decay = 5e-4  # l2 weight decay
@@ -134,45 +139,52 @@ global_step = 0  # used when resuming training
 num_threads = 10  # Number of threads for image processing used in tf.data pipeline.
 prefetech_buffer = 5  # Prefetech_buffer used in tf.data pipeline.
 ### Learning rate and optimizer
-optimizer_name = 'adam'  # Chosen from [sgd, momentum, adam, rmsprop]
+optimizer_name = 'momentum'  # Chosen from [sgd, momentum, adam, rmsprop]
 save_optimizer = True  # Whether to save the optimizer parameters into the checkpoint file.
-learning_rate_init = 1e-3
-lr_type = 'exponential'  # Chosen from [fixed, exponential, cosine_decay, cosine_decay_restart, piecewise]
+learning_rate_init = 1e-4
+lr_type = 'piecewise'  # Chosen from [fixed, exponential, cosine_decay, cosine_decay_restart, piecewise]
 lr_decay_epoch = 5  # Epochs after which learning rate decays. Int or float. Used when chosen `exponential` and `cosine_decay_restart` lr_type.
 lr_decay_factor = 0.96  # The learning rate decay factor. Used when chosen `exponential` lr_type.
 lr_lower_bound = 1e-6  # The minimum learning rate.
-# piecewise params
-pw_boundaries = [60, 80]  # epoch based boundaries
-pw_values = [learning_rate_init, 3e-5, 1e-4]
+# only used in piecewise lr type
+pw_boundaries = [30, 50]  # epoch based boundaries
+pw_values = [learning_rate_init, 3e-5, 1e-5]
 ### Load and finetune
 # Choose the parts you want to restore the weights. List form.
-# Set to None to restore the whole model.
-restore_part = ['yolov3/darknet53_body']
+# restore_include: None, restore_exclude: None  => restore the whole model
+# restore_include: None, restore_exclude: scope  => restore the whole model except `scope`
+# restore_include: scope1, restore_exclude: scope2  => if scope1 contains scope2, restore scope1 and not restore scope2 (scope1 - scope2)
+# choice 1: only restore the darknet body
+# restore_include = ['yolov3/darknet53_body']
+# restore_exclude = None
+# choice 2: restore all layers except the last conv2d layer in each of the 3 scales
+restore_include = None
+restore_exclude = ['yolov3/yolov3_head/Conv_14', 'yolov3/yolov3_head/Conv_6', 'yolov3/yolov3_head/Conv_22']
 # Choose the parts you want to finetune. List form.
 # Set to None to train the whole model.
 update_part = ['yolov3/yolov3_head']
 ### other training strategies
-multi_scale_train = False  # Whether to apply multi-scale training strategy. Image size varies from [320, 320] to [640, 640] by default.
-use_label_smooth = False # Whether to use class label smoothing strategy.
-use_focal_loss = False  # Whether to apply focal loss on the conf loss.
-use_mix_up = False  # Whether to use mix up data augmentation strategy. # 数据增强
+multi_scale_train = True  # Whether to apply multi-scale training strategy. Image size varies from [320, 320] to [640, 640] by default.
+use_label_smooth = True # Whether to use class label smoothing strategy.
+use_focal_loss = True  # Whether to apply focal loss on the conf loss.
+use_mix_up = True  # Whether to use mix up data augmentation strategy. 
 use_warm_up = True  # whether to use warm up strategy to prevent from gradient exploding.
 warm_up_epoch = 3  # Warm up training epoches. Set to a larger value if gradient explodes.
 ### some constants in validation
-# nms 非极大值抑制
-nms_threshold = 0.5  # iou threshold in nms operation
-score_threshold = 0.5  # threshold of the probability of the classes in nms operation
-nms_topk = 50  # keep at most nms_topk outputs after nms
+# nms
+nms_threshold = 0.45  # iou threshold in nms operation
+score_threshold = 0.01  # threshold of the probability of the classes in nms operation, i.e. score = pred_confs * pred_probs. set lower for higher recall.
+nms_topk = 150  # keep at most nms_topk outputs after nms
 # mAP eval
 eval_threshold = 0.5  # the iou threshold applied in mAP evaluation
+use_voc_07_metric = False  # whether to use voc 2007 evaluation metric, i.e. the 11-point metric
 ### parse some params
 anchors = parse_anchors(anchor_path)
 classes = read_class_names(class_name_path)
 class_num = len(classes)
 train_img_cnt = len(open(train_file, 'r').readlines())
 val_img_cnt = len(open(val_file, 'r').readlines())
-train_batch_num = int(math.ceil(float(train_img_cnt) / batch_size))  # iteration
-
+train_batch_num = int(math.ceil(float(train_img_cnt) / batch_size))
 lr_decay_freq = int(train_batch_num * lr_decay_epoch)
 pw_boundaries = [float(i) * train_batch_num + global_step for i in pw_boundaries]
 
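The last lines of the args.py snippet above turn epoch-based settings into step (batch) counts. A minimal sketch of that conversion, using an assumed line count for train.txt (the real value is read from the file) and the batch_size and piecewise boundaries shown above:

```python
import math

# Assumed numbers for illustration only: the real train_img_cnt comes from
# counting the lines of train.txt, batch_size is whatever args.py sets.
train_img_cnt = 6000      # hypothetical number of training images
batch_size = 32
global_step = 0
lr_decay_epoch = 5

# One epoch corresponds to this many training steps (batches).
train_batch_num = int(math.ceil(float(train_img_cnt) / batch_size))   # 188

# Epoch-based quantities become step-based ones.
lr_decay_freq = int(train_batch_num * lr_decay_epoch)                 # 940
pw_boundaries = [30, 50]   # epochs, as in the snippet above
pw_boundaries = [float(i) * train_batch_num + global_step for i in pw_boundaries]
print(train_batch_num, lr_decay_freq, pw_boundaries)  # 188 940 [5640.0, 9400.0]
```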
diff --git a/args.py b/args.py index 7d67c82..51517f6 100644 --- a/args.py +++ b/args.py @@ -7,8 +7,8 @@ import math ### Some paths -train_file = './data/my_data/train.txt' # The path of the training txt file. -val_file = './data/my_data/val.txt' # The path of the validation txt file. +train_file = './data/my_data/label/train.txt' # The path of the training txt file. +val_file = './data/my_data/label/val.txt' # The path of the validation txt file. restore_path = './data/darknet_weights/yolov3.ckpt' # The path of the weights to restore. save_dir = './checkpoint/' # The directory of the weights to save. log_dir = './data/logs/' # The directory to store the tensorboard log files. diff --git a/data/coco.names b/data/coco.names index d32321c..3e8e951 100644 --- a/data/coco.names +++ b/data/coco.names @@ -1 +1,2 @@ -biopsy forceps \ No newline at end of file +hat +person \ No newline at end of file diff --git a/data/yolo_anchors.txt b/data/yolo_anchors.txt index 0281231..633e5d2 100644 --- a/data/yolo_anchors.txt +++ b/data/yolo_anchors.txt @@ -1 +1 @@ -676,197, 763,250, 684,283, 868,231, 745,273, 544,391, 829,258, 678,316, 713,355 \ No newline at end of file +5,5, 6,7, 7,9, 10,11, 13,15, 19,21, 27,31, 43,50, 79,93 \ No newline at end of file diff --git a/data_pro.py b/data_pro.py index 93ff560..e9562f2 100644 --- a/data_pro.py +++ b/data_pro.py @@ -33,12 +33,12 @@ def __init__(self,data_path): def load_labels(self, model): if model == 'train': - txtname = os.path.join(self.data_path, 'train_img.txt') + txtname = os.path.join(self.data_path, 'ImageSets/Main/train.txt') if model == 'test': - txtname = os.path.join(self.data_path, 'test_img.txt') + txtname = os.path.join(self.data_path, 'ImageSets/Main/test.txt') if model == "val": - txtname = os.path.join(self.data_path, 'val_img.txt') + txtname = os.path.join(self.data_path, 'ImageSets/Main/val.txt') with open(txtname, 'r') as f: @@ -47,14 +47,14 @@ def load_labels(self, model): my_index = 0 for ind in image_ind: - class_inds, x1s, y1s, x2s, y2s = self.load_data(ind) + class_inds, x1s, y1s, x2s, y2s,img_width,img_height = self.load_data(ind) if len(class_inds) == 0: pass else: annotation_label = "" #box_x: label_index, x_min,y_min,x_max,y_max - for label_i in range(len(clas_inds)): + for label_i in range(len(class_inds)): annotation_label += " " + str(class_inds[label_i]) annotation_label += " " + str(x1s[label_i]) @@ -62,8 +62,8 @@ def load_labels(self, model): annotation_label += " " + str(x2s[label_i]) annotation_label += " " + str(y2s[label_i]) - with open(model+".txt","a") as f: - f.write(str(my_index) + " " + data_path+"/ImageSets/"+ind+".jpg" + annotation_label + "\n") + with open("./data/my_data/label/"+model+".txt","a") as f: + f.write(str(my_index) + " " + data_path+"/JPEGImages/"+ind+".jpg"+" "+str(img_width) +" "+str(img_height)+ annotation_label + "\n") my_index += 1 @@ -76,8 +76,8 @@ def load_data(self, index): filename = os.path.join(self.data_path, 'Annotations', index + '.xml') tree = ET.parse(filename) image_size = tree.find('size') - # image_width = float(image_size.find('width').text) - # image_height = float(image_size.find('height').text) + image_width = int(float(image_size.find('width').text)) + image_height = int(float(image_size.find('height').text)) # h_ratio = 1.0 * self.image_size / image_height # w_ratio = 1.0 * self.image_size / image_width @@ -91,37 +91,38 @@ def load_data(self, index): for obj in objects: box = obj.find('bndbox') - x1 = float(box.find('xmin').text) - y1 = float(box.find('ymin').text) - x2 = 
float(box.find('xmax').text) - y2 = float(box.find('ymax').text) + x1 = int(float(box.find('xmin').text)) + y1 = int(float(box.find('ymin').text)) + x2 = int(float(box.find('xmax').text)) + y2 = int(float(box.find('ymax').text)) # x1 = max(min((float(box.find('xmin').text)) * w_ratio, self.image_size), 0) # y1 = max(min((float(box.find('ymin').text)) * h_ratio, self.image_size), 0) # x2 = max(min((float(box.find('xmax').text)) * w_ratio, self.image_size), 0) # y2 = max(min((float(box.find('ymax').text)) * h_ratio, self.image_size), 0) - class_ind = self.class_to_ind[obj.find('name').text] - # class_ind = self.class_to_ind[obj.find('name').text.lower().strip()] - - # boxes = [0.5 * (x1 + x2) / self.image_size, 0.5 * (y1 + y2) / self.image_size, np.sqrt((x2 - x1) / self.image_size), np.sqrt((y2 - y1) / self.image_size)] - # cx = 1.0 * boxes[0] * self.cell_size - # cy = 1.0 * boxes[1] * self.cell_size - # xind = int(np.floor(cx)) - # yind = int(np.floor(cy)) - - # label[yind, xind, :, 0] = 1 - # label[yind, xind, :, 1:5] = boxes - # label[yind, xind, :, 5 + class_ind] = 1 - - if x1 >= x2 or y1 >= y2: - pass - else: - class_inds.append(class_ind) - x1s.append(x1) - y1s.append(y1) - x2s.append(x2) - y2s.append(y2) - - return class_inds, x1s, y1s, x2s, y2s + if obj.find('name').text in self.classes: + class_ind = self.class_to_ind[obj.find('name').text] + # class_ind = self.class_to_ind[obj.find('name').text.lower().strip()] + + # boxes = [0.5 * (x1 + x2) / self.image_size, 0.5 * (y1 + y2) / self.image_size, np.sqrt((x2 - x1) / self.image_size), np.sqrt((y2 - y1) / self.image_size)] + # cx = 1.0 * boxes[0] * self.cell_size + # cy = 1.0 * boxes[1] * self.cell_size + # xind = int(np.floor(cx)) + # yind = int(np.floor(cy)) + + # label[yind, xind, :, 0] = 1 + # label[yind, xind, :, 1:5] = boxes + # label[yind, xind, :, 5 + class_ind] = 1 + + if x1 >= x2 or y1 >= y2: + pass + else: + class_inds.append(class_ind) + x1s.append(x1) + y1s.append(y1) + x2s.append(x2) + y2s.append(y2) + + return class_inds, x1s, y1s, x2s, y2s, image_width, image_height def data_split(img_path): @@ -141,19 +142,19 @@ def data_split(img_path): for file in files: if file in val_part: - with open("./data/my_data/val_img.txt","a") as val_f: + with open("./data/my_data/ImageSets/Main/val.txt","a") as val_f: val_f.write(file[:-4] + "\n" ) val_index += 1 elif file in test_part: - with open("./data/my_data/test_img.txt","a") as test_f: + with open("./data/my_data/ImageSets/Main/test.txt","a") as test_f: test_f.write(file[:-4] + "\n") test_index += 1 else: - with open("./data/my_data/train_img.txt","a") as train_f: + with open("./data/my_data/ImageSets/Main/train.txt","a") as train_f: train_f.write(file[:-4] + "\n") train_index += 1 @@ -166,12 +167,13 @@ def data_split(img_path): if __name__ == "__main__": # 分割train, val, test - img_path = "./data/my_data/ImageSets" - data_split(img_path) + # img_path = "./data/my_data/ImageSets" + # data_split(img_path) print("===========split data finish============") # 做YOLO V3需要的训练集 - data_path = "./data/my_data" # 尽量用绝对路径 + base_path = os.getcwd() + data_path = os.path.join(base_path,"data/my_data") # 绝对路径 data_p = Data_preprocess(data_path) data_p.load_labels("train") diff --git a/docs/kmeans.png b/docs/kmeans.png new file mode 100644 index 0000000000000000000000000000000000000000..dcbc2c195a7c6fdda1fde48536293077330506b2 GIT binary patch literal 9004 zcmbVycUV)+wl|`Hh=R&1QE8$Sl@20cD1snFnhLK-OX$4|2{lL;=^#xA#Rdq3PUryv zAs}4{gbo1|NT{I%5^nIE^W5)y?tRX8@AtE}Xl5j*gD* z0!UNsIUOB6jCTL#EF2C^+*d-*8@Q 
z-wd(~TqTBbK|LF&POY&o~;usOQ2o&!c5s;YMxY4 zMKX)iA_tFZ6f( za#HFHn%A9hQ7maP1zz~ent%b&C-76~m5Y*rpA`h|D-AYfh_Y4Y9xDZXkV}OM??L5V zO<7}6Y`R1ow9yTm$;s3y`4^K;R%ym$BBJG6=9~}kESw7rC<=o*{M%LN{q`&`b;>Q7c6z8r+Jk7otKBl>KvB0<#5n| zv6oX^wC>%ey_;=4HK0j=eHCXxUepDul!|TlQTvWmPOj>du|)o&&-0eO2<}tZ)G7)& zR=HeV0Zz^Km`XDe84#P0Vs5X9)k#z$O?~u1sX1vBq^%g@B*nycbq=b-iU7#`_8Nx? z#q6&$gRTaW8S!Ithl6C*0)cxOY;%2x8fAqFeRljfVwU>hXKbfAuXd6tVp2SWq~n}? zw~jL*pOIvyFgjmAF4D z(3t&xbLY6^@hy`C6<_(xg_e)pX;a>hSJ9WUey9PW0Eu-wm3ujs2-dwltXANpQ|B|& z{JLzI(FB5h4|PNQWvG3IdmFtuAo;dAA!s@J(XX(JatA@cgpZHLuWO5q&8z(=U%5&| zsW^0ZxBg}%^qi^W_d%LM7qx-AGY@AjP70P0&?VW%z(&`I&x*SU(Ob7(o7 zO0kQq;IA_83RQDO`WHE$6{|Mf%G>Cy?&qi2pWo@MpbPh#17FN%aX}YL_3|@$7tcV} zT8xYoA?LgFPaidmUBUJwFm%Utih9TmxK;GnawLu*%QWYp{*r0@t5SL+J8G z@w$V&ngBZ%FxTj+i?T`G2-Tg;cM!g2sc?%Cf8pt*2_fyGKkMP6ID9=vAX??f^!1U% zU$2&C-peE7P^Z8DJqDAO2m|i z-RYpi`wl=(Y{QP|Jb1#b5PrBxuqF4sITRNZ*W|WB7&2eyyKr#^17e!h6+7KCVyXJQeM$Yx&|XTjw)eK5ftSlf9tjc%*Ilsrq$~yP2!@)D5?-&y zsC;fXr@RCYJg=Hjc)YT@^M?CyRfGX@>7X(wVD<3yuzfzLC#c-bw8`5HbEmTqyvLL00*c83 ze?}B?V_s@I4^HGo`>!cGH`pp-Lnd_Q8(x6N+&Q=CT?WpKh7T}k@a)`by=WmtDZ3Sh z6Mj(lINypLp)&QiUI{nu#-vc`BD%&+cDM814h0~{dHC8*0K;3eP=!ZY+R5-}%wJ!f zE$AtK@csT3$*8LdvR(6)eOGlHNIKCU$rmv8=JWmCyITp62?KI7F0BZ?anv|eW0ik= z-r>;LSElBBa81xE^yk&Lp@My~Du88_!tV3q@dtT?c8(HmW3=-cYUMu-@R0MKaG!6r z`c;NlSODzCJ$ewZK=^^Ub#&n6PgL;Uj4?2)ug=Bhbu1=SsS7t?I)UY7A0X;>P#6#0 zKXT^_z-oVVoO+{C0ugx``B2Ih(4sh6dgAO^cwoE?$qzO*IcRRr)1)CVg{X? ze~t%JjL0^6XqWX$cX1*r@Q;g=bB#6@tJPmIYX^y*(Z)U7;tKz11O$V@Q8s?uI*}!N zhdCb0${6JD#ssFU12<;IBFK@GQ4{z5vo4un+eJggTU(DA_0O*0K9o4g3XWM5gR#~2 zfGVjvW3RZC*a-_c$vP7&@wcaWO&k>B#CBdrGRzSD6ayUl$o-?MLNG_)wy;{&W)28w z=JEaaLCPvzzXtY9E*Vl~uGyduRuP-2jbT_J*kY+hb+OVa)lp*huoST80^FgREZ87& zwEU~Lw3`Y&%s0rS&d=CqGz~TcyCiHT_T;L1AjLu0=+zPYB(QKzCij< zPd^?joiiSFdzi*xGZ`f5tFlrC9Vl*5X*Rbex890N%~AbG-qP+i)O10>` z;90p|x1q8a(RYfH`*ZBlQJNAYG)LxYB9>EIvQIXT;8|vm>s@&yA3Da|EZ(*f;>E#z zSvd}8QTAZA8A`Bxam=*x3(&TAB`Cc2@Gr8a=V)L^R0rM zl1}ELG|*&%`!~w@sO#n;;DJK?&iBkHE#Ie#TRG9;kB|RmhJIR4)B+Hbh_z)^YgadO zTH9X~Lmbv$Hx_!#4#sFjZ(|`Kz4cdlBNa{xJ#b5X+rGl^~KR z_iM6Qbnow)*zdimJvKIQ6a1aGzEbPhgEXywLKE&(yMtq~E|pO~Gv6DU9ZMtcMPfpo<62lQ+`jQkWa}Mp{hYN~`yN_{I?CY-gUh zaAX?3u;nO0l%-=~=2MHx^l4KyVo-wBN5ZPLy3WSvZ*lyOFky3WFr@7dWK6hTwsAqC z?$!xf%%BqqIIT_mgCGT8pQGe`+tlJ&%&NmLmhxRYJa`rP=FqP?O=W5MG>3RHcM2BR z;9j!a_dR)-Ib-z3dpCT^RwVhWPw0 zKbum=!H?TH8YuyOGwsyFezYq?^WE=qt`rev>sSO7V9mE*yw}=*t8m?ia;?&Kg@JmFO27iBf63Si64CzxHmrxt-tZL zx0(l}}(!fIQ}^1zqrhm+cE7PGQG&Vyp*r?1QW$Z6 zk7ULo&Ez@OWgmFD{YLfrIZh}J10=ht>0JFp11>C5n1!&cak*ItyhG-T>Em3!5O~zo zDK2^679w`4FS%iSK~q^NWZkaU$)Cw<*i9(-0P%m6=F8Bn^N6P&4fuG4VGq1Z!KXtosGocH~3#{lFGf4dt>U+egDs zY6pa1LfggMT`#m};jjF7Guqo5g>54zngTsxUnKL{<00gCy;$6ykf?;yuxmfB}4Bz9@{X^ zfpU!5tPC5LMI%I4*e!M9<;f00mM%@>=c_rJM-$T@W0~x$SCQf6!da4V{S`9eN|B!PR|4Nkx}pF!K=gvhx`ETQLG!?u)gYa+7VZ zUJ&R{{^n;@g$wN8*C||u=_>4fpKO>H$jxA1`eA6b2}44%T(yRVw24cxx2E>p3EVQJ zv9A|snqN;@?oq5+8 z<2sgpsNb<|obt(gi1m&^`S0xO{QeCyP>41~J)Bz&Ms-9#M=vZDL5~nifJ|+h-3x(q z8Pahtwl+Q18ozvDXR7QRg!)zwI-*y~|gpGgU6VBs(gbI}lE)C4wBvJaaDCw5D|gWP zb&b@#s|e{hj)xNifaE2aD<<>i>vG-CdNL<@_xQyVT=W-?H1G!dukB;{D=ZTFQe{I} z5_W(=i6$&!W(_XaBrDh4e>h=xKs!TSkwLr$fj4@qAKB}XEzJG7+oZ3K=y~{2s-k0& z(;4tYNvrN;4{Kb`#eUVIslC{l6mX+<>~ezKg|+Kh*TqB0y&1AK?K2oSCKxld=5bF- zN&7%$`+WqQOPfdIuJ6SB+k)FHpV{FMsoNc`$ZcGyl09t8+4oUcFbA$C_{@sYJwpAd zuM=X~_MLcWvK^oHOcffJVWW|t|6Kcq5tAol?qy#Hog6%Yd8bCW>V4XryEnGNwggQnRXZkm{rkfTA*BD!0fw4#T|m^9=42NnR( zp8XU2h-rXlxXng*fAal(5iuQHw^}>X|hJU{@$%NWFed;t>g- z0sN>@Eq2nJ^H1|-4*PU|=MOha-+Rd`y42}B87j4oB&`aOEr$4a zUezRujP0E??WHwcTvgvW7oW$?3BigCIn@Q3X%a^6TyW;3VM&tTJ~q+-=CT%tXov$) zPf_YIQ?KFCBBjYk#xIgGRH>%NC04;cxM- 
zn#!Po4}L^Nm}b8(^%dso7fpC<6Jc+X+3ERRKTMJ4ipPLP(afzU5uzVVhI#)i^hx@c zn$zJ^*PI0M{s1sqK3SeY-uq@C^Lm!u$O**tw4yy?_T>?$I(p1Vd;O{Pzw6@b1}@w3q7P^9>Z4B>+xGnK^mMJ%AatlTOv&wXLB`mTR0y#Lqdwi+5% zFH%&zS%@;5_MCsDyPa|;5bZ_yo79P8bDjQ$dG?F^2Z>?1)aQ^)iN;BKh@fOri=3~nu4vuxxNxTl z<*|mx$XWre;aRrUTgQ59RGBHgYI3it0I5;DAJ#vFiAS9 zKm|Eb%0t+Tp8-9|ga=im6Ywl9AYLHA*Ns0@&VOwVd`z<1&M1EWZURZ{ToIJb~R#7?$}CNh-?ZNOt{A zI)gI&HRN)G)ZpE6ka^-e`KDC7g`uacV`&lln$H!e7m+hnsQ&o+#0TpR{@~Fxb!)AZt z#XeLFFG2g&awex^w=TBWA|N#rCadv(3F&hStu(Jz7nRaJGhqSpzQHE=sgTk;>(8{V z&$r(cgLM_D&8oU22d_mkHzYMQ?OFnd4D&QCS1+SVww_3#zfvlCryYavV}2PtI#NKX zD#BONpLx%rfxqg%LEpav`i(1wUj9rAzs`SM$-)D~p1XAgqF6WK|l>wsl|;L57u?oTQ1vLIHM-4=Y?6tG<^ZPMHa6BVw)mbsrJX| z7A>?DWwH={vNm1K=ju1!cSwumlGMHlu#m#dE@$neiynRqwodC>-{|nRG>+1ii7M<7 zf%@IseRBCP_|@{C{h$fb74cnEq?vrI}yp0)e+a?V_j9Nk$?;@6ll-Q6OxR`Gldu4-ZiD{F(V+^fY#= zHtk?bPDE7buqWGb5cr}GeWPwub!h=axvcAZhqPP2B<|7&dG3M)h)>jr*Nt+O+YNN- z&#irH?zK|UtpGvbV&VtWyPhkiyO{}Tnd^fp{}SY*wN`j}vykb8e%i3uS%;{tNty%X z^YQ{kCQq62*9lXe4`Oi1IjZzY{Qj4pEZKQ`=D*AT&TmPB&OyF76}mY7=)FXAE;d9pdUYBVhz*%a=RK35D9k zpIcQ1m!1EfAARt=6ys6;<(eDumtuMU=G{1m!3WBtz00aZHSnyEiu6Hk=_9gm>RUR?_XB^4zfs+ToKxjYwuo!wctSU$|Y zK0DKatOZ8|31JDM`m3 zRPHn1k(YIGHCj{GMJjv|PT$E=%{Ppyl0Di>=dR;fbp4UrsCK+7rqQcR^E^p#WE93M z5O%i=_h!|#yiG#5PJf4KsAK%Oo+TN+=~+aXqk$)u@eZHIv{ZF&Eea>jx68E~$%XM2 zCBsAa>*x+FZZG{n%e*qZ@~^?r%Q8nWcj+;?B8^$!ix`wto> z>jr`Uz3BZ|kdY?Gwf~NXDIJ=^8miQoK8p6}(2#FdR&yN>;v}TMnr4n(T=1#@-0|mdx0C2?LWiv>nEl|Ka6qq(CNEb! z`ds|_DUck$$lHI*rKG4`9ja(zXlCn(&SZ~)%#XS3WJ1>z_~m1lQKHSm|M+hF8ILu6 zB>QCj#d?q=Bx@(x@tf7uB~?P9(B7z}qox<5x1nM+)PN($l5gt&;zBr#4$b}la3NMO z;^Y4z50YtrJkS)D089jYN;X%nTNV9gU$MjP1ygm_?!sZ3Q0_D~L+aU~|5T6@!TrH< zO-j^yy27u};tRdiffUJ81fs#DTN#@y&coO3Wrc}V`@;*^qTS3q#c8RSUfug|Aq+TWFL6G+E}r2xK2>|ITI~+MN=o=QUAUC* zp_9djA3`H`TaBqdi$vSO8kPIVy+bSjUkWIoaT0nG35r@Z!>C0ossUVjgd6_1lmtoUUz9}5xDZUra0D+i_@A~?0eXaE zN)vClcDSerI?Bp-^WQGLuR>bPcvqcmnwEC1)6>%WLy%}PvMZeJuB;d9j^Of@Snk%K z-`g^am7&c@V{p(jV~=KLm$c}-FH^-z@1)N@2F zaGikfWjYq`F|)IRD)>qdUZsF<(??%}lsYm#97yMd#pM1YJe)b%#34W|j}Zt)p2`7E zwKd=ZZ~aTu|7s7&e;c4_arpiJPvW*`b7toFlV1iSom)SNw!?)Eq^_%mR5cIz587?~ AVE_OC literal 0 HcmV?d00001 diff --git a/get_kmeans.py b/get_kmeans.py index dc1faac..a8863f3 100644 --- a/get_kmeans.py +++ b/get_kmeans.py @@ -98,8 +98,8 @@ def parse_anno(annotation_path, target_size=None): result = [] for line in anno: s = line.strip().split(' ') - img_w = int(s[2]) - img_h = int(s[3]) + img_w = int(float(s[2])) + img_h = int(float(s[3])) s = s[4:] box_cnt = len(s) // 5 for i in range(box_cnt): @@ -139,7 +139,7 @@ def get_kmeans(anno, cluster_num=9): # if target_resize is speficied, the anchors are on the resized image scale # if target_resize is set to None, the anchors are on the original image scale target_size = [416, 416] - annotation_path = "./data/my_data/train.txt" + annotation_path = "./data/my_data/label/train.txt" anno_result = parse_anno(annotation_path, target_size=target_size) anchors, ave_iou = get_kmeans(anno_result, 9) diff --git a/utils/data_utils.py b/utils/data_utils.py index 3e22958..ba1cda6 100644 --- a/utils/data_utils.py +++ b/utils/data_utils.py @@ -128,6 +128,7 @@ def parse_data(line, class_num, img_size, anchors, mode, letterbox_resize): if not isinstance(line, list): img_idx, pic_path, boxes, labels, _, _ = parse_line(line) img = cv2.imread(pic_path) + # print(img.shape[:2]) # expand the 2nd dimension, mix up weight default to 1. boxes = np.concatenate((boxes, np.full(shape=(boxes.shape[0], 1), fill_value=1., dtype=np.float32)), axis=-1) else: