From ead95b25eefe26a3bc0c87546564e162b8de7a2f Mon Sep 17 00:00:00 2001 From: Gunjan Kholapure Date: Mon, 17 Feb 2020 22:30:19 +0530 Subject: [PATCH 1/7] modified point pillars for use with argoverse dataset --- second/.gitignore | 23 + second/builder/dataset_builder.py | 24 +- second/calc_map.py | 470 ++++++ .../pointpillars/car/xyres_12_argo_48.proto | 187 +++ .../configs/pointpillars/car/xyres_16.proto | 30 +- .../pointpillars/car/xyres_16_argo_30.proto | 189 +++ .../pointpillars/car/xyres_16_argo_48.proto | 187 +++ .../pointpillars/car/xyres_16_argo_64.proto | 187 +++ .../pointpillars/car/xyres_16_argo_80.proto | 187 +++ .../pointpillars/car/xyres_16_argo_88.proto | 190 +++ .../pointpillars/car/xyres_16_argo_96.proto | 187 +++ .../car/xyres_16_argo_upper.proto | 187 +++ .../pointpillars/car/xyres_20_argo_48.proto | 187 +++ .../car/xyres_20_argo_upper.proto | 187 +++ .../pointpillars/car/xyres_24_argo_48.proto | 187 +++ .../pointpillars/car/xyres_28_argo_48.proto | 187 +++ .../pedestrian/xyres_16_argo_56.proto | 190 +++ .../pedestrian/xyres_16_argo_64.proto | 190 +++ second/core/non_max_suppression/nms_gpu.py | 1308 +++++++++-------- second/data/dataset.py | 114 +- second/data/preprocess.py | 311 ++-- second/get_tracking_result.py | 92 ++ second/ped_train.trch | Bin 0 -> 30537 bytes second/ped_val.trch | Bin 0 -> 10446 bytes second/pp_inference.py | 267 ++++ second/protos/input_reader_pb2.py | 3 +- .../pytorch/builder/input_reader_builder.py | 4 +- second/pytorch/builder/second_builder.py | 3 +- second/pytorch/core/box_torch_ops.py | 3 +- second/pytorch/inference.py | 1 + second/pytorch/models/pointpillars.py | 19 +- second/pytorch/models/voxelnet.py | 37 +- second/pytorch/train.py | 72 +- second/pytorch/utils.py | 1 - second/sample.trch | Bin 0 -> 1241 bytes second/sample_val.trch | Bin 0 -> 751 bytes second/test.trch | Bin 0 -> 61458 bytes second/train.trch | Bin 0 -> 40011 bytes second/utils/eval.py | 43 +- second/val.trch | Bin 0 -> 14683 bytes second/val_full.trch | Bin 0 -> 75092 bytes torchplus/train/checkpoint.py | 4 +- 42 files changed, 4569 insertions(+), 889 deletions(-) create mode 100644 second/.gitignore create mode 100644 second/calc_map.py create mode 100644 second/configs/pointpillars/car/xyres_12_argo_48.proto create mode 100644 second/configs/pointpillars/car/xyres_16_argo_30.proto create mode 100644 second/configs/pointpillars/car/xyres_16_argo_48.proto create mode 100644 second/configs/pointpillars/car/xyres_16_argo_64.proto create mode 100644 second/configs/pointpillars/car/xyres_16_argo_80.proto create mode 100644 second/configs/pointpillars/car/xyres_16_argo_88.proto create mode 100644 second/configs/pointpillars/car/xyres_16_argo_96.proto create mode 100644 second/configs/pointpillars/car/xyres_16_argo_upper.proto create mode 100644 second/configs/pointpillars/car/xyres_20_argo_48.proto create mode 100644 second/configs/pointpillars/car/xyres_20_argo_upper.proto create mode 100644 second/configs/pointpillars/car/xyres_24_argo_48.proto create mode 100644 second/configs/pointpillars/car/xyres_28_argo_48.proto create mode 100644 second/configs/pointpillars/pedestrian/xyres_16_argo_56.proto create mode 100644 second/configs/pointpillars/pedestrian/xyres_16_argo_64.proto create mode 100644 second/get_tracking_result.py create mode 100644 second/ped_train.trch create mode 100644 second/ped_val.trch create mode 100644 second/pp_inference.py create mode 100644 second/sample.trch create mode 100644 second/sample_val.trch create mode 100644 second/test.trch create mode 
100644 second/train.trch create mode 100644 second/val.trch create mode 100644 second/val_full.trch diff --git a/second/.gitignore b/second/.gitignore new file mode 100644 index 00000000..30ad8a62 --- /dev/null +++ b/second/.gitignore @@ -0,0 +1,23 @@ +*.sh +report* +*.ipynb +*.tckpt +analysis/ +*.pkl +models/ +network_input_examples/ +pcd_output/ +ped_models_56/ +road_train_info.trch +sample_result_ab3dmot_format/ +sample_road_info.trch +sample_road_val_info.trch +sample_test/ +sample_test_wroad/ +sample_test_wroi/ +sample_train_roadmap/ +sample_val_roadmap/ +test_roadmap/ +track_analysis/ +sample_val_roadmap/ +test/ diff --git a/second/builder/dataset_builder.py b/second/builder/dataset_builder.py index bc6d2d4c..8985c174 100644 --- a/second/builder/dataset_builder.py +++ b/second/builder/dataset_builder.py @@ -28,10 +28,13 @@ import numpy as np from second.builder import dbsampler_builder from functools import partial +import torch + def build(input_reader_config, model_config, + info, training, voxel_generator, target_assigner=None): @@ -58,8 +61,8 @@ def build(input_reader_config, cfg = input_reader_config db_sampler_cfg = input_reader_config.database_sampler db_sampler = None - if len(db_sampler_cfg.sample_groups) > 0: # enable sample - db_sampler = dbsampler_builder.build(db_sampler_cfg) + #if len(db_sampler_cfg.sample_groups) > 0: # enable sample + # db_sampler = dbsampler_builder.build(db_sampler_cfg) u_db_sampler_cfg = input_reader_config.unlabeled_database_sampler u_db_sampler = None if len(u_db_sampler_cfg.sample_groups) > 0: # enable sample @@ -68,10 +71,17 @@ def build(input_reader_config, # [352, 400] feature_map_size = grid_size[:2] // out_size_factor feature_map_size = [*feature_map_size, 1][::-1] - + inform = info.copy() + inform["road_map"] = None + + root_path = input_reader_config.kitti_root_path + index_list = torch.load(input_reader_config.kitti_info_path) + + inform["index_list"] = index_list + prep_func = partial( prep_pointcloud, - root_path=cfg.kitti_root_path, + root_path=root_path, class_names=list(cfg.class_names), voxel_generator=voxel_generator, target_assigner=target_assigner, @@ -100,9 +110,11 @@ def build(input_reader_config, remove_environment=cfg.remove_environment, use_group_id=cfg.use_group_id, out_size_factor=out_size_factor) + dataset = KittiDataset( - info_path=cfg.kitti_info_path, - root_path=cfg.kitti_root_path, + info_path=inform, + root_path=root_path, + class_names=list(cfg.class_names), num_point_features=num_point_features, target_assigner=target_assigner, feature_map_size=feature_map_size, diff --git a/second/calc_map.py b/second/calc_map.py new file mode 100644 index 00000000..90784d9e --- /dev/null +++ b/second/calc_map.py @@ -0,0 +1,470 @@ +import pickle +import argoverse +from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader +from argoverse.map_representation.map_api import ArgoverseMap +from shapely.geometry import Polygon +import argparse +import numpy as np +import operator +import torch +import time + +# result path, range + + +parser = argparse.ArgumentParser(description="arg parser") + + + +#parser.add_argument('--device', type=int, default=1, help='specify the gpu device for training') +parser.add_argument('--res_path',type=str,help='specify path where result file is stored.') +parser.add_argument('--range',type=float,help='specify range of the model for mAP evaluation') +parser.add_argument('--thresh',type=float,help='specify threshold for mAP evaluation') 
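+# Usage sketch (paths below are placeholders, not files from this repo):
+#   python calc_map.py --res_path result.pkl --range 48.0 --thresh 0.5 --det_thresh 0.3
+# --range should match the point_cloud_range of the model being evaluated
+# (e.g. 48 for the xyres_*_argo_48 configs).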
+parser.add_argument('--det_thresh',type=float,help='specify detection threshold for filtering detections')
+
+args,unknown = parser.parse_known_args()
+
+# Adapted from:
+# https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/master/lib/Evaluator.py
+def ElevenPointInterpolatedAP(rec, prec):
+    # NOTE: the name is kept from the reference implementation, but this
+    # variant interpolates over 41 evenly spaced recall points, not 11.
+    mrec = np.asarray(rec)
+    mpre = np.asarray(prec)
+    recallValues = list(np.linspace(0, 1, 41)[::-1])
+    rhoInterp = []
+    recallValid = []
+    # For each recall value r (1.0, 0.975, ..., 0)
+    for r in recallValues:
+        # Obtain all recall values higher than or equal to r
+        argGreaterRecalls = np.argwhere(mrec >= r)
+        pmax = 0
+        # If there are recalls above r, take the maximum precision among them
+        if argGreaterRecalls.size != 0:
+            pmax = max(mpre[argGreaterRecalls.min():])
+        recallValid.append(r)
+        rhoInterp.append(pmax)
+    # By definition AP = sum(max(precision whose recall is above r)) / 41
+    ap = sum(rhoInterp) / 41
+    # Generate values for plotting the interpolated curve
+    rvals = [recallValid[0]] + list(recallValid) + [0]
+    pvals = [0] + list(rhoInterp) + [0]
+    cc = []
+    for i in range(len(rvals)):
+        p = (rvals[i], pvals[i - 1])
+        if p not in cc:
+            cc.append(p)
+        p = (rvals[i], pvals[i])
+        if p not in cc:
+            cc.append(p)
+    recallValues = [i[0] for i in cc]
+    rhoInterp = [i[1] for i in cc]
+    return [ap, rhoInterp, recallValues, None]
+
+def CalculateAveragePrecision(rec, prec):
+    # Every-point interpolation (kept for reference; the calls below are commented out).
+    mrec = [0] + list(rec) + [1]
+    mpre = [0] + list(prec) + [0]
+    for i in range(len(mpre) - 1, 0, -1):
+        mpre[i - 1] = max(mpre[i - 1], mpre[i])
+    ii = []
+    for i in range(len(mrec) - 1):
+        if mrec[1:][i] != mrec[0:-1][i]:
+            ii.append(i + 1)
+    ap = 0
+    for i in ii:
+        ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i])
+    return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii]
+
+# 3d IoU, adapted from:
+# https://github.com/udacity/didi-competition/blob/master/tracklets/python/evaluate_tracklets.py
+def iou_3d(box_a, box_b, vol_a, vol_b):
+    """
+    A simplified calculation of 3d bounding box overlap.
+    It is assumed that the bounding boxes are only rotated
+    around the Z axis (yaw) from an axis-aligned box.
+    :param box_a, box_b: (8, 3) corner arrays of the boxes to compare
+    :param vol_a, vol_b: precomputed box volumes
+    :return: intersection over union (float)
+    """
+    # height (Z) overlap; reorder axes from the KITTI-style corner layout
+    box_a = np.array([box_a[:, 0], box_a[:, 2], box_a[:, 1]])
+    box_b = np.array([box_b[:, 0], box_b[:, 2], box_b[:, 1]])
+
+    min_h_a = np.min(box_a[2])
+    max_h_a = np.max(box_a[2])
+    min_h_b = np.min(box_b[2])
+    max_h_b = np.max(box_b[2])
+    max_of_min = np.max([min_h_a, min_h_b])
+    min_of_max = np.min([max_h_a, max_h_b])
+    z_intersection = np.max([0, min_of_max - max_of_min])
+    if z_intersection == 0:
+        return 0.
+
+    # oriented XY overlap
+    xy_poly_a = Polygon(zip(*box_a[0:2, 0:4]))
+    xy_poly_b = Polygon(zip(*box_b[0:2, 0:4]))
+    try:
+        xy_intersection = xy_poly_a.intersection(xy_poly_b).area
+    except Exception:
+        # degenerate polygons; treat as no overlap instead of crashing
+        print("ERROR", xy_poly_a, xy_poly_b)
+        return 0.
+    if xy_intersection == 0:
+        return 0.
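+    # The remainder of iou_3d (next lines) applies the standard IoU identity:
+    #   IoU = V(A ∩ B) / (V(A) + V(B) - V(A ∩ B)),
+    # with V(A ∩ B) approximated as z_intersection * xy_intersection.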
+    vol_intersect = z_intersection * xy_intersection
+    union = vol_a + vol_b - vol_intersect
+    iou_val = vol_intersect / union
+
+    return iou_val
+
+
+def quaternion_to_euler(quat):
+    # numpy is used here instead of the math module so no extra import is needed
+    w, x, y, z = quat
+
+    t0 = +2.0 * (w * x + y * z)
+    t1 = +1.0 - 2.0 * (x * x + y * y)
+    roll = np.arctan2(t0, t1)
+    t2 = +2.0 * (w * y - z * x)
+    t2 = +1.0 if t2 > +1.0 else t2
+    t2 = -1.0 if t2 < -1.0 else t2
+    pitch = np.arcsin(t2)
+    t3 = +2.0 * (w * z + x * y)
+    t4 = +1.0 - 2.0 * (y * y + z * z)
+    yaw = np.arctan2(t3, t4)
+    return yaw, pitch, roll
+
+
+def corners_in_roi_kitti(corners):
+    # a box counts as in range if any of its four BEV corners lies inside
+    # the square [-range, range] x [-range, range]
+    limit = args.range
+    '''
+    for i in range(4):
+        if not (-limit <= corners[i][0] <= limit and -limit <= corners[i][1] <= limit):
+            p1 = Polygon( [ (-limit,limit), (limit,limit), (limit,-limit), (-limit,-limit) ] )
+            p2 = Polygon( [ (corners[0][0],corners[0][1]), (corners[1][0],corners[1][1]), (corners[2][0],corners[2][1]), (corners[3][0],corners[3][1]) ] )
+
+            if p1.intersection(p2).area >= 0.7*p2.area:
+                return True
+            else:
+                return False
+    '''
+    for i in range(4):
+        if -limit <= corners[i][0] <= limit and -limit <= corners[i][1] <= limit:
+            return True
+
+    return False
+
+def center_to_bbox_2d(location, dimensions, yaw):
+    # boxes are in KITTI camera coordinates, so the BEV plane is (x, z)
+    crnrs = []
+    for i in range(len(location)):
+        x, y, z = location[i]
+        l, h, w = dimensions[i]
+        corners = np.array([[l/2, w/2], [l/2, -w/2], [-l/2, -w/2], [-l/2, w/2]])
+
+        rot_mat = np.array([[ np.cos(yaw[i]), np.sin(yaw[i])],
+                            [-np.sin(yaw[i]), np.cos(yaw[i])]])
+        rot_corners = np.dot(rot_mat, corners.T).T
+        rot_corners = rot_corners + [x, z]
+        crnrs.append(rot_corners)
+    return crnrs
+
+def center_to_bbox_3d(location, dimensions, yaw):
+    crnrs = []
+    for i in range(len(location)):
+        x, y, z = location[i]
+        l, h, w = dimensions[i]
+        corners = np.array([[-l/2, 0, w/2], [-l/2, 0, -w/2], [l/2, 0, -w/2], [l/2, 0, w/2],
+                            [-l/2, -h, w/2], [-l/2, -h, -w/2], [l/2, -h, -w/2], [l/2, -h, w/2]])
+
+        rot_mat = np.array([[ np.cos(yaw[i]), 0, np.sin(yaw[i])],
+                            [ 0,              1, 0             ],
+                            [-np.sin(yaw[i]), 0, np.cos(yaw[i])]])
+        rot_corners = np.dot(rot_mat, corners.T).T
+        rot_corners = rot_corners + [x, y, z]
+        crnrs.append(rot_corners)
+    return crnrs
+
+
+def mAP():
+    # knobs: score threshold, the corners_in_roi_kitti range cut (comment it
+    # out for evaluation over the full annotated area), and the gt_roi
+    # driveable-area filter (commented out below)
+    f = open(args.res_path, "rb")
+    result = pickle.load(f)
+
+    root_dir = "./../../argodataset/argoverse-tracking/val/"
+    argoverse_loader = ArgoverseTrackingLoader(root_dir)
+    am = ArgoverseMap()
+
+    preds = result
+    threshold = args.thresh  # IoU threshold above which a detection counts as a true positive
+    out, out_3d = [], []
+    cnt = 0
+
+    file_cnt = 0
+    lcc = 0
+    print(len(preds))
+    for i in range(len(preds)):
+        if file_cnt % 50 == 0:
+            print(file_cnt)
+        file_cnt += 1
+        if len(preds[i]['image_idx']) == 0:
+            continue
+
+        # image_idx is "<sequence>_<frame>"
+        file = preds[i]['image_idx'][0]
+        first_del = file.find('_')
+        seq_no = int(file[:first_del])
+        frame_no = int(file[first_del + 1:])
+        argoverse_data = argoverse_loader[seq_no]
+        objects = argoverse_data.get_label_object(frame_no)
+        good_result = (preds[i]['score'] > args.det_thresh)
+        ps, pind, pdims = preds[i]['score'][good_result], preds[i]['name'][good_result], preds[i]['dimensions'][good_result]
+
+        city_to_egovehicle_se3 = argoverse_data.get_pose(frame_no)
+        locs, rots = preds[i]['location'][good_result], preds[i]['rotation_y'][good_result]
+        city_name = argoverse_data.city_name
+        '''
+        for lci in range(len(locs)):
+            kitti_loc = locs[lci]
+            argo_loc = [kitti_loc[2], -kitti_loc[0], 1.78-kitti_loc[1]]
+            pts = city_to_egovehicle_se3.transform_point_cloud(np.array([argo_loc]))[0][:2]
+            cl_obj, cl_conf, clines = am.get_nearest_centerline(pts, city_name)
+            if not cl_obj.is_intersection:
+                vec, conf = am.get_lane_direction(pts, city_name)
+
+                lidar_unit_vec = city_to_egovehicle_se3.inverse_transform_point_cloud(np.array([[10*vec[0], 10*vec[1], 15]]))[0][:2]
+                lidar_orig = city_to_egovehicle_se3.inverse_transform_point_cloud(np.array([[0., 0., 15.]]))[0][:2]
+                lidar_vec = lidar_unit_vec - lidar_orig
+                lidar_angle = np.arctan2(lidar_vec[1], lidar_vec[0])
+
+                lidar_angle = -np.pi/2 - lidar_angle
+                if lidar_angle > np.pi:
+                    lidar_angle -= 2*np.pi
+                elif lidar_angle < -np.pi:
+                    lidar_angle += 2*np.pi
+
+                if rots[lci] > np.pi:
+                    rots[lci] -= 2*np.pi
+                elif rots[lci] < -np.pi:
+                    rots[lci] += 2*np.pi
+
+                if conf > 0.5 and np.abs(lidar_angle-rots[lci]) > 0.2 and not (np.pi-0.2 <= np.abs(lidar_angle-rots[lci]) <= np.pi+0.2):
+                    lcc += 1
+                    rots[lci] = lidar_angle
+        '''
+
+        pc = center_to_bbox_2d(preds[i]['location'][good_result], preds[i]['dimensions'][good_result], rots)
+        pc_3d = center_to_bbox_3d(preds[i]['location'][good_result], preds[i]['dimensions'][good_result], rots)
+        mark, mark_3d = {}, {}
+        categ = "VEHICLE"
+
+        '''
+        gt_roi = []
+        gt_locss = []
+        for obj in objects:
+            if obj.label_class == categ:
+                gt_roi.append(obj)
+                corners = obj.as_3d_bbox()
+                centers = np.mean(corners, axis=0)
+                gt_locss.append(centers)
+
+        roi_locs = city_to_egovehicle_se3.transform_point_cloud(np.array(gt_locss))  # put into city coords
+        roi_locs_flag = am.remove_non_roi_points(roi_locs, city_name)  # remove non-driveable region
+        gt_roi = np.array(gt_roi)
+        gt_roi = gt_roi[roi_locs_flag]
+        '''
+
+        for obj in objects:
+            if obj.label_class == categ:
+                # convert Argoverse corners to the KITTI-style layout used by the predictions
+                corners = obj.as_2d_bbox()
+                corners = corners[:, [1, 0]]
+                corners[:, [0]] = -corners[:, [0]]
+                corners[[2, 3], :] = corners[[3, 2], :]
+
+                corners_3d = obj.as_3d_bbox()
+                corners_3d = corners_3d[:, [1, 2, 0]]
+                corners_3d[:, 0] = -corners_3d[:, 0]
+                corners_3d[:, 1] = 1.73 - corners_3d[:, 1]
+                corners_3d = corners_3d[[2, 3, 7, 6, 1, 0, 4, 5], :]
+                vol_gt = obj.length * obj.height * obj.width
+                if corners_in_roi_kitti(corners):
+                    cnt += 1
+                    p1 = Polygon([(corners[0][0], corners[0][1]), (corners[1][0], corners[1][1]), (corners[2][0], corners[2][1]), (corners[3][0], corners[3][1])])
+                    mx, mx_3d = 0, 0
+                    temp_crnr, temp_crnr_3d = np.zeros((4, 2)), np.zeros((8, 3))
+                    temp_scr, temp_scr_3d = 0, 0
+                    ind, ind_3d = -1, -1
+
+                    # find the best-overlapping detection for this ground-truth box,
+                    # in BEV and in 3d separately
+                    for num, qnt in enumerate(zip(pc, pc_3d, ps, pind, pdims)):
+                        crnr, crnr_3d, score, c_ind, dims = qnt
+                        pvol = dims[0] * dims[1] * dims[2]
+                        if c_ind == categ:
+                            p2 = Polygon([(crnr[0][0], crnr[0][1]), (crnr[1][0], crnr[1][1]), (crnr[2][0], crnr[2][1]), (crnr[3][0], crnr[3][1])])
+                            iou = p1.intersection(p2).area / p1.union(p2).area
+                            iou_td = iou_3d(corners_3d, crnr_3d, vol_gt, pvol)
+
+                            if iou > mx:
+                                mx = iou
+                                temp_crnr[:] = crnr
+                                temp_scr = score
+                                ind = num
+
+                            if iou_td > mx_3d:
+                                mx_3d = iou_td
+                                temp_crnr_3d[:] = crnr_3d
+                                temp_scr_3d = score
+                                ind_3d = num
+
+                    if mx >= threshold and ind not in mark:
+                        out.append((temp_scr, True))
+                        mark[ind] = 1
+
+                    # ind_3d (not ind) has to be checked here, otherwise the same
+                    # detection could be counted as a 3d true positive twice
+                    if mx_3d >= threshold and ind_3d not in mark_3d:
+                        out_3d.append((temp_scr_3d, True))
+                        mark_3d[ind_3d] = 1
+
+        # every unmatched detection of this class is a false positive
+        for sn in range(len(pind)):
+            if pind[sn] == categ and sn not in mark:
+                out.append((ps[sn], False))
+            if pind[sn] == categ and sn not in mark_3d:
+                out_3d.append((ps[sn], False))
+    prec, prec_3d = 0, 0
+    for elems in out:
+        prec += elems[1]
+    for elems in out_3d:
+        prec_3d += elems[1]
+
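+    # A tiny worked example of what the sweep below computes (hypothetical numbers):
+    # with cnt = 2 ground-truth boxes and out sorted by descending score as
+    #   [(0.9, True), (0.8, False), (0.7, True)],
+    # the running precision list is [1/1, 1/2, 2/3] and recall is [1/2, 1/2, 2/2];
+    # those lists are then fed to the 41-point AP interpolation.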
print("preds - ",len(out),"true predictions - ", prec , " true 3d prediction - ", prec_3d, "ground truth labels - ",cnt) + print(lcc) + + out.sort(key = operator.itemgetter(0),reverse = True) + out_3d.sort(key=operator.itemgetter(0),reverse=True) + print(len(out),len(out_3d)) + + precision, precision_3d = [], [] + recall, recall_3d = [], [] + + tp = 0 + for i in range(len(out)): + if out[i][1]: + tp += 1 + + precision.append(tp/(i+1)) + recall.append(tp/cnt) + + tp3d = 0 + + for j in range(len(out_3d)): + if out_3d[j][1]: + tp3d += 1 + + precision_3d.append(tp3d/(j+1)) + recall_3d.append(tp3d/cnt) + + ''' + plt.plot(recall,precision) + plt.show() + plt.plot(recall_3d,precision_3d) + plt.show() + ''' + + t0 = time.time() + area = np.max(precision) + print(recall[0]) + #print(out[:10]) + find = 0.025 + for i in range(len(recall)): + if recall[i] >= find: + #print(recall[i],precision[i]) + area += np.max(precision[i:]) + find += 0.025 + + + t1 = time.time() + + #auth_ap_every = CalculateAveragePrecision(recall,precision) + + t2 = time.time() + + auth_ap_eleven = ElevenPointInterpolatedAP(recall,precision) + + t3 = time.time() + print(t1-t0,t2-t1,t3-t2) + + print("Average Precision bev- ", area/41) + #print("every point interpolation - ",auth_ap_every[0]) + print("41 point interpolation - ", auth_ap_eleven[0]) + + find3d = 0.025 + area3d = np.max(precision_3d) + + for i in range(len(recall_3d)): + if recall_3d[i] >= find3d: + area3d += np.max(precision_3d[i:]) + find3d += 0.025 + + #auth_ap_every_3d = CalculateAveragePrecision(recall_3d,precision_3d) + auth_ap_eleven_3d = ElevenPointInterpolatedAP(recall_3d,precision_3d) + + print("Average Precision 3d- ", area3d/41) + #print("every point interpolation - ",auth_ap_every_3d[0]) + print("41 point interpolation - ", auth_ap_eleven_3d[0]) + + name = args.res_path.replace("pkl","txt") + + + with open(name,"a+") as f: + f.write("preds - " + str(len(out)) + " true predictions - " + str(prec) + " true 3d prediction - " + str(prec_3d) + " ground truth labels - " + str(cnt) + "\n") + f.write("threshold - " + str(args.thresh) + "\n") + + f.write("bev AP - " + str(area/41) + "\n") + #f.write("every point interpolation - " + str(auth_ap_every[0]) + "\n") + f.write("41 point interpolation - " + str(auth_ap_eleven[0]) + "\n" ) + + f.write("3d AP - " + str(area3d/41) + "\n") + #f.write("every point interpolation - " + str(auth_ap_every_3d[0]) + "\n" ) + f.write("41 point interpolation - " + str(auth_ap_eleven_3d[0]) + "\n" ) + + +if __name__ == '__main__': + mAP() + diff --git a/second/configs/pointpillars/car/xyres_12_argo_48.proto b/second/configs/pointpillars/car/xyres_12_argo_48.proto new file mode 100644 index 00000000..ce8fb262 --- /dev/null +++ b/second/configs/pointpillars/car/xyres_12_argo_48.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-48, -48, -3, 48, 48, 1] + voxel_size : [0.12, 0.12, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: 
{ + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-48, -48, -3, 48, 48, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.24, 0.24, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-47.88, -47.88, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 16000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 16000 + shuffle_points: false + 
num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_16.proto b/second/configs/pointpillars/car/xyres_16.proto index d2b7bdc1..74be6574 100644 --- a/second/configs/pointpillars/car/xyres_16.proto +++ b/second/configs/pointpillars/car/xyres_16.proto @@ -1,7 +1,7 @@ model: { second: { voxel_generator { - point_cloud_range : [0, -39.68, -3, 69.12, 39.68, 1] + point_cloud_range : [-96, -96, -3, 96, 96, 1] voxel_size : [0.16, 0.16, 4] max_number_of_points_per_voxel : 100 } @@ -55,7 +55,7 @@ model: { loss_norm_type: NormByNumPositives # Postprocess - post_center_limit_range: [0, -39.68, -5, 69.12, 39.68, 5] + post_center_limit_range: [-96, -96, -3, 96, 96, 1] use_rotate_nms: false use_multi_class_nms: false nms_pre_max_size: 1000 @@ -64,8 +64,8 @@ model: { nms_iou_threshold: 0.5 use_bev: false - num_point_features: 4 - without_reflectivity: false + num_point_features: 3 + without_reflectivity: true box_coder: { ground_box3d_coder: { linear_dim: false @@ -75,9 +75,9 @@ model: { target_assigner: { anchor_generators: { anchor_generator_stride: { - sizes: [1.6, 3.9, 1.56] # wlh + sizes: [1.67, 3.63, 1.77] # wlh strides: [0.32, 0.32, 0.0] # if generate only 1 z_center, z_stride will be ignored - offsets: [0.16, -39.52, -1.78] # origin_offset + strides / 2 + offsets: [-95.84, -95.84, -1.78] # origin_offset + strides / 2 rotations: [0, 1.57] # 0, pi/2 matched_threshold : 0.6 unmatched_threshold : 0.45 @@ -97,9 +97,9 @@ model: { train_input_reader: { record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" - class_names: ["Car"] + class_names: ["VEHICLE"] max_num_epochs : 160 - batch_size: 2 + batch_size: 1 prefetch_size : 25 max_number_of_voxels: 12000 shuffle_points: true @@ -117,14 +117,14 @@ train_input_reader: { database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" sample_groups { name_to_max_num { - key: "Car" + key: "VEHICLE" value: 15 } } database_prep_steps { filter_by_min_num_points { min_num_point_pairs { - key: "Car" + key: "VEHICLE" value: 5 } } @@ -150,7 +150,7 @@ train_config: { learning_rate: { exponential_decay_learning_rate: { initial_learning_rate: 0.0002 - decay_steps: 27840 # 1856 steps per epoch * 15 epochs + decay_steps: 2000#27840 1856 steps per epoch * 15 epochs decay_factor: 0.8 staircase: true } @@ -162,8 +162,8 @@ train_config: { } inter_op_parallelism_threads: 4 intra_op_parallelism_threads: 4 - steps: 296960 # 1856 steps per epoch * 160 epochs - steps_per_eval: 9280 # 1856 steps per epoch * 5 epochs + steps: 5000#296960 1856 steps per epoch * 160 epochs + steps_per_eval: 2500 #9280 1856 steps per epoch * 5 epochs save_checkpoints_secs : 1800 # half hour save_summary_steps : 10 enable_mixed_precision: false @@ -173,8 +173,8 @@ train_config: { eval_input_reader: { record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" - class_names: ["Car"] - batch_size: 2 + class_names: ["VEHICLE"] + batch_size: 1 max_num_epochs : 160 prefetch_size : 25 max_number_of_voxels: 12000 diff --git a/second/configs/pointpillars/car/xyres_16_argo_30.proto b/second/configs/pointpillars/car/xyres_16_argo_30.proto new file mode 100644 index 00000000..7fd6b19d --- /dev/null +++ b/second/configs/pointpillars/car/xyres_16_argo_30.proto @@ -0,0 +1,189 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-32, -32, -3, 32, 32, 1] + voxel_size : 
[0.16, 0.16, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-32, -32, -3, 32, 32, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.32, 0.32, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-31.84, -31.84, -1.78] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 10 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/sample.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/sample/" + +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 2500 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + 
weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 10000 # 1856 steps per epoch * 160 epochs + steps_per_eval:10000 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 10 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/sample_val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/sample/" + +} diff --git a/second/configs/pointpillars/car/xyres_16_argo_48.proto b/second/configs/pointpillars/car/xyres_16_argo_48.proto new file mode 100644 index 00000000..a2c7fd48 --- /dev/null +++ b/second/configs/pointpillars/car/xyres_16_argo_48.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-48, -48, -3, 48, 48, 1] + voxel_size : [0.16, 0.16, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-48, -48, -3, 48, 48, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.32, 0.32, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-47.84, -47.84, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: 
[-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_16_argo_64.proto b/second/configs/pointpillars/car/xyres_16_argo_64.proto new file mode 100644 index 00000000..3670cb32 --- /dev/null +++ b/second/configs/pointpillars/car/xyres_16_argo_64.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-64, -64, -3, 64, 64, 1] + voxel_size : [0.16, 0.16, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # 
Postprocess + post_center_limit_range: [-64, -64, -3, 64, 64, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.32, 0.32, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-63.84, -63.84, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path:"/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_16_argo_80.proto b/second/configs/pointpillars/car/xyres_16_argo_80.proto new file mode 100644 index 00000000..c61b1da3 --- /dev/null +++ b/second/configs/pointpillars/car/xyres_16_argo_80.proto @@ 
-0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-80, -80, -3, 80, 80, 1] + voxel_size : [0.16, 0.16, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-80, -80, -3, 80, 80, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.32, 0.32, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-79.84, -79.84, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + 
initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path:"/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_16_argo_88.proto b/second/configs/pointpillars/car/xyres_16_argo_88.proto new file mode 100644 index 00000000..85bbded4 --- /dev/null +++ b/second/configs/pointpillars/car/xyres_16_argo_88.proto @@ -0,0 +1,190 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-89.6, -89.6, -3, 89.6, 89.6, 1] + voxel_size : [0.16, 0.16, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-89.6, -89.6, -3, 89.6, 89.6, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.32, 0.32, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-89.44, -89.44, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 160 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 
12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" + +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 160 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" + + +} diff --git a/second/configs/pointpillars/car/xyres_16_argo_96.proto b/second/configs/pointpillars/car/xyres_16_argo_96.proto new file mode 100644 index 00000000..6e89c20c --- /dev/null +++ b/second/configs/pointpillars/car/xyres_16_argo_96.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-96, -96, -3, 96, 96, 1] + voxel_size : [0.16, 0.16, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + 
direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-96, -96, -3, 96, 96, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.32, 0.32, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-95.84, -95.84, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path:"/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_16_argo_upper.proto 
b/second/configs/pointpillars/car/xyres_16_argo_upper.proto new file mode 100644 index 00000000..7733f04d --- /dev/null +++ b/second/configs/pointpillars/car/xyres_16_argo_upper.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-112, -112, -3, 112, 112, 1] + voxel_size : [0.16, 0.16, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-112, -112, -3, 112, 112, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.32, 0.32, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-111.84, -111.84, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + 
kitti_root_path:"/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_20_argo_48.proto b/second/configs/pointpillars/car/xyres_20_argo_48.proto new file mode 100644 index 00000000..ed0c41d9 --- /dev/null +++ b/second/configs/pointpillars/car/xyres_20_argo_48.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-48, -48, -3, 48, 48, 1] + voxel_size : [0.2, 0.2, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-48, -48, -3, 48, 48, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.4, 0.4, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-47.80, -47.80, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + 
record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_20_argo_upper.proto b/second/configs/pointpillars/car/xyres_20_argo_upper.proto new file mode 100644 index 00000000..c4daf813 --- /dev/null +++ b/second/configs/pointpillars/car/xyres_20_argo_upper.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-112, -112, -3, 112, 112, 1] + voxel_size : [0.2, 0.2, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + 
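+      # Total loss = 1.0 * focal classification + 2.0 * smooth-L1 localization
+      # + 0.2 * direction classification (direction_loss_weight below), as in
+      # the other Argoverse car configs in this patch.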
localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-112, -112, -3, 112, 112, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.4, 0.4, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-111.80, -111.80, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: 
"/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_24_argo_48.proto b/second/configs/pointpillars/car/xyres_24_argo_48.proto new file mode 100644 index 00000000..6da69357 --- /dev/null +++ b/second/configs/pointpillars/car/xyres_24_argo_48.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-48, -48, -3, 48, 48, 1] + voxel_size : [0.24, 0.24, 4] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-48, -48, -3, 48, 48, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.48, 0.48, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-47.76, -47.76, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + 
global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/car/xyres_28_argo_48.proto b/second/configs/pointpillars/car/xyres_28_argo_48.proto new file mode 100644 index 00000000..6b76364d --- /dev/null +++ b/second/configs/pointpillars/car/xyres_28_argo_48.proto @@ -0,0 +1,187 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-48.16, -48.16, -3, 48.16, 48.16, 1] + voxel_size : [0.28, 0.28, 4] + max_number_of_points_per_voxel : 200 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [2, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-48.16, -48.16, -3, 48.16, 48.16, 1] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + anchor_generators: { + anchor_generator_stride: { + sizes: [1.67, 3.63, 1.77] # wlh + strides: [0.56, 0.56, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-47.88, -47.88, -1.73] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + 
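+        # Worked example of the offset rule above for this config:
+        # range_min + stride / 2 = -48.16 + 0.56 / 2 = -47.88.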
matched_threshold : 0.6 + unmatched_threshold : 0.45 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["VEHICLE"] + max_num_epochs : 200 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "VEHICLE" + value: 15 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "VEHICLE" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 40605 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 433120 # 1856 steps per epoch * 160 epochs + steps_per_eval: 13535 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: ["VEHICLE"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" +} diff --git a/second/configs/pointpillars/pedestrian/xyres_16_argo_56.proto b/second/configs/pointpillars/pedestrian/xyres_16_argo_56.proto new file mode 100644 index 00000000..a778a0a6 --- /dev/null +++ b/second/configs/pointpillars/pedestrian/xyres_16_argo_56.proto @@ -0,0 +1,190 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-56, -56, -2.5, 56,56, 0.5] + voxel_size : [0.16, 0.16, 3] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [1, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + 
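+      # Sigmoid focal loss, FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t),
+      # with alpha = 0.25 and gamma = 2.0 set below; it down-weights easy
+      # negatives relative to hard examples.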
weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-56, -56, -2.5, 56, 56, 0.5] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + + anchor_generators: { + anchor_generator_stride: { + sizes: [0.71, 0.71, 1.81] # wlh + strides: [0.16, 0.16, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-55.92, -55.92, -1.465] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.5 + unmatched_threshold : 0.35 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["PEDESTRIAN"] + max_num_epochs : 160 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: "PEDESTRIAN" + value: 8 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "PEDESTRIAN" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/ped_train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" + +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 30570 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 326080 # 1856 steps per epoch * 160 epochs + steps_per_eval: 10190 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: [ 
"PEDESTRIAN"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/ped_val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" + +} diff --git a/second/configs/pointpillars/pedestrian/xyres_16_argo_64.proto b/second/configs/pointpillars/pedestrian/xyres_16_argo_64.proto new file mode 100644 index 00000000..4ccd5374 --- /dev/null +++ b/second/configs/pointpillars/pedestrian/xyres_16_argo_64.proto @@ -0,0 +1,190 @@ +model: { + second: { + voxel_generator { + point_cloud_range : [-48, -48, -2.5, 48,48, 0.5] + voxel_size : [0.16, 0.16, 3] + max_number_of_points_per_voxel : 100 + } + num_class: 1 + voxel_feature_extractor: { + module_class_name: "PillarFeatureNet" + num_filters: [64] + with_distance: false + } + middle_feature_extractor: { + module_class_name: "PointPillarsScatter" + } + rpn: { + module_class_name: "RPN" + layer_nums: [3, 5, 5] + layer_strides: [1, 2, 2] + num_filters: [64, 128, 256] + upsample_strides: [1, 2, 4] + num_upsample_filters: [128, 128, 128] + use_groupnorm: false + num_groups: 32 + } + loss: { + classification_loss: { + weighted_sigmoid_focal: { + alpha: 0.25 + gamma: 2.0 + anchorwise_output: true + } + } + localization_loss: { + weighted_smooth_l1: { + sigma: 3.0 + code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + } + } + classification_weight: 1.0 + localization_weight: 2.0 + } + # Outputs + use_sigmoid_score: true + encode_background_as_zeros: true + encode_rad_error_by_sin: true + + use_direction_classifier: true + direction_loss_weight: 0.2 + use_aux_classifier: false + # Loss + pos_class_weight: 1.0 + neg_class_weight: 1.0 + + loss_norm_type: NormByNumPositives + # Postprocess + post_center_limit_range: [-48, -48, -2.5, 48, 48, 0.5] + use_rotate_nms: false + use_multi_class_nms: false + nms_pre_max_size: 1000 + nms_post_max_size: 300 + nms_score_threshold: 0.05 + nms_iou_threshold: 0.5 + + use_bev: false + num_point_features: 3 + without_reflectivity: true + box_coder: { + ground_box3d_coder: { + linear_dim: false + encode_angle_vector: false + } + } + target_assigner: { + + anchor_generators: { + anchor_generator_stride: { + sizes: [0.71, 0.71, 1.81] # wlh + strides: [0.16, 0.16, 0.0] # if generate only 1 z_center, z_stride will be ignored + offsets: [-47.92, -47.92, -1.465] # origin_offset + strides / 2 + rotations: [0, 1.57] # 0, pi/2 + matched_threshold : 0.5 + unmatched_threshold : 0.35 + } + } + + sample_positive_fraction : -1 + sample_size : 512 + region_similarity_calculator: { + nearest_iou_similarity: { + } + } + } + } +} + + +train_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_train.tfrecord" + class_names: ["PEDESTRIAN"] + max_num_epochs : 160 + batch_size: 1 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: true + num_workers: 2 + groundtruth_localization_noise_std: [0.25, 0.25, 0.25] + groundtruth_rotation_uniform_noise: [-0.15707963267, 0.15707963267] + global_rotation_uniform_noise: [-0.78539816, 0.78539816] + global_scaling_uniform_noise: [0.95, 1.05] + global_random_rotation_range_per_object: [0, 0] + anchor_area_threshold: 1 + remove_points_after_sample: false + groundtruth_points_drop_percentage: 0.0 + groundtruth_drop_max_keep_points: 15 + database_sampler { + database_info_path: "/data/sets/kitti_second/kitti_dbinfos_train.pkl" + sample_groups { + name_to_max_num { + key: 
"PEDESTRIAN" + value: 8 + } + } + database_prep_steps { + filter_by_min_num_points { + min_num_point_pairs { + key: "PEDESTRIAN" + value: 5 + } + } + } + database_prep_steps { + filter_by_difficulty { + removed_difficulties: [-1] + } + } + global_random_rotation_range_per_object: [0, 0] + rate: 1.0 + } + + remove_unknown_examples: false + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/ped_train.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/train/" + +} + +train_config: { + optimizer: { + adam_optimizer: { + learning_rate: { + exponential_decay_learning_rate: { + initial_learning_rate: 0.0002 + decay_steps: 30570 # 1856 steps per epoch * 15 epochs + decay_factor: 0.8 + staircase: true + } + } + weight_decay: 0.0001 + } + use_moving_average: false + + } + inter_op_parallelism_threads: 4 + intra_op_parallelism_threads: 4 + steps: 326080 # 1856 steps per epoch * 160 epochs + steps_per_eval: 10190 # 1856 steps per epoch * 5 epochs + save_checkpoints_secs : 1800 # half hour + save_summary_steps : 10 + enable_mixed_precision: false + loss_scale_factor : 512.0 + clear_metrics_every_epoch: false +} + +eval_input_reader: { + record_file_path: "/data/sets/kitti_second/kitti_val.tfrecord" + class_names: [ "PEDESTRIAN"] + batch_size: 1 + max_num_epochs : 200 + prefetch_size : 25 + max_number_of_voxels: 12000 + shuffle_points: false + num_workers: 3 + anchor_area_threshold: 1 + remove_environment: false + kitti_info_path: "/nfs/151/gpu/gunji/point_pillars/second/ped_val.trch" + kitti_root_path: "/nfs/151/gpu/gunji/argodataset/argoverse-tracking/val/" + +} diff --git a/second/core/non_max_suppression/nms_gpu.py b/second/core/non_max_suppression/nms_gpu.py index b9600e59..18337137 100644 --- a/second/core/non_max_suppression/nms_gpu.py +++ b/second/core/non_max_suppression/nms_gpu.py @@ -1,653 +1,655 @@ -import math -from pathlib import Path - -import numba -import numpy as np -from numba import cuda - -from second.utils.buildtools.pybind11_build import load_pb11 - -try: - from second.core.non_max_suppression.nms import non_max_suppression -except: - current_dir = Path(__file__).resolve().parents[0] - load_pb11( - ["../cc/nms/nms_kernel.cu.cc", "../cc/nms/nms.cc"], - current_dir / "nms.so", - current_dir, - cuda=True) - from second.core.non_max_suppression.nms import non_max_suppression - - -@cuda.jit('(float32[:], float32[:])', device=True, inline=True) -def iou_device(a, b): - left = max(a[0], b[0]) - right = min(a[2], b[2]) - top = max(a[1], b[1]) - bottom = min(a[3], b[3]) - width = max(right - left + 1, 0.) - height = max(bottom - top + 1, 0.) 
- interS = width * height - Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1) - Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1) - return interS / (Sa + Sb - interS) - - -@cuda.jit('(int64, float32, float32[:, :], uint64[:])') -def nms_kernel_v2(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): - threadsPerBlock = 8 * 8 - row_start = cuda.blockIdx.y - col_start = cuda.blockIdx.x - tx = cuda.threadIdx.x - row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) - col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) - block_boxes = cuda.shared.array( - shape=(threadsPerBlock, 5), dtype=numba.float32) - dev_box_idx = threadsPerBlock * col_start + tx - if (tx < col_size): - block_boxes[tx, 0] = dev_boxes[dev_box_idx, 0] - block_boxes[tx, 1] = dev_boxes[dev_box_idx, 1] - block_boxes[tx, 2] = dev_boxes[dev_box_idx, 2] - block_boxes[tx, 3] = dev_boxes[dev_box_idx, 3] - block_boxes[tx, 4] = dev_boxes[dev_box_idx, 4] - cuda.syncthreads() - if (cuda.threadIdx.x < row_size): - cur_box_idx = threadsPerBlock * row_start + cuda.threadIdx.x - # cur_box = dev_boxes + cur_box_idx * 5; - i = 0 - t = 0 - start = 0 - if (row_start == col_start): - start = tx + 1 - for i in range(start, col_size): - if (iou_device(dev_boxes[cur_box_idx], block_boxes[i]) > - nms_overlap_thresh): - t |= 1 << i - col_blocks = ((n_boxes) // (threadsPerBlock) + ( - (n_boxes) % (threadsPerBlock) > 0)) - dev_mask[cur_box_idx * col_blocks + col_start] = t - - -@cuda.jit('(int64, float32, float32[:], uint64[:])') -def nms_kernel(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): - threadsPerBlock = 8 * 8 - row_start = cuda.blockIdx.y - col_start = cuda.blockIdx.x - tx = cuda.threadIdx.x - row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) - col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) - block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) - dev_box_idx = threadsPerBlock * col_start + tx - if (tx < col_size): - block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] - block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] - block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] - block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] - block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] - cuda.syncthreads() - if (tx < row_size): - cur_box_idx = threadsPerBlock * row_start + tx - # cur_box = dev_boxes + cur_box_idx * 5; - t = 0 - start = 0 - if (row_start == col_start): - start = tx + 1 - for i in range(start, col_size): - iou = iou_device(dev_boxes[cur_box_idx * 5:cur_box_idx * 5 + 4], - block_boxes[i * 5:i * 5 + 4]) - if (iou > nms_overlap_thresh): - t |= 1 << i - col_blocks = ((n_boxes) // (threadsPerBlock) + ( - (n_boxes) % (threadsPerBlock) > 0)) - dev_mask[cur_box_idx * col_blocks + col_start] = t - - -@numba.jit(nopython=True) -def div_up(m, n): - return m // n + (m % n > 0) - - -@numba.jit(nopython=True) -def nms_postprocess(keep_out, mask_host, boxes_num): - threadsPerBlock = 8 * 8 - col_blocks = div_up(boxes_num, threadsPerBlock) - remv = np.zeros((col_blocks), dtype=np.uint64) - num_to_keep = 0 - for i in range(boxes_num): - nblock = i // threadsPerBlock - inblock = i % threadsPerBlock - mask = np.array(1 << inblock, dtype=np.uint64) - if not (remv[nblock] & mask): - keep_out[num_to_keep] = i - num_to_keep += 1 - # unsigned long long *p = &mask_host[0] + i * col_blocks; - for j in range(nblock, col_blocks): - remv[j] |= mask_host[i * col_blocks + j] - # remv[j] |= p[j]; - return num_to_keep - - -def nms_gpu(dets, nms_overlap_thresh, 
device_id=0): - """nms in gpu. - - Args: - dets ([type]): [description] - nms_overlap_thresh ([type]): [description] - device_id ([type], optional): Defaults to 0. [description] - - Returns: - [type]: [description] - """ - - boxes_num = dets.shape[0] - keep_out = np.zeros([boxes_num], dtype=np.int32) - scores = dets[:, 4] - order = scores.argsort()[::-1].astype(np.int32) - boxes_host = dets[order, :] - - threadsPerBlock = 8 * 8 - col_blocks = div_up(boxes_num, threadsPerBlock) - cuda.select_device(device_id) - mask_host = np.zeros((boxes_num * col_blocks, ), dtype=np.uint64) - blockspergrid = (div_up(boxes_num, threadsPerBlock), - div_up(boxes_num, threadsPerBlock)) - stream = cuda.stream() - with stream.auto_synchronize(): - boxes_dev = cuda.to_device(boxes_host.reshape([-1]), stream) - mask_dev = cuda.to_device(mask_host, stream) - nms_kernel[blockspergrid, threadsPerBlock, stream]( - boxes_num, nms_overlap_thresh, boxes_dev, mask_dev) - mask_dev.copy_to_host(mask_host, stream=stream) - # stream.synchronize() - num_out = nms_postprocess(keep_out, mask_host, boxes_num) - keep = keep_out[:num_out] - return list(order[keep]) - - -def nms_gpu_cc(dets, nms_overlap_thresh, device_id=0): - boxes_num = dets.shape[0] - keep = np.zeros(boxes_num, dtype=np.int32) - scores = dets[:, 4] - order = scores.argsort()[::-1].astype(np.int32) - sorted_dets = dets[order, :] - num_out = non_max_suppression(sorted_dets, keep, nms_overlap_thresh, - device_id) - keep = keep[:num_out] - return list(order[keep]) - - -@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) -def trangle_area(a, b, c): - return ( - (a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0])) / 2.0 - - -@cuda.jit('(float32[:], int32)', device=True, inline=True) -def area(int_pts, num_of_inter): - area_val = 0.0 - for i in range(num_of_inter - 2): - area_val += abs( - trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4], - int_pts[2 * i + 4:2 * i + 6])) - return area_val - - -@cuda.jit('(float32[:], int32)', device=True, inline=True) -def sort_vertex_in_convex_polygon(int_pts, num_of_inter): - if num_of_inter > 0: - center = cuda.local.array((2, ), dtype=numba.float32) - center[:] = 0.0 - for i in range(num_of_inter): - center[0] += int_pts[2 * i] - center[1] += int_pts[2 * i + 1] - center[0] /= num_of_inter - center[1] /= num_of_inter - v = cuda.local.array((2, ), dtype=numba.float32) - vs = cuda.local.array((16, ), dtype=numba.float32) - for i in range(num_of_inter): - v[0] = int_pts[2 * i] - center[0] - v[1] = int_pts[2 * i + 1] - center[1] - d = math.sqrt(v[0] * v[0] + v[1] * v[1]) - v[0] = v[0] / d - v[1] = v[1] / d - if v[1] < 0: - v[0] = -2 - v[0] - vs[i] = v[0] - j = 0 - temp = 0 - for i in range(1, num_of_inter): - if vs[i - 1] > vs[i]: - temp = vs[i] - tx = int_pts[2 * i] - ty = int_pts[2 * i + 1] - j = i - while j > 0 and vs[j - 1] > temp: - vs[j] = vs[j - 1] - int_pts[j * 2] = int_pts[j * 2 - 2] - int_pts[j * 2 + 1] = int_pts[j * 2 - 1] - j -= 1 - - vs[j] = temp - int_pts[j * 2] = tx - int_pts[j * 2 + 1] = ty - - -@cuda.jit( - '(float32[:], float32[:], int32, int32, float32[:])', - device=True, - inline=True) -def line_segment_intersection(pts1, pts2, i, j, temp_pts): - A = cuda.local.array((2, ), dtype=numba.float32) - B = cuda.local.array((2, ), dtype=numba.float32) - C = cuda.local.array((2, ), dtype=numba.float32) - D = cuda.local.array((2, ), dtype=numba.float32) - - A[0] = pts1[2 * i] - A[1] = pts1[2 * i + 1] - - B[0] = pts1[2 * ((i + 1) % 4)] - B[1] = pts1[2 * ((i + 1) % 4) + 1] - - C[0] = 
pts2[2 * j] - C[1] = pts2[2 * j + 1] - - D[0] = pts2[2 * ((j + 1) % 4)] - D[1] = pts2[2 * ((j + 1) % 4) + 1] - BA0 = B[0] - A[0] - BA1 = B[1] - A[1] - DA0 = D[0] - A[0] - CA0 = C[0] - A[0] - DA1 = D[1] - A[1] - CA1 = C[1] - A[1] - acd = DA1 * CA0 > CA1 * DA0 - bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) - if acd != bcd: - abc = CA1 * BA0 > BA1 * CA0 - abd = DA1 * BA0 > BA1 * DA0 - if abc != abd: - DC0 = D[0] - C[0] - DC1 = D[1] - C[1] - ABBA = A[0] * B[1] - B[0] * A[1] - CDDC = C[0] * D[1] - D[0] * C[1] - DH = BA1 * DC0 - BA0 * DC1 - Dx = ABBA * DC0 - BA0 * CDDC - Dy = ABBA * DC1 - BA1 * CDDC - temp_pts[0] = Dx / DH - temp_pts[1] = Dy / DH - return True - return False - - -@cuda.jit( - '(float32[:], float32[:], int32, int32, float32[:])', - device=True, - inline=True) -def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts): - a = cuda.local.array((2, ), dtype=numba.float32) - b = cuda.local.array((2, ), dtype=numba.float32) - c = cuda.local.array((2, ), dtype=numba.float32) - d = cuda.local.array((2, ), dtype=numba.float32) - - a[0] = pts1[2 * i] - a[1] = pts1[2 * i + 1] - - b[0] = pts1[2 * ((i + 1) % 4)] - b[1] = pts1[2 * ((i + 1) % 4) + 1] - - c[0] = pts2[2 * j] - c[1] = pts2[2 * j + 1] - - d[0] = pts2[2 * ((j + 1) % 4)] - d[1] = pts2[2 * ((j + 1) % 4) + 1] - - area_abc = trangle_area(a, b, c) - area_abd = trangle_area(a, b, d) - - if area_abc * area_abd >= 0: - return False - - area_cda = trangle_area(c, d, a) - area_cdb = area_cda + area_abc - area_abd - - if area_cda * area_cdb >= 0: - return False - t = area_cda / (area_abd - area_abc) - - dx = t * (b[0] - a[0]) - dy = t * (b[1] - a[1]) - temp_pts[0] = a[0] + dx - temp_pts[1] = a[1] + dy - return True - - -@cuda.jit('(float32, float32, float32[:])', device=True, inline=True) -def point_in_quadrilateral(pt_x, pt_y, corners): - ab0 = corners[2] - corners[0] - ab1 = corners[3] - corners[1] - - ad0 = corners[6] - corners[0] - ad1 = corners[7] - corners[1] - - ap0 = pt_x - corners[0] - ap1 = pt_y - corners[1] - - abab = ab0 * ab0 + ab1 * ab1 - abap = ab0 * ap0 + ab1 * ap1 - adad = ad0 * ad0 + ad1 * ad1 - adap = ad0 * ap0 + ad1 * ap1 - - return abab >= abap and abap >= 0 and adad >= adap and adap >= 0 - - -@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) -def quadrilateral_intersection(pts1, pts2, int_pts): - num_of_inter = 0 - for i in range(4): - if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): - int_pts[num_of_inter * 2] = pts1[2 * i] - int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] - num_of_inter += 1 - if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): - int_pts[num_of_inter * 2] = pts2[2 * i] - int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] - num_of_inter += 1 - temp_pts = cuda.local.array((2, ), dtype=numba.float32) - for i in range(4): - for j in range(4): - has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts) - if has_pts: - int_pts[num_of_inter * 2] = temp_pts[0] - int_pts[num_of_inter * 2 + 1] = temp_pts[1] - num_of_inter += 1 - - return num_of_inter - - -@cuda.jit('(float32[:], float32[:])', device=True, inline=True) -def rbbox_to_corners(corners, rbbox): - # generate clockwise corners and rotate it clockwise - angle = rbbox[4] - a_cos = math.cos(angle) - a_sin = math.sin(angle) - center_x = rbbox[0] - center_y = rbbox[1] - x_d = rbbox[2] - y_d = rbbox[3] - corners_x = cuda.local.array((4, ), dtype=numba.float32) - corners_y = cuda.local.array((4, ), dtype=numba.float32) - corners_x[0] = -x_d / 2 - corners_x[1] = -x_d / 2 - 
corners_x[2] = x_d / 2 - corners_x[3] = x_d / 2 - corners_y[0] = -y_d / 2 - corners_y[1] = y_d / 2 - corners_y[2] = y_d / 2 - corners_y[3] = -y_d / 2 - for i in range(4): - corners[2 * i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x - corners[2 * i + - 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y - - -@cuda.jit('(float32[:], float32[:])', device=True, inline=True) -def inter(rbbox1, rbbox2): - corners1 = cuda.local.array((8, ), dtype=numba.float32) - corners2 = cuda.local.array((8, ), dtype=numba.float32) - intersection_corners = cuda.local.array((16, ), dtype=numba.float32) - - rbbox_to_corners(corners1, rbbox1) - rbbox_to_corners(corners2, rbbox2) - - num_intersection = quadrilateral_intersection(corners1, corners2, - intersection_corners) - sort_vertex_in_convex_polygon(intersection_corners, num_intersection) - # print(intersection_corners.reshape([-1, 2])[:num_intersection]) - - return area(intersection_corners, num_intersection) - - -@cuda.jit('(float32[:], float32[:])', device=True, inline=True) -def devRotateIoU(rbox1, rbox2): - area1 = rbox1[2] * rbox1[3] - area2 = rbox2[2] * rbox2[3] - area_inter = inter(rbox1, rbox2) - return area_inter / (area1 + area2 - area_inter) - - -@cuda.jit('(int64, float32, float32[:], uint64[:])') -def rotate_nms_kernel(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): - threadsPerBlock = 8 * 8 - row_start = cuda.blockIdx.y - col_start = cuda.blockIdx.x - tx = cuda.threadIdx.x - row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) - col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) - block_boxes = cuda.shared.array(shape=(64 * 6, ), dtype=numba.float32) - dev_box_idx = threadsPerBlock * col_start + tx - if (tx < col_size): - block_boxes[tx * 6 + 0] = dev_boxes[dev_box_idx * 6 + 0] - block_boxes[tx * 6 + 1] = dev_boxes[dev_box_idx * 6 + 1] - block_boxes[tx * 6 + 2] = dev_boxes[dev_box_idx * 6 + 2] - block_boxes[tx * 6 + 3] = dev_boxes[dev_box_idx * 6 + 3] - block_boxes[tx * 6 + 4] = dev_boxes[dev_box_idx * 6 + 4] - block_boxes[tx * 6 + 5] = dev_boxes[dev_box_idx * 6 + 5] - cuda.syncthreads() - if (tx < row_size): - cur_box_idx = threadsPerBlock * row_start + tx - # cur_box = dev_boxes + cur_box_idx * 5; - t = 0 - start = 0 - if (row_start == col_start): - start = tx + 1 - for i in range(start, col_size): - iou = devRotateIoU(dev_boxes[cur_box_idx * 6:cur_box_idx * 6 + 5], - block_boxes[i * 6:i * 6 + 5]) - # print('iou', iou, cur_box_idx, i) - if (iou > nms_overlap_thresh): - t |= 1 << i - col_blocks = ((n_boxes) // (threadsPerBlock) + ( - (n_boxes) % (threadsPerBlock) > 0)) - dev_mask[cur_box_idx * col_blocks + col_start] = t - - -def rotate_nms_gpu(dets, nms_overlap_thresh, device_id=0): - """nms in gpu. WARNING: this function can provide right result - but its performance isn't be tested - - Args: - dets ([type]): [description] - nms_overlap_thresh ([type]): [description] - device_id ([type], optional): Defaults to 0. 
[description] - - Returns: - [type]: [description] - """ - dets = dets.astype(np.float32) - boxes_num = dets.shape[0] - keep_out = np.zeros([boxes_num], dtype=np.int32) - scores = dets[:, 5] - order = scores.argsort()[::-1].astype(np.int32) - boxes_host = dets[order, :] - - threadsPerBlock = 8 * 8 - col_blocks = div_up(boxes_num, threadsPerBlock) - cuda.select_device(device_id) - # mask_host shape: boxes_num * col_blocks * sizeof(np.uint64) - mask_host = np.zeros((boxes_num * col_blocks, ), dtype=np.uint64) - blockspergrid = (div_up(boxes_num, threadsPerBlock), - div_up(boxes_num, threadsPerBlock)) - stream = cuda.stream() - with stream.auto_synchronize(): - boxes_dev = cuda.to_device(boxes_host.reshape([-1]), stream) - mask_dev = cuda.to_device(mask_host, stream) - rotate_nms_kernel[blockspergrid, threadsPerBlock, stream]( - boxes_num, nms_overlap_thresh, boxes_dev, mask_dev) - mask_dev.copy_to_host(mask_host, stream=stream) - num_out = nms_postprocess(keep_out, mask_host, boxes_num) - keep = keep_out[:num_out] - return list(order[keep]) - - -@cuda.jit('(int64, int64, float32[:], float32[:], float32[:])', fastmath=False) -def rotate_iou_kernel(N, K, dev_boxes, dev_query_boxes, dev_iou): - threadsPerBlock = 8 * 8 - row_start = cuda.blockIdx.x - col_start = cuda.blockIdx.y - tx = cuda.threadIdx.x - row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) - col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) - block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) - block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) - - dev_query_box_idx = threadsPerBlock * col_start + tx - dev_box_idx = threadsPerBlock * row_start + tx - if (tx < col_size): - block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] - block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] - block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] - block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] - block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] - if (tx < row_size): - block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] - block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] - block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] - block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] - block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] - cuda.syncthreads() - if tx < row_size: - for i in range(col_size): - offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i - dev_iou[offset] = devRotateIoU(block_qboxes[i * 5:i * 5 + 5], - block_boxes[tx * 5:tx * 5 + 5]) - - -def rotate_iou_gpu(boxes, query_boxes, device_id=0): - """rotated box iou running in gpu. 500x faster than cpu version - (take 5ms in one example with numba.cuda code). - convert from [this project]( - https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). - - Args: - boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, - angles(clockwise when positive) - query_boxes (float tensor: [K, 5]): [description] - device_id (int, optional): Defaults to 0. 
[description] - - Returns: - [type]: [description] - """ - box_dtype = boxes.dtype - boxes = boxes.astype(np.float32) - query_boxes = query_boxes.astype(np.float32) - N = boxes.shape[0] - K = query_boxes.shape[0] - iou = np.zeros((N, K), dtype=np.float32) - if N == 0 or K == 0: - return iou - threadsPerBlock = 8 * 8 - cuda.select_device(device_id) - blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) - - stream = cuda.stream() - with stream.auto_synchronize(): - boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) - query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) - iou_dev = cuda.to_device(iou.reshape([-1]), stream) - rotate_iou_kernel[blockspergrid, threadsPerBlock, stream]( - N, K, boxes_dev, query_boxes_dev, iou_dev) - iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) - return iou.astype(boxes.dtype) - - -@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True) -def devRotateIoUEval(rbox1, rbox2, criterion=-1): - area1 = rbox1[2] * rbox1[3] - area2 = rbox2[2] * rbox2[3] - area_inter = inter(rbox1, rbox2) - if criterion == -1: - return area_inter / (area1 + area2 - area_inter) - elif criterion == 0: - return area_inter / area1 - elif criterion == 1: - return area_inter / area2 - else: - return area_inter - - -@cuda.jit( - '(int64, int64, float32[:], float32[:], float32[:], int32)', - fastmath=False) -def rotate_iou_kernel_eval(N, - K, - dev_boxes, - dev_query_boxes, - dev_iou, - criterion=-1): - threadsPerBlock = 8 * 8 - row_start = cuda.blockIdx.x - col_start = cuda.blockIdx.y - tx = cuda.threadIdx.x - row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) - col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) - block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) - block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) - - dev_query_box_idx = threadsPerBlock * col_start + tx - dev_box_idx = threadsPerBlock * row_start + tx - if (tx < col_size): - block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] - block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] - block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] - block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] - block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] - if (tx < row_size): - block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] - block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] - block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] - block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] - block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] - cuda.syncthreads() - if tx < row_size: - for i in range(col_size): - offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i - dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5], - block_boxes[tx * 5:tx * 5 + 5], - criterion) - - -def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): - """rotated box iou running in gpu. 500x faster than cpu version - (take 5ms in one example with numba.cuda code). - convert from [this project]( - https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). - - Args: - boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, - angles(clockwise when positive) - query_boxes (float tensor: [K, 5]): [description] - device_id (int, optional): Defaults to 0. 
[description] - - Returns: - [type]: [description] - """ - box_dtype = boxes.dtype - boxes = boxes.astype(np.float32) - query_boxes = query_boxes.astype(np.float32) - N = boxes.shape[0] - K = query_boxes.shape[0] - iou = np.zeros((N, K), dtype=np.float32) - if N == 0 or K == 0: - return iou - threadsPerBlock = 8 * 8 - cuda.select_device(device_id) - blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) - - stream = cuda.stream() - with stream.auto_synchronize(): - boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) - query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) - iou_dev = cuda.to_device(iou.reshape([-1]), stream) - rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream]( - N, K, boxes_dev, query_boxes_dev, iou_dev, criterion) - iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) - return iou.astype(boxes.dtype) +import math +from pathlib import Path + +import numba +import numpy as np +from numba import cuda + +from second.utils.buildtools.pybind11_build import load_pb11 + +try: + from second.core.non_max_suppression.nms import non_max_suppression +except: + current_dir = Path(__file__).resolve().parents[0] + load_pb11( + ["../cc/nms/nms_kernel.cu.cc", "../cc/nms/nms.cc"], + current_dir / "nms.so", + current_dir, + cuda=True) + from second.core.non_max_suppression.nms import non_max_suppression + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def iou_device(a, b): + left = max(a[0], b[0]) + right = min(a[2], b[2]) + top = max(a[1], b[1]) + bottom = min(a[3], b[3]) + width = max(right - left + 1, 0.) + height = max(bottom - top + 1, 0.) + interS = width * height + Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1) + Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1) + return interS / (Sa + Sb - interS) + + +@cuda.jit('(int64, float32, float32[:, :], uint64[:])') +def nms_kernel_v2(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.y + col_start = cuda.blockIdx.x + tx = cuda.threadIdx.x + row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array( + shape=(threadsPerBlock, 5), dtype=numba.float32) + dev_box_idx = threadsPerBlock * col_start + tx + if (tx < col_size): + block_boxes[tx, 0] = dev_boxes[dev_box_idx, 0] + block_boxes[tx, 1] = dev_boxes[dev_box_idx, 1] + block_boxes[tx, 2] = dev_boxes[dev_box_idx, 2] + block_boxes[tx, 3] = dev_boxes[dev_box_idx, 3] + block_boxes[tx, 4] = dev_boxes[dev_box_idx, 4] + cuda.syncthreads() + if (cuda.threadIdx.x < row_size): + cur_box_idx = threadsPerBlock * row_start + cuda.threadIdx.x + # cur_box = dev_boxes + cur_box_idx * 5; + i = 0 + t = 0 + start = 0 + if (row_start == col_start): + start = tx + 1 + for i in range(start, col_size): + if (iou_device(dev_boxes[cur_box_idx], block_boxes[i]) > + nms_overlap_thresh): + t |= 1 << i + col_blocks = ((n_boxes) // (threadsPerBlock) + ( + (n_boxes) % (threadsPerBlock) > 0)) + dev_mask[cur_box_idx * col_blocks + col_start] = t + + +@cuda.jit('(int64, float32, float32[:], uint64[:])') +def nms_kernel(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.y + col_start = cuda.blockIdx.x + tx = cuda.threadIdx.x + row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + 
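+    # Each 64-thread block caches up to 64 candidate boxes (5 floats apiece)
+    # in shared memory; each thread then packs its 64 overlap decisions into
+    # a single uint64 written at dev_mask[box * col_blocks + col_start], which
+    # is why threadsPerBlock is fixed at 64 (the uint64 bit width).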
dev_box_idx = threadsPerBlock * col_start + tx + if (tx < col_size): + block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] + block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] + block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] + block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] + block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] + cuda.syncthreads() + if (tx < row_size): + cur_box_idx = threadsPerBlock * row_start + tx + # cur_box = dev_boxes + cur_box_idx * 5; + t = 0 + start = 0 + if (row_start == col_start): + start = tx + 1 + for i in range(start, col_size): + iou = iou_device(dev_boxes[cur_box_idx * 5:cur_box_idx * 5 + 4], + block_boxes[i * 5:i * 5 + 4]) + if (iou > nms_overlap_thresh): + t |= 1 << i + col_blocks = ((n_boxes) // (threadsPerBlock) + ( + (n_boxes) % (threadsPerBlock) > 0)) + dev_mask[cur_box_idx * col_blocks + col_start] = t + + +@numba.jit(nopython=True) +def div_up(m, n): + return m // n + (m % n > 0) + + +@numba.jit(nopython=True) +def nms_postprocess(keep_out, mask_host, boxes_num): + threadsPerBlock = 8 * 8 + col_blocks = div_up(boxes_num, threadsPerBlock) + remv = np.zeros((col_blocks), dtype=np.uint64) + num_to_keep = 0 + for i in range(boxes_num): + nblock = i // threadsPerBlock + inblock = i % threadsPerBlock + mask = np.array(1 << inblock, dtype=np.uint64) + if not (remv[nblock] & mask): + keep_out[num_to_keep] = i + num_to_keep += 1 + # unsigned long long *p = &mask_host[0] + i * col_blocks; + for j in range(nblock, col_blocks): + remv[j] |= mask_host[i * col_blocks + j] + # remv[j] |= p[j]; + return num_to_keep + + +def nms_gpu(dets, nms_overlap_thresh, device_id=1): + """nms in gpu. + + Args: + dets ([type]): [description] + nms_overlap_thresh ([type]): [description] + device_id ([type], optional): Defaults to 0. 
[description] + + Returns: + [type]: [description] + """ + + boxes_num = dets.shape[0] + #print(boxes_num) + keep_out = np.zeros([boxes_num], dtype=np.int32) + scores = dets[:, 4] + order = scores.argsort()[::-1].astype(np.int32) + boxes_host = dets[order, :] + + threadsPerBlock = 8 * 8 + col_blocks = div_up(boxes_num, threadsPerBlock) + cuda.select_device(device_id) + mask_host = np.zeros((boxes_num * col_blocks, ), dtype=np.uint64) + blockspergrid = (div_up(boxes_num, threadsPerBlock), + div_up(boxes_num, threadsPerBlock)) + #print(blockspergrid) + stream = cuda.stream() + with stream.auto_synchronize(): + boxes_dev = cuda.to_device(boxes_host.reshape([-1]), stream) + mask_dev = cuda.to_device(mask_host, stream) + nms_kernel[blockspergrid, threadsPerBlock, stream]( + boxes_num, nms_overlap_thresh, boxes_dev, mask_dev) + mask_dev.copy_to_host(mask_host, stream=stream) + # stream.synchronize() + num_out = nms_postprocess(keep_out, mask_host, boxes_num) + keep = keep_out[:num_out] + return list(order[keep]) + + +def nms_gpu_cc(dets, nms_overlap_thresh, device_id=0): + boxes_num = dets.shape[0] + keep = np.zeros(boxes_num, dtype=np.int32) + scores = dets[:, 4] + order = scores.argsort()[::-1].astype(np.int32) + sorted_dets = dets[order, :] + num_out = non_max_suppression(sorted_dets, keep, nms_overlap_thresh, + device_id) + keep = keep[:num_out] + return list(order[keep]) + + +@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) +def trangle_area(a, b, c): + return ( + (a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0])) / 2.0 + + +@cuda.jit('(float32[:], int32)', device=True, inline=True) +def area(int_pts, num_of_inter): + area_val = 0.0 + for i in range(num_of_inter - 2): + area_val += abs( + trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4], + int_pts[2 * i + 4:2 * i + 6])) + return area_val + + +@cuda.jit('(float32[:], int32)', device=True, inline=True) +def sort_vertex_in_convex_polygon(int_pts, num_of_inter): + if num_of_inter > 0: + center = cuda.local.array((2, ), dtype=numba.float32) + center[:] = 0.0 + for i in range(num_of_inter): + center[0] += int_pts[2 * i] + center[1] += int_pts[2 * i + 1] + center[0] /= num_of_inter + center[1] /= num_of_inter + v = cuda.local.array((2, ), dtype=numba.float32) + vs = cuda.local.array((16, ), dtype=numba.float32) + for i in range(num_of_inter): + v[0] = int_pts[2 * i] - center[0] + v[1] = int_pts[2 * i + 1] - center[1] + d = math.sqrt(v[0] * v[0] + v[1] * v[1]) + v[0] = v[0] / d + v[1] = v[1] / d + if v[1] < 0: + v[0] = -2 - v[0] + vs[i] = v[0] + j = 0 + temp = 0 + for i in range(1, num_of_inter): + if vs[i - 1] > vs[i]: + temp = vs[i] + tx = int_pts[2 * i] + ty = int_pts[2 * i + 1] + j = i + while j > 0 and vs[j - 1] > temp: + vs[j] = vs[j - 1] + int_pts[j * 2] = int_pts[j * 2 - 2] + int_pts[j * 2 + 1] = int_pts[j * 2 - 1] + j -= 1 + + vs[j] = temp + int_pts[j * 2] = tx + int_pts[j * 2 + 1] = ty + + +@cuda.jit( + '(float32[:], float32[:], int32, int32, float32[:])', + device=True, + inline=True) +def line_segment_intersection(pts1, pts2, i, j, temp_pts): + A = cuda.local.array((2, ), dtype=numba.float32) + B = cuda.local.array((2, ), dtype=numba.float32) + C = cuda.local.array((2, ), dtype=numba.float32) + D = cuda.local.array((2, ), dtype=numba.float32) + + A[0] = pts1[2 * i] + A[1] = pts1[2 * i + 1] + + B[0] = pts1[2 * ((i + 1) % 4)] + B[1] = pts1[2 * ((i + 1) % 4) + 1] + + C[0] = pts2[2 * j] + C[1] = pts2[2 * j + 1] + + D[0] = pts2[2 * ((j + 1) % 4)] + D[1] = pts2[2 * ((j + 1) % 4) + 1] + BA0 = B[0] - 
A[0] + BA1 = B[1] - A[1] + DA0 = D[0] - A[0] + CA0 = C[0] - A[0] + DA1 = D[1] - A[1] + CA1 = C[1] - A[1] + acd = DA1 * CA0 > CA1 * DA0 + bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) + if acd != bcd: + abc = CA1 * BA0 > BA1 * CA0 + abd = DA1 * BA0 > BA1 * DA0 + if abc != abd: + DC0 = D[0] - C[0] + DC1 = D[1] - C[1] + ABBA = A[0] * B[1] - B[0] * A[1] + CDDC = C[0] * D[1] - D[0] * C[1] + DH = BA1 * DC0 - BA0 * DC1 + Dx = ABBA * DC0 - BA0 * CDDC + Dy = ABBA * DC1 - BA1 * CDDC + temp_pts[0] = Dx / DH + temp_pts[1] = Dy / DH + return True + return False + + +@cuda.jit( + '(float32[:], float32[:], int32, int32, float32[:])', + device=True, + inline=True) +def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts): + a = cuda.local.array((2, ), dtype=numba.float32) + b = cuda.local.array((2, ), dtype=numba.float32) + c = cuda.local.array((2, ), dtype=numba.float32) + d = cuda.local.array((2, ), dtype=numba.float32) + + a[0] = pts1[2 * i] + a[1] = pts1[2 * i + 1] + + b[0] = pts1[2 * ((i + 1) % 4)] + b[1] = pts1[2 * ((i + 1) % 4) + 1] + + c[0] = pts2[2 * j] + c[1] = pts2[2 * j + 1] + + d[0] = pts2[2 * ((j + 1) % 4)] + d[1] = pts2[2 * ((j + 1) % 4) + 1] + + area_abc = trangle_area(a, b, c) + area_abd = trangle_area(a, b, d) + + if area_abc * area_abd >= 0: + return False + + area_cda = trangle_area(c, d, a) + area_cdb = area_cda + area_abc - area_abd + + if area_cda * area_cdb >= 0: + return False + t = area_cda / (area_abd - area_abc) + + dx = t * (b[0] - a[0]) + dy = t * (b[1] - a[1]) + temp_pts[0] = a[0] + dx + temp_pts[1] = a[1] + dy + return True + + +@cuda.jit('(float32, float32, float32[:])', device=True, inline=True) +def point_in_quadrilateral(pt_x, pt_y, corners): + ab0 = corners[2] - corners[0] + ab1 = corners[3] - corners[1] + + ad0 = corners[6] - corners[0] + ad1 = corners[7] - corners[1] + + ap0 = pt_x - corners[0] + ap1 = pt_y - corners[1] + + abab = ab0 * ab0 + ab1 * ab1 + abap = ab0 * ap0 + ab1 * ap1 + adad = ad0 * ad0 + ad1 * ad1 + adap = ad0 * ap0 + ad1 * ap1 + + return abab >= abap and abap >= 0 and adad >= adap and adap >= 0 + + +@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) +def quadrilateral_intersection(pts1, pts2, int_pts): + num_of_inter = 0 + for i in range(4): + if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): + int_pts[num_of_inter * 2] = pts1[2 * i] + int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] + num_of_inter += 1 + if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): + int_pts[num_of_inter * 2] = pts2[2 * i] + int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] + num_of_inter += 1 + temp_pts = cuda.local.array((2, ), dtype=numba.float32) + for i in range(4): + for j in range(4): + has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts) + if has_pts: + int_pts[num_of_inter * 2] = temp_pts[0] + int_pts[num_of_inter * 2 + 1] = temp_pts[1] + num_of_inter += 1 + + return num_of_inter + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def rbbox_to_corners(corners, rbbox): + # generate clockwise corners and rotate it clockwise + angle = rbbox[4] + a_cos = math.cos(angle) + a_sin = math.sin(angle) + center_x = rbbox[0] + center_y = rbbox[1] + x_d = rbbox[2] + y_d = rbbox[3] + corners_x = cuda.local.array((4, ), dtype=numba.float32) + corners_y = cuda.local.array((4, ), dtype=numba.float32) + corners_x[0] = -x_d / 2 + corners_x[1] = -x_d / 2 + corners_x[2] = x_d / 2 + corners_x[3] = x_d / 2 + corners_y[0] = -y_d / 2 + corners_y[1] = y_d / 2 + corners_y[2] = y_d / 2 + 
corners_y[3] = -y_d / 2 + for i in range(4): + corners[2 * i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x + corners[2 * i + + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def inter(rbbox1, rbbox2): + corners1 = cuda.local.array((8, ), dtype=numba.float32) + corners2 = cuda.local.array((8, ), dtype=numba.float32) + intersection_corners = cuda.local.array((16, ), dtype=numba.float32) + + rbbox_to_corners(corners1, rbbox1) + rbbox_to_corners(corners2, rbbox2) + + num_intersection = quadrilateral_intersection(corners1, corners2, + intersection_corners) + sort_vertex_in_convex_polygon(intersection_corners, num_intersection) + # print(intersection_corners.reshape([-1, 2])[:num_intersection]) + + return area(intersection_corners, num_intersection) + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def devRotateIoU(rbox1, rbox2): + area1 = rbox1[2] * rbox1[3] + area2 = rbox2[2] * rbox2[3] + area_inter = inter(rbox1, rbox2) + return area_inter / (area1 + area2 - area_inter) + + +@cuda.jit('(int64, float32, float32[:], uint64[:])') +def rotate_nms_kernel(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.y + col_start = cuda.blockIdx.x + tx = cuda.threadIdx.x + row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array(shape=(64 * 6, ), dtype=numba.float32) + dev_box_idx = threadsPerBlock * col_start + tx + if (tx < col_size): + block_boxes[tx * 6 + 0] = dev_boxes[dev_box_idx * 6 + 0] + block_boxes[tx * 6 + 1] = dev_boxes[dev_box_idx * 6 + 1] + block_boxes[tx * 6 + 2] = dev_boxes[dev_box_idx * 6 + 2] + block_boxes[tx * 6 + 3] = dev_boxes[dev_box_idx * 6 + 3] + block_boxes[tx * 6 + 4] = dev_boxes[dev_box_idx * 6 + 4] + block_boxes[tx * 6 + 5] = dev_boxes[dev_box_idx * 6 + 5] + cuda.syncthreads() + if (tx < row_size): + cur_box_idx = threadsPerBlock * row_start + tx + # cur_box = dev_boxes + cur_box_idx * 5; + t = 0 + start = 0 + if (row_start == col_start): + start = tx + 1 + for i in range(start, col_size): + iou = devRotateIoU(dev_boxes[cur_box_idx * 6:cur_box_idx * 6 + 5], + block_boxes[i * 6:i * 6 + 5]) + # print('iou', iou, cur_box_idx, i) + if (iou > nms_overlap_thresh): + t |= 1 << i + col_blocks = ((n_boxes) // (threadsPerBlock) + ( + (n_boxes) % (threadsPerBlock) > 0)) + dev_mask[cur_box_idx * col_blocks + col_start] = t + + +def rotate_nms_gpu(dets, nms_overlap_thresh, device_id=0): + """NMS for rotated boxes on GPU. WARNING: this function produces correct results, + but its performance has not been tested + + Args: + dets ([type]): [description] + nms_overlap_thresh ([type]): [description] + device_id ([type], optional): Defaults to 0.
[description] + + Returns: + [type]: [description] + """ + dets = dets.astype(np.float32) + boxes_num = dets.shape[0] + keep_out = np.zeros([boxes_num], dtype=np.int32) + scores = dets[:, 5] + order = scores.argsort()[::-1].astype(np.int32) + boxes_host = dets[order, :] + + threadsPerBlock = 8 * 8 + col_blocks = div_up(boxes_num, threadsPerBlock) + cuda.select_device(device_id) + # mask_host shape: boxes_num * col_blocks * sizeof(np.uint64) + mask_host = np.zeros((boxes_num * col_blocks, ), dtype=np.uint64) + blockspergrid = (div_up(boxes_num, threadsPerBlock), + div_up(boxes_num, threadsPerBlock)) + stream = cuda.stream() + with stream.auto_synchronize(): + boxes_dev = cuda.to_device(boxes_host.reshape([-1]), stream) + mask_dev = cuda.to_device(mask_host, stream) + rotate_nms_kernel[blockspergrid, threadsPerBlock, stream]( + boxes_num, nms_overlap_thresh, boxes_dev, mask_dev) + mask_dev.copy_to_host(mask_host, stream=stream) + num_out = nms_postprocess(keep_out, mask_host, boxes_num) + keep = keep_out[:num_out] + return list(order[keep]) + + +@cuda.jit('(int64, int64, float32[:], float32[:], float32[:])', fastmath=False) +def rotate_iou_kernel(N, K, dev_boxes, dev_query_boxes, dev_iou): + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.x + col_start = cuda.blockIdx.y + tx = cuda.threadIdx.x + row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + + dev_query_box_idx = threadsPerBlock * col_start + tx + dev_box_idx = threadsPerBlock * row_start + tx + if (tx < col_size): + block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] + block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] + block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] + block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] + block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] + if (tx < row_size): + block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] + block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] + block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] + block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] + block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] + cuda.syncthreads() + if tx < row_size: + for i in range(col_size): + offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i + dev_iou[offset] = devRotateIoU(block_qboxes[i * 5:i * 5 + 5], + block_boxes[tx * 5:tx * 5 + 5]) + + +def rotate_iou_gpu(boxes, query_boxes, device_id=0): + """rotated box iou running in gpu. 500x faster than cpu version + (take 5ms in one example with numba.cuda code). + convert from [this project]( + https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). + + Args: + boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, + angles(clockwise when positive) + query_boxes (float tensor: [K, 5]): [description] + device_id (int, optional): Defaults to 0. 
[description] + + Returns: + [type]: [description] + """ + box_dtype = boxes.dtype + boxes = boxes.astype(np.float32) + query_boxes = query_boxes.astype(np.float32) + N = boxes.shape[0] + K = query_boxes.shape[0] + iou = np.zeros((N, K), dtype=np.float32) + if N == 0 or K == 0: + return iou + threadsPerBlock = 8 * 8 + cuda.select_device(device_id) + blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) + + stream = cuda.stream() + with stream.auto_synchronize(): + boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) + query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) + iou_dev = cuda.to_device(iou.reshape([-1]), stream) + rotate_iou_kernel[blockspergrid, threadsPerBlock, stream]( + N, K, boxes_dev, query_boxes_dev, iou_dev) + iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) + return iou.astype(boxes.dtype) + + +@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True) +def devRotateIoUEval(rbox1, rbox2, criterion=-1): + area1 = rbox1[2] * rbox1[3] + area2 = rbox2[2] * rbox2[3] + area_inter = inter(rbox1, rbox2) + if criterion == -1: + return area_inter / (area1 + area2 - area_inter) + elif criterion == 0: + return area_inter / area1 + elif criterion == 1: + return area_inter / area2 + else: + return area_inter + + +@cuda.jit( + '(int64, int64, float32[:], float32[:], float32[:], int32)', + fastmath=False) +def rotate_iou_kernel_eval(N, + K, + dev_boxes, + dev_query_boxes, + dev_iou, + criterion=-1): + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.x + col_start = cuda.blockIdx.y + tx = cuda.threadIdx.x + row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + + dev_query_box_idx = threadsPerBlock * col_start + tx + dev_box_idx = threadsPerBlock * row_start + tx + if (tx < col_size): + block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] + block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] + block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] + block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] + block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] + if (tx < row_size): + block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] + block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] + block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] + block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] + block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] + cuda.syncthreads() + if tx < row_size: + for i in range(col_size): + offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i + dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5], + block_boxes[tx * 5:tx * 5 + 5], + criterion) + + +def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): + """rotated box iou running in gpu. 500x faster than cpu version + (take 5ms in one example with numba.cuda code). + convert from [this project]( + https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). + + Args: + boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, + angles(clockwise when positive) + query_boxes (float tensor: [K, 5]): [description] + device_id (int, optional): Defaults to 0. 
[description] + + Returns: + [type]: [description] + """ + box_dtype = boxes.dtype + boxes = boxes.astype(np.float32) + query_boxes = query_boxes.astype(np.float32) + N = boxes.shape[0] + K = query_boxes.shape[0] + iou = np.zeros((N, K), dtype=np.float32) + if N == 0 or K == 0: + return iou + threadsPerBlock = 8 * 8 + cuda.select_device(device_id) + blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) + + stream = cuda.stream() + with stream.auto_synchronize(): + boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) + query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) + iou_dev = cuda.to_device(iou.reshape([-1]), stream) + rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream]( + N, K, boxes_dev, query_boxes_dev, iou_dev, criterion) + iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) + return iou.astype(boxes.dtype) diff --git a/second/data/dataset.py b/second/data/dataset.py index 645faf12..5037f8c4 100644 --- a/second/data/dataset.py +++ b/second/data/dataset.py @@ -8,7 +8,15 @@ from second.core import box_np_ops from second.core import preprocess as prep from second.data import kitti_common as kitti -from second.data.preprocess import _read_and_prep_v9 +from second.data.preprocess import _read_and_prep_v9, quaternion_to_euler +import torch + +import argoverse +from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader +from argoverse.map_representation.map_api import ArgoverseMap +import copy + + class Dataset(object): @@ -27,15 +35,18 @@ def __len__(self): class KittiDataset(Dataset): - def __init__(self, info_path, root_path, num_point_features, + def __init__(self, info_path, root_path, class_names, num_point_features, target_assigner, feature_map_size, prep_func): - with open(info_path, 'rb') as f: - infos = pickle.load(f) - #self._kitti_infos = kitti.filter_infos_by_used_classes(infos, class_names) + #with open(info_path, 'rb') as f: + # infos = pickle.load(f) self._root_path = root_path - self._kitti_infos = infos + self.argoverse_loader = ArgoverseTrackingLoader(root_path) + self.am = ArgoverseMap() + + self.inform = info_path self._num_point_features = num_point_features - print("remain number of infos:", len(self._kitti_infos)) + self.class_names = class_names + #print("remain number of infos:", len(self._kitti_infos)) # generate anchors cache # [352, 400] ret = target_assigner.generate_anchors(feature_map_size) @@ -51,18 +62,101 @@ def __init__(self, info_path, root_path, num_point_features, "matched_thresholds": matched_thresholds, "unmatched_thresholds": unmatched_thresholds, } + self._prep_func = partial(prep_func, anchor_cache=anchor_cache) def __len__(self): - return len(self._kitti_infos) + return len(self.inform["index_list"]) @property def kitti_infos(self): - return self._kitti_infos + ret = [] + for info in self.inform["index_list"]: + annos = {} + undr_scr = info.find('_') + seq = int(info[:undr_scr]) + frame = int(info[undr_scr+1:]) + argoverse_data = self.argoverse_loader[seq] + + ## converting to kitti camera coordinate system + objects = argoverse_data.get_label_object(frame) + + city_name = argoverse_data.city_name + city_to_egovehicle_se3 = argoverse_data.get_pose(frame) + am = self.am + + gt_location = [] + dims = [] + gt_rots = [] + gt_names = [] + gt_bbox = [] + gt_occ = [] + gt_trunc = [] + gt_alpha = [] + + for obj in objects: + if obj.label_class in self.class_names: + corners = obj.as_3d_bbox() + center = np.mean(corners,axis=0) + center[2] = corners[2,2] + + #if 
np.abs(center[0]) > 32 or np.abs(center[1]) > 32 : + # continue + + gt_location.append(center) + gt_names.append(obj.label_class) + quat = obj.quaternion + yaw,pitch,roll = quaternion_to_euler(quat) + if np.pi/2<= yaw <=np.pi: + yaw = 3*np.pi/2 -yaw + else: + yaw = -yaw-np.pi/2 + assert -np.pi <= yaw <= np.pi + + gt_rots.append(yaw) + dims.append([obj.length,obj.height,obj.width]) + gt_bbox.append([100,100,200,200]) + gt_occ.append(0) + gt_trunc.append(0) + gt_alpha.append(0) + + gt_location,dims,gt_rots,gt_names = np.array(gt_location), np.array(dims), np.array(gt_rots), np.array(gt_names) + gt_bbox, gt_occ, gt_trunc, gt_alpha = np.array(gt_bbox), np.array(gt_occ), np.array(gt_trunc) ,np.array(gt_alpha) + + roi_locations = copy.deepcopy(gt_location) + ''' + if self.inform["include_roi"] or self.inform["dr_area"]: + roi_locs = city_to_egovehicle_se3.transform_point_cloud(roi_locations) # put into city coords + + if self.inform["include_roi"]: + roi_locs_flag = am.remove_non_roi_points(roi_locs, city_name) # remove non-driveable region + + if not self.inform["include_roi"] and self.inform["dr_area"]: + roi_locs_flag = am.remove_non_driveable_area_points(roi_locs, city_name) # remove non-driveable region + + gt_location,dims,gt_rots,gt_names = ( gt_location[roi_locs_flag] , dims[roi_locs_flag] , + gt_rots[roi_locs_flag] , gt_names[roi_locs_flag] ) + + gt_bbox, gt_occ, gt_trunc, gt_alpha = ( gt_bbox[roi_locs_flag], gt_occ[roi_locs_flag], gt_trunc[roi_locs_flag], gt_alpha[roi_locs_flag] ) + ''' + gt_location[:,2] -= 1.73 + gt_location[:,[0,1,2]] = gt_location[:,[1,2,0]] + gt_location[:,[0,1]] = -gt_location[:,[0,1]] + + annos["name"], annos["location"], annos["dimensions"], annos["rotation_y"] = gt_names, gt_location, dims, gt_rots + annos["bbox"], annos["alpha"], annos["occluded"], annos["truncated"] = gt_bbox, gt_alpha, gt_occ, gt_trunc + ret.append(annos) + + return ret def __getitem__(self, idx): + info = { "index": self.inform["index_list"][idx], "road_map": self.inform["road_map"] , "include_roadmap": self.inform["include_roadmap"], "include_road_points": self.inform["include_road_points"] ,"include_roi": self.inform["include_roi"], + "dr_area": self.inform["dr_area"]} return _read_and_prep_v9( - info=self._kitti_infos[idx], + info=info, + class_names=self.class_names, root_path=self._root_path, num_point_features=self._num_point_features, + argoverse_loader = self.argoverse_loader, + argoverse_map = self.am, prep_func=self._prep_func) diff --git a/second/data/preprocess.py b/second/data/preprocess.py index 436f55dd..511efda0 100644 --- a/second/data/preprocess.py +++ b/second/data/preprocess.py @@ -11,6 +11,12 @@ from second.core.geometry import points_in_convex_polygon_3d_jit from second.core.point_cloud.bev_ops import points_to_bev from second.data import kitti_common as kitti +import argoverse +from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader +from argoverse.map_representation.map_api import ArgoverseMap +import copy +import math +import torch def merge_second_batch(batch_list, _unused=False): @@ -41,6 +47,20 @@ def merge_second_batch(batch_list, _unused=False): ret[key] = np.stack(elems, axis=0) return ret +def quaternion_to_euler(quat): + w,x,y,z = quat + + t0 = +2.0 * (w * x + y * z) + t1 = +1.0 - 2.0 * (x * x + y * y) + roll = math.atan2(t0, t1) + t2 = +2.0 * (w * y - z * x) + t2 = +1.0 if t2 > +1.0 else t2 + t2 = -1.0 if t2 < -1.0 else t2 + pitch = math.asin(t2) + t3 = +2.0 * (w * z + x * y) + t4 = +1.0 - 2.0 * (y * y + z * z) + yaw = 
math.atan2(t3, t4) + return yaw, pitch, roll def prep_pointcloud(input_dict, root_path, @@ -48,7 +68,7 @@ def prep_pointcloud(input_dict, target_assigner, db_sampler=None, max_voxels=20000, - class_names=['Car'], + class_names=['PEDESTRIAN'], remove_outside_points=False, training=True, create_targets=True, @@ -79,113 +99,41 @@ def prep_pointcloud(input_dict, min_gt_point_dict=None, bev_only=False, use_group_id=False, - out_dtype=np.float32): + out_dtype=np.float32 + ): """convert point cloud to voxels, create targets if ground truths exists. """ points = input_dict["points"] + pc_range = voxel_generator.point_cloud_range + + pts_x, pts_y, pts_z = points[:, 0], points[:, 1], points[:, 2] + range_flag = ( (pts_x >= pc_range[0]) & (pts_x <= pc_range[3]) + & (pts_y >= pc_range[1]) & (pts_y <= pc_range[4]) + & (pts_z >= pc_range[2]) & (pts_z <= pc_range[5]) ) + + points = points[range_flag] + + if training: gt_boxes = input_dict["gt_boxes"] gt_names = input_dict["gt_names"] - difficulty = input_dict["difficulty"] + ## group_ids ? np.arange(num_gt,dtype=np.int32) num_gt - number of objects (of all categories) in annotated lidar frame + group_ids = None if use_group_id and "group_ids" in input_dict: group_ids = input_dict["group_ids"] - rect = input_dict["rect"] - Trv2c = input_dict["Trv2c"] - P2 = input_dict["P2"] - unlabeled_training = unlabeled_db_sampler is not None - image_idx = input_dict["image_idx"] - - if reference_detections is not None: - C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2) - frustums = box_np_ops.get_frustum_v2(reference_detections, C) - frustums -= T - # frustums = np.linalg.inv(R) @ frustums.T - frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums) - frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c) - surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums) - masks = points_in_convex_polygon_3d_jit(points, surfaces) - points = points[masks.any(-1)] - - if remove_outside_points and not lidar_input: - image_shape = input_dict["image_shape"] - points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2, - image_shape) - if remove_environment is True and training: - selected = kitti.keep_arrays_by_name(gt_names, class_names) - gt_boxes = gt_boxes[selected] - gt_names = gt_names[selected] - difficulty = difficulty[selected] - if group_ids is not None: - group_ids = group_ids[selected] - points = prep.remove_points_outside_boxes(points, gt_boxes) - if training: - # print(gt_names) - selected = kitti.drop_arrays_by_name(gt_names, ["DontCare"]) - gt_boxes = gt_boxes[selected] - gt_names = gt_names[selected] - difficulty = difficulty[selected] - if group_ids is not None: - group_ids = group_ids[selected] - - gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c) - if remove_unknown: - remove_mask = difficulty == -1 - """ - gt_boxes_remove = gt_boxes[remove_mask] - gt_boxes_remove[:, 3:6] += 0.25 - points = prep.remove_points_in_boxes(points, gt_boxes_remove) - """ - keep_mask = np.logical_not(remove_mask) - gt_boxes = gt_boxes[keep_mask] - gt_names = gt_names[keep_mask] - difficulty = difficulty[keep_mask] - if group_ids is not None: - group_ids = group_ids[keep_mask] + + + #unlabeled_training = unlabeled_db_sampler is not None + + if training: + + gt_boxes_mask = np.array( [n in class_names for n in gt_names], dtype=np.bool_) - if db_sampler is not None: - sampled_dict = db_sampler.sample_all( - root_path, - gt_boxes, - gt_names, - num_point_features, - random_crop, - gt_group_ids=group_ids, - rect=rect, - Trv2c=Trv2c, - P2=P2) - - 
if sampled_dict is not None: - sampled_gt_names = sampled_dict["gt_names"] - sampled_gt_boxes = sampled_dict["gt_boxes"] - sampled_points = sampled_dict["points"] - sampled_gt_masks = sampled_dict["gt_masks"] - # gt_names = gt_names[gt_boxes_mask].tolist() - gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0) - # gt_names += [s["name"] for s in sampled] - gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes]) - gt_boxes_mask = np.concatenate( - [gt_boxes_mask, sampled_gt_masks], axis=0) - if group_ids is not None: - sampled_group_ids = sampled_dict["group_ids"] - group_ids = np.concatenate([group_ids, sampled_group_ids]) - - if remove_points_after_sample: - points = prep.remove_points_in_boxes( - points, sampled_gt_boxes) - - points = np.concatenate([sampled_points, points], axis=0) - # unlabeled_mask = np.zeros((gt_boxes.shape[0], ), dtype=np.bool_) - if without_reflectivity: - used_point_axes = list(range(num_point_features)) - used_point_axes.pop(3) - points = points[:, used_point_axes] - pc_range = voxel_generator.point_cloud_range - if bev_only: # set z and h to limits - gt_boxes[:, 2] = pc_range[2] - gt_boxes[:, 5] = pc_range[5] - pc_range[2] + #print(gt_boxes_mask.shape,gt_boxes.shape,"before") + prep.noise_per_object_v3_( gt_boxes, points, @@ -195,14 +143,23 @@ def prep_pointcloud(input_dict, global_random_rot_range=global_random_rot_range, group_ids=group_ids, num_try=100) + #print(gt_boxes_mask.shape,gt_boxes.shape,"after") + # should remove unrelated objects after noise per object gt_boxes = gt_boxes[gt_boxes_mask] gt_names = gt_names[gt_boxes_mask] + + if group_ids is not None: group_ids = group_ids[gt_boxes_mask] + + + gt_classes = np.array( [class_names.index(n) + 1 for n in gt_names], dtype=np.int32) - + + + #need to check the output gt_boxes, points = prep.random_flip(gt_boxes, points) gt_boxes, points = prep.global_rotation( gt_boxes, points, rotation=global_rotation_noise) @@ -211,7 +168,8 @@ def prep_pointcloud(input_dict, # Global translation gt_boxes, points = prep.global_translate(gt_boxes, points, global_loc_noise_std) - + + bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]] mask = prep.filter_gt_box_outside_range(gt_boxes, bv_range) gt_boxes = gt_boxes[mask] @@ -222,17 +180,16 @@ def prep_pointcloud(input_dict, # limit rad to [-pi, pi] gt_boxes[:, 6] = box_np_ops.limit_period( gt_boxes[:, 6], offset=0.5, period=2 * np.pi) + #assert -np.pi/2 <= g <= np.pi/2 if shuffle_points: # shuffle is a little slow. 
np.random.shuffle(points) - # [0, -40, -3, 70.4, 40, 1] voxel_size = voxel_generator.voxel_size pc_range = voxel_generator.point_cloud_range grid_size = voxel_generator.grid_size - # [352, 400] - + voxels, coordinates, num_points = voxel_generator.generate( points, max_voxels) @@ -242,11 +199,8 @@ def prep_pointcloud(input_dict, 'coordinates': coordinates, "num_voxels": np.array([voxels.shape[0]], dtype=np.int64) } - example.update({ - 'rect': rect, - 'Trv2c': Trv2c, - 'P2': P2, - }) + + # if not lidar_input: feature_map_size = grid_size[:2] // out_size_factor feature_map_size = [*feature_map_size, 1][::-1] @@ -264,8 +218,7 @@ def prep_pointcloud(input_dict, anchors_bv = box_np_ops.rbbox2d_to_near_bbox( anchors[:, [0, 1, 3, 4, 6]]) example["anchors"] = anchors - # print("debug", anchors.shape, matched_thresholds.shape) - # anchors_bv = anchors_bv.reshape([-1, 4]) + anchors_mask = None if anchor_area_threshold >= 0: coors = coordinates @@ -276,8 +229,8 @@ def prep_pointcloud(input_dict, anchors_area = box_np_ops.fused_get_anchors_area( dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size) anchors_mask = anchors_area > anchor_area_threshold - # example['anchors_mask'] = anchors_mask.astype(np.uint8) example['anchors_mask'] = anchors_mask + if generate_bev: bev_vxsize = voxel_size.copy() bev_vxsize[:2] /= 2 @@ -303,58 +256,120 @@ def prep_pointcloud(input_dict, return example -def _read_and_prep_v9(info, root_path, num_point_features, prep_func): +def _read_and_prep_v9(info, class_names,root_path, num_point_features, argoverse_loader ,argoverse_map,prep_func): """read data from KITTI-format infos, then call prep function. """ # velodyne_path = str(pathlib.Path(root_path) / info['velodyne_path']) # velodyne_path += '_reduced' - v_path = pathlib.Path(root_path) / info['velodyne_path'] - v_path = v_path.parent.parent / ( - v_path.parent.stem + "_reduced") / v_path.name + fr_seq = info["index"] + undr_scr = fr_seq.find('_') + seq = int(fr_seq[:undr_scr]) + frame = int(fr_seq[undr_scr+1:]) + argoverse_data = argoverse_loader[seq] + + ## converting to kitti camera coordinate system + ## all input points + points = argoverse_data.get_lidar(frame) + roi_pts = copy.deepcopy(points) + objects = argoverse_data.get_label_object(frame) - points = np.fromfile( - str(v_path), dtype=np.float32, - count=-1).reshape([-1, num_point_features]) - image_idx = info['image_idx'] - rect = info['calib/R0_rect'].astype(np.float32) - Trv2c = info['calib/Tr_velo_to_cam'].astype(np.float32) - P2 = info['calib/P2'].astype(np.float32) + city_name = argoverse_data.city_name + city_to_egovehicle_se3 = argoverse_data.get_pose(frame) + am = argoverse_map + + + + if info["include_roi"] or info["dr_area"] or not info["include_road_points"]: + roi_pts = city_to_egovehicle_se3.transform_point_cloud(roi_pts) # put into city coords + + if info["include_roi"]: + roi_pts_flag = am.remove_non_roi_points(roi_pts, city_name) # remove non-driveable region + roi_pts = roi_pts[roi_pts_flag] + + if not info["include_roi"] and info["dr_area"]: + roi_pts_flag = am.remove_non_driveable_area_points(roi_pts, city_name) # remove non-driveable region + roi_pts = roi_pts[roi_pts_flag] + + + if not info["include_road_points"]: + roi_pts = am.remove_ground_surface(roi_pts, city_name) # remove ground surface + # convert city to lidar co-ordinates + if info["include_roi"] or info["dr_area"] or not info["include_road_points"]: + roi_pts = city_to_egovehicle_se3.inverse_transform_point_cloud(roi_pts) + + + roi_pts[:,2] = roi_pts[:,2] - 1.73 + + + 
gt_location = [] + dims = [] + gt_rots = [] + gt_names = [] + + for obj in objects: + if obj.label_class in class_names: + corners = obj.as_3d_bbox() + center = np.mean(corners,axis=0) + center[2] = corners[2,2] + gt_location.append(center) + gt_names.append(obj.label_class) + quat = obj.quaternion + yaw,pitch,roll = quaternion_to_euler(quat) + if np.pi/2<= yaw <=np.pi: + yaw = 3*np.pi/2 -yaw + else: + yaw = -yaw-np.pi/2 + assert -np.pi <= yaw <= np.pi + + gt_rots.append(yaw) + dims.append([obj.width,obj.length,obj.height]) + + gt_location = np.array(gt_location) + + if info["include_roi"]: + roi_locs = city_to_egovehicle_se3.transform_point_cloud(gt_location) # put into city coords + #non roi + roi_locs_flag = am.remove_non_roi_points(roi_locs, city_name) # remove non-driveable region + + if not info["include_roi"] and info["dr_area"]: + roi_locs = city_to_egovehicle_se3.transform_point_cloud(gt_location) # put into city coords + #non roi + roi_locs_flag = am.remove_non_driveable_area_points(roi_locs, city_name) # remove non-driveable region + + + + dims = np.array(dims) + gt_rots = np.array( gt_rots ) + gt_location[:,2] -= 1.73 + gt_boxes = np.concatenate([gt_location, dims, gt_rots[..., np.newaxis]], axis=1).astype(np.float32) + gt_names = np.array(gt_names) + + if info["include_roi"] or info["dr_area"] : + gt_boxes = gt_boxes[roi_locs_flag] + gt_names = gt_names[roi_locs_flag] + input_dict = { - 'points': points, - 'rect': rect, - 'Trv2c': Trv2c, - 'P2': P2, - 'image_shape': np.array(info["img_shape"], dtype=np.int32), - 'image_idx': image_idx, - 'image_path': info['img_path'], - # 'pointcloud_num_features': num_point_features, + 'points': roi_pts, + 'pointcloud_num_features': num_point_features, + 'gt_boxes': gt_boxes, + 'gt_names': gt_names, } - if 'annos' in info: - annos = info['annos'] - # we need other objects to avoid collision when sample - annos = kitti.remove_dontcare(annos) - loc = annos["location"] - dims = annos["dimensions"] - rots = annos["rotation_y"] - gt_names = annos["name"] - # print(gt_names, len(loc)) - gt_boxes = np.concatenate( - [loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32) - # gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c) - difficulty = annos["difficulty"] - input_dict.update({ - 'gt_boxes': gt_boxes, - 'gt_names': gt_names, - 'difficulty': difficulty, - }) + ''' if 'group_ids' in annos: input_dict['group_ids'] = annos["group_ids"] + ''' example = prep_func(input_dict=input_dict) - example["image_idx"] = image_idx - example["image_shape"] = input_dict["image_shape"] if "anchors_mask" in example: example["anchors_mask"] = example["anchors_mask"].astype(np.uint8) + example["image_idx"] = fr_seq + example["image_shape"] = np.array([400,400],dtype=np.int32) + if info["include_roadmap"]: + example["road_map"] = torch.load(info["road_map"] + fr_seq ) + else: + example["road_map"] = None + example["include_roadmap"] = info["include_roadmap"] + #torch.save(example,"./network_input_examples/" + info) return example diff --git a/second/get_tracking_result.py b/second/get_tracking_result.py new file mode 100644 index 00000000..8a569a6e --- /dev/null +++ b/second/get_tracking_result.py @@ -0,0 +1,92 @@ +import argoverse +from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader +from argoverse.map_representation.map_api import ArgoverseMap +import pickle +import math +import numpy as np +import os +import argparse + +parser = argparse.ArgumentParser(description="arg parser") 
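# Illustrative sketch (hypothetical helper, not part of this patch): the label
# conversion above collapses each Argoverse box quaternion to a single yaw and
# remaps it into the KITTI-style heading convention the anchors were tuned for.
# The logic mirrors quaternion_to_euler() in preprocess.py plus the remapping
# hard-coded in the diff above.
import math
import numpy as np

def argo_quat_to_kitti_yaw(quat):
    w, x, y, z = quat  # obj.quaternion component order assumed by the diff
    # rotation about +z, same formula as quaternion_to_euler()
    yaw = math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))
    # remap the Argoverse heading into the convention expected downstream
    if np.pi / 2 <= yaw <= np.pi:
        yaw = 3 * np.pi / 2 - yaw
    else:
        yaw = -yaw - np.pi / 2
    assert -np.pi <= yaw <= np.pi
    return yaw

# identity quaternion (box facing +x in the ego frame) maps to -pi/2
assert abs(argo_quat_to_kitti_yaw((1.0, 0.0, 0.0, 0.0)) + np.pi / 2) < 1e-9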
+parser.add_argument('--model_path',type=str,help='specify path where result file is stored.') +parser.add_argument('--sv_dir',type=str,help='specify path to store the results.') +parser.add_argument('--set',type=str,default='val',help='test/train/val') + +#parser.add_argument('--thresh',type=float,help='specify threshold for mAP evaluation') +#parser.add_argument('--det_thresh',type=float,help='specify detection threshold for filtering detections') +args = parser.parse_args() + + + +## NOTE : if file already exists, this code will append on file +## for storing in form of world coordinates +## second entry 1 for pedestrian, 2 for car + +#sv_dir = "./AB3DMOT/data/argo/car_3d_det_val_96/" + +def track_format(): + f = open(args.model_path,"rb") + result = pickle.load(f) + print(result[0].keys()) + + sv_dir = args.sv_dir + + if not os.path.exists(sv_dir): + os.mkdir(sv_dir) + else: + for fls in os.listdir(sv_dir): + os.remove(sv_dir+fls) + + save_path= "" + + root_dir = os.path.join('./../../argodataset/argoverse-tracking/', args.set) + argoverse_loader = ArgoverseTrackingLoader(root_dir) + + am = ArgoverseMap() + + for res_cnt,res in enumerate(result): + if len(res['image_idx'])==0: + continue + if res_cnt%100 == 0: + print(res_cnt) + fr_seq = res['image_idx'][0] + undr_scr = fr_seq.find('_') + seq = int(fr_seq[:undr_scr]) + frame = int(fr_seq[undr_scr+1:]) + argoverse_data = argoverse_loader[seq] + seq_id = argoverse_loader.log_list[seq] + #save_path = os.path.join("./point_pillars/second/sample_result_ab3dmot_format/",seq_id+".txt") + save_path = os.path.join(sv_dir,seq_id+".txt") + + city_name = argoverse_data.city_name + city_to_egovehicle_se3 = argoverse_data.get_pose(frame) + + file = "" + if not os.path.exists(save_path): + file = open(save_path,"w") + else: + file = open(save_path,"a") + + for i in range(len(res['name'])): + r_y = res['rotation_y'][i] + if r_y > np.pi : r_y -= 2*np.pi + if r_y < -np.pi : r_y += 2*np.pi + + x,y,z = res['location'][i][2], -res['location'][i][0], 1.73 - res['location'][i][1] + #print(z) + roi_pts = city_to_egovehicle_se3.transform_point_cloud(np.array([[x,y,z]])) # put into city coords + x,y,z = roi_pts[0] + x,y,z = -y,-z,x + + + file.write( str(frame) + ", 1 ," + str(res['bbox'][i][0]) + " , " + str(res['bbox'][i][1]) + " , " + + str(res['bbox'][i][2]) + " , " + str(res['bbox'][i][3]) + " , " + str(res['score'][i]) + " , " + + str(res['dimensions'][i][1]) + " , " + str(res['dimensions'][i][2]) + " , " + str(res['dimensions'][i][0]) + + " , " + str(x) + " , " + str(y) + " , " + + str(z) + " , " + str(r_y) + + " , " + str(res['alpha'][i]) + " " + str("\n") ) + file.close() + + +if __name__ == '__main__': + track_format() \ No newline at end of file diff --git a/second/ped_train.trch b/second/ped_train.trch new file mode 100644 index 0000000000000000000000000000000000000000..cdbaa30ce64188f162734dabfe1867edea85da4b GIT binary patch literal 30537 zcmY-2dA#Ri`N#1y#w6q?Ns@#lB%C=j*DOhrkbP%_CNVLL;d2}NzVG{%kR%~Vk|arz zBuSEe&%Wu zX!(O?Tld+~{O|tFz|vDbIxsM>YuRPiF5B)hW9lB$%5Kwlnc0lu6Q<0=ugsV}Yu1cu zQ>V?`Zu&Meo7s+aQ z7wXs1e5$M2;==tpnooB%n_Q${NAsDkW}Az)nT{ph#d=jVi?>T0Thsk)S4Oi$ujGq= zt}CNivRCq@Ki`$nEY&Od!b^8$G|Ti#zU&veGMZ(3CFy>#E2CMiSCa1XT^Y>^y^?eX zyE2-wy^?f?x-yz^?Jnys2AlEyI-2Newl|?)M>Daj+2Evp9nIvfW{Y3y*U_xl)ok+1 z{W_YJ+8l>S^eeq8nw8sS4b?=y+Lh6)(kuCbU+cx(D@k;%u8d~wUP+?sbY(Q__DT|6uPdWjzgLpz23;A=hV3rvE(V*8`gJrL zcQxDFq+ds~X;-ts&H8mTn|C!^oYJqO*`ll2LhFR&j@R^-31;-L8yg`(DWscIe7zruRyc-mxpA z*{N5O^o*{IX6If>(lfgn;YH(yyc0t*hDItbQHM?p@6W_vqKr 
[remainder of the base85 "GIT binary patch" payloads for the bundled split files second/ped_train.trch and second/ped_val.trch elided: machine-generated binary data with no human-readable content]
zLRQtT+TMk^TB})DM`5nkYF1{2xn8ST+PK18u+^+>d~m$8 zVO5_Xg6|U)?`&Mv_f>Mg7V@AImE5m|JmvvP?$<&d@<1i`Yax#~SqVQ_$UA$85`L(V zclIzP{BR-f>=8=%kwV_tDN6WJf$`2x-}zh^>evUZ&4nDUq{5&=L zJHIge0yX=)FgQNB;jPO>A}-uP@yU&ET`o~_;TG}-U8dy1E#z&uLdk_&$eVDLk_)$x zx8NEj7j7YMz;#M4+(OoWgOUrkkmcW``1-Bz=-`wDa2RHquSfd2tWWkL7= literal 0 HcmV?d00001 diff --git a/second/pp_inference.py b/second/pp_inference.py new file mode 100644 index 00000000..7b9e8e11 --- /dev/null +++ b/second/pp_inference.py @@ -0,0 +1,267 @@ +pth = '/users/gpu/gunji/.local/lib/python3.6/site-packages' +import sys +if pth in sys.path: + sys.path.remove(pth) + +print("pp_inference",sys.path) +import argoverse +from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader +from argoverse.map_representation.map_api import ArgoverseMap +import pathlib +import shutil +from second.data.preprocess import merge_second_batch, prep_pointcloud +from pathlib import Path + +import numpy as np +import torch +import os + +import torchplus +from second.core import box_np_ops +from second.core.inference import InferenceContext +from second.builder import target_assigner_builder, voxel_builder +from second.pytorch.builder import box_coder_builder, second_builder +from second.pytorch.models.voxelnet import VoxelNet +from second.pytorch.train import predict_kitti_to_anno, example_convert_to_torch +import fire +import copy +import abc +import contextlib + +import numpy as np +from google.protobuf import text_format + +from second.data.preprocess import merge_second_batch, prep_pointcloud +from second.protos import pipeline_pb2 +from numba.errors import NumbaDeprecationWarning, NumbaWarning, NumbaPerformanceWarning +import warnings +import pickle +warnings.simplefilter('ignore', category=NumbaDeprecationWarning) +warnings.simplefilter('ignore', category=NumbaWarning) +warnings.simplefilter('ignore', category=NumbaPerformanceWarning) + + +import argparse +parser = argparse.ArgumentParser(description="arg parser") +parser.add_argument('--device', type=int, default=1, help='specify the gpu device for training') +parser.add_argument('--model_path', type=str, help='specify path to load model from.') +parser.add_argument('--save_path', type=str, help='specify path to save evaluation results.') +parser.add_argument('--config_path', type=str, help='config path') +parser.add_argument('--model_dir', type=str, help='model dir') +parser.add_argument('--include_roi',type=int,default=1) +parser.add_argument('--include_road_points',type=int,default=0) +parser.add_argument('--dr_area',type=int,default=0) +parser.add_argument('--set', default='val',type=str, help='val/test/train') + + + +args = parser.parse_args() +print(args.device) +torch.cuda.set_device(args.device) + + + +def test(config_path=args.config_path, + model_dir = args.model_dir, + result_path=None, + create_folder=False, + pickle_result=True, + include_roadmap=False, + device=1): + """train a VoxelNet model specified by a config file. 
+ """ + if create_folder: + if pathlib.Path(model_dir).exists(): + model_dir = torchplus.train.create_folder(model_dir) + + model_dir = pathlib.Path(model_dir) + model_dir.mkdir(parents=True, exist_ok=True) + eval_checkpoint_dir = model_dir / 'eval_checkpoints' + eval_checkpoint_dir.mkdir(parents=True, exist_ok=True) + if result_path is None: + result_path = model_dir / 'results' + config_file_bkp = "pipeline.config" + config = pipeline_pb2.TrainEvalPipelineConfig() + with open(config_path, "r") as f: + proto_str = f.read() + text_format.Merge(proto_str, config) + shutil.copyfile(config_path, str(model_dir / config_file_bkp)) + input_cfg = config.train_input_reader + eval_input_cfg = config.eval_input_reader + model_cfg = config.model.second + train_cfg = config.train_config + batch_size = 1 + class_names = list(input_cfg.class_names) + ###################### + # BUILD VOXEL GENERATOR + ###################### + voxel_generator = voxel_builder.build(model_cfg.voxel_generator) + grid_size = voxel_generator.grid_size + ###################### + # BUILD TARGET ASSIGNER + ###################### + bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]] + box_coder = box_coder_builder.build(model_cfg.box_coder) + target_assigner_cfg = model_cfg.target_assigner + target_assigner = target_assigner_builder.build(target_assigner_cfg, + bv_range, box_coder) + ###################### + # BUILD NET + ###################### + center_limit_range = model_cfg.post_center_limit_range + net = second_builder.build(model_cfg, voxel_generator, target_assigner,include_roadmap) + net.cuda().eval() + + print("num_trainable parameters:", len(list(net.parameters()))) + # for n, p in net.named_parameters(): + # print(n, p.shape) + + #torchplus.train.try_restore_latest_checkpoints(model_dir, [net]) + torchplus.train.restore(args.model_path, net) + #torchplus.train.restore("./ped_models_56/voxelnet-275130.tckpt",net) + out_size_factor = model_cfg.rpn.layer_strides[0] / model_cfg.rpn.upsample_strides[0] + print(out_size_factor) + #out_size_factor *= model_cfg.middle_feature_extractor.downsample_factor + out_size_factor = int(out_size_factor) + feature_map_size = grid_size[:2] // out_size_factor + feature_map_size = [*feature_map_size, 1][::-1] + print(feature_map_size) + ret = target_assigner.generate_anchors(feature_map_size) + #anchors_dict = target_assigner.generate_anchors_dict(feature_map_size) + anchors = ret["anchors"] + anchors = anchors.reshape([-1, 7]) + matched_thresholds = ret["matched_thresholds"] + unmatched_thresholds = ret["unmatched_thresholds"] + anchors_bv = box_np_ops.rbbox2d_to_near_bbox( + anchors[:, [0, 1, 3, 4, 6]]) + anchor_cache = { + "anchors": anchors, + "anchors_bv": anchors_bv, + "matched_thresholds": matched_thresholds, + "unmatched_thresholds": unmatched_thresholds, + #"anchors_dict": anchors_dict, + } + + + am = ArgoverseMap() + dt_annos = [] + + root_dir = os.path.join('./../../argodataset/argoverse-tracking/',args.set) + argoverse_loader = ArgoverseTrackingLoader(root_dir) + + prog_cnt = 0 + for seq in range(len(argoverse_loader)): + argoverse_data = argoverse_loader[seq] + nlf = argoverse_data.num_lidar_frame + for frame in range(nlf): + prog_cnt += 1 + if prog_cnt%50 ==0: + print(prog_cnt) + points = argoverse_data.get_lidar(frame) + roi_pts = copy.deepcopy(points) + city_name = argoverse_data.city_name + city_to_egovehicle_se3 = argoverse_data.get_pose(frame) + + + ''' + roi_pts = city_to_egovehicle_se3.transform_point_cloud(roi_pts) # put into city coords + #non roi + roi_pts_flag 
= am.remove_non_roi_points(roi_pts, city_name) # remove non-driveable region + roi_pts = roi_pts[roi_pts_flag] + roi_pts = am.remove_ground_surface(roi_pts, city_name) # remove ground surface + + # convert city to lidar co-ordinates + + roi_pts = city_to_egovehicle_se3.inverse_transform_point_cloud(roi_pts) + ''' + if args.include_roi or args.dr_area or not args.include_road_points: + roi_pts = city_to_egovehicle_se3.transform_point_cloud(roi_pts) # put into city coords + + if args.include_roi: + roi_pts_flag = am.remove_non_roi_points(roi_pts, city_name) # remove non-driveable region + roi_pts = roi_pts[roi_pts_flag] + + if not args.include_roi and args.dr_area: + roi_pts_flag = am.remove_non_driveable_area_points(roi_pts, city_name) # remove non-driveable region + roi_pts = roi_pts[roi_pts_flag] + + + if not args.include_road_points: + roi_pts = am.remove_ground_surface(roi_pts, city_name) # remove ground surface + + # convert city to lidar co-ordinates + if args.include_roi or args.dr_area or not args.include_road_points: + roi_pts = city_to_egovehicle_se3.inverse_transform_point_cloud(roi_pts) + + + + + roi_pts[:,2] = roi_pts[:,2] - 1.73 + + pts_x, pts_y, pts_z = roi_pts[:,0], roi_pts[:,1], roi_pts[:,2] + + + input_dict = { + 'points': roi_pts, + 'pointcloud_num_features': 3, + } + + out_size_factor = model_cfg.rpn.layer_strides[0] // model_cfg.rpn.upsample_strides[0] + + + example = prep_pointcloud( + input_dict=input_dict, + root_path=None, + voxel_generator=voxel_generator, + target_assigner=target_assigner, + max_voxels=input_cfg.max_number_of_voxels, + class_names=list(input_cfg.class_names), + training=False, + create_targets=False, + shuffle_points=input_cfg.shuffle_points, + generate_bev=False, + without_reflectivity=model_cfg.without_reflectivity, + num_point_features=model_cfg.num_point_features, + anchor_area_threshold=input_cfg.anchor_area_threshold, + anchor_cache=anchor_cache, + out_size_factor=out_size_factor, + out_dtype=np.float32) + + + + if "anchors_mask" in example: + example["anchors_mask"] = example["anchors_mask"].astype(np.uint8) + example["image_idx"] = str(seq) + "_" + str(frame) + example["image_shape"] = np.array([400,400],dtype=np.int32) + example["road_map"] = None + example["include_roadmap"] = False + example["points"] = roi_pts + #torch.save(example,"./network_input_examples/" + info) + example = merge_second_batch([example]) + + example_torch = example_convert_to_torch(example,device=args.device) + try: + result_annos = predict_kitti_to_anno( + net, example_torch, input_cfg.class_names, + model_cfg.post_center_limit_range, model_cfg.lidar_input) + except: + print(seq,frame) + continue + dt_annos += result_annos + + if pickle_result: + sdi = args.save_path.rfind('/') + save_dir = args.save_path[:sdi] + if not os.path.exists(save_dir): + os.mkdir(save_dir) + + with open( args.save_path, 'wb') as f: + pickle.dump(dt_annos, f) + + +if __name__ == '__main__': + test() + + + + \ No newline at end of file diff --git a/second/protos/input_reader_pb2.py b/second/protos/input_reader_pb2.py index 8f831b1a..6ffdd530 100644 --- a/second/protos/input_reader_pb2.py +++ b/second/protos/input_reader_pb2.py @@ -224,7 +224,8 @@ has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), + ], extensions=[ ], diff --git a/second/pytorch/builder/input_reader_builder.py 
b/second/pytorch/builder/input_reader_builder.py
index a9345808..9b293de4 100644
--- a/second/pytorch/builder/input_reader_builder.py
+++ b/second/pytorch/builder/input_reader_builder.py
@@ -48,6 +48,7 @@ def dataset(self):
 def build(input_reader_config,
           model_config,
+          options,
           training,
           voxel_generator,
           target_assigner=None) -> DatasetWrapper:
@@ -66,7 +67,6 @@ def build(input_reader_config,
     if not isinstance(input_reader_config, input_reader_pb2.InputReader):
         raise ValueError('input_reader_config not of type '
                          'input_reader_pb2.InputReader.')
-    dataset = dataset_builder.build(input_reader_config, model_config,
-                                    training, voxel_generator, target_assigner)
+    dataset = dataset_builder.build(input_reader_config, model_config, options, training, voxel_generator, target_assigner)
     dataset = DatasetWrapper(dataset)
     return dataset
diff --git a/second/pytorch/builder/second_builder.py b/second/pytorch/builder/second_builder.py
index 20d0d4bf..b22b5878 100644
--- a/second/pytorch/builder/second_builder.py
+++ b/second/pytorch/builder/second_builder.py
@@ -21,7 +21,7 @@
 def build(model_cfg: second_pb2.VoxelNet, voxel_generator,
-          target_assigner) -> VoxelNet:
+          target_assigner, include_roadmap=False) -> VoxelNet:
     """build second pytorch instance.
     """
     if not isinstance(model_cfg, second_pb2.VoxelNet):
@@ -50,6 +50,7 @@ def build(model_cfg: second_pb2.VoxelNet, voxel_generator,
     direction_loss_weight = model_cfg.direction_loss_weight
 
     net = VoxelNet(
+        include_roadmap,
         dense_shape,
         num_class=num_class,
         vfe_class_name=model_cfg.voxel_feature_extractor.module_class_name,
diff --git a/second/pytorch/core/box_torch_ops.py b/second/pytorch/core/box_torch_ops.py
index 0c92f57b..1fcfa4ea 100644
--- a/second/pytorch/core/box_torch_ops.py
+++ b/second/pytorch/core/box_torch_ops.py
@@ -452,11 +452,12 @@ def nms(bboxes, scores,
         indices = torch.topk(scores, k=pre_max_size)
         bboxes = bboxes[indices]
     dets = torch.cat([bboxes, scores.unsqueeze(-1)], dim=1)
+    device = dets.get_device()
     dets_np = dets.data.cpu().numpy()
     if len(dets_np) == 0:
         keep = np.array([], dtype=np.int64)
     else:
-        ret = np.array(nms_gpu(dets_np, iou_threshold), dtype=np.int64)
+        ret = np.array(nms_gpu(dets_np, iou_threshold, device), dtype=np.int64)
         keep = ret[:post_max_size]
     if keep.shape[0] == 0:
         return None
diff --git a/second/pytorch/inference.py b/second/pytorch/inference.py
index 4e1e38d6..4d8406ee 100644
--- a/second/pytorch/inference.py
+++ b/second/pytorch/inference.py
@@ -12,6 +12,7 @@
 from second.pytorch.train import predict_kitti_to_anno, example_convert_to_torch
 
+
 class TorchInferenceContext(InferenceContext):
     def __init__(self):
         super().__init__()
diff --git a/second/pytorch/models/pointpillars.py b/second/pytorch/models/pointpillars.py
index 6d975d1e..a79be695 100644
--- a/second/pytorch/models/pointpillars.py
+++ b/second/pytorch/models/pointpillars.py
@@ -11,6 +11,7 @@
 from second.pytorch.utils import get_paddings_indicator
 from torchplus.nn import Empty
 from torchplus.tools import change_default_args
+import numpy as np
 
 
 class PFNLayer(nn.Module):
@@ -133,7 +134,8 @@ def forward(self, features, num_voxels, coors):
         mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
         mask = torch.unsqueeze(mask, -1).type_as(features)
         features *= mask
-
+
+        #print(features.shape, num_voxels.shape, torch.min(num_voxels), torch.max(num_voxels))
         # Forward pass through PFNLayers
         for pfn in self.pfn_layers:
             features = pfn(features)
@@ -160,15 +162,16 @@ def __init__(self,
         self.nx = output_shape[3]
         self.nchannels = num_input_features
 
-    def forward(self, voxel_features, coords, batch_size):
+    def forward(self, voxel_features, coords, batch_size, road_map, include_roadmap):
 
         # batch_canvas will be the final output.
         batch_canvas = []
         for batch_itt in range(batch_size):
             # Create the canvas for this sample
-            canvas = torch.zeros(self.nchannels, self.nx * self.ny, dtype=voxel_features.dtype,
+            canvas = torch.zeros(self.nchannels + int(include_roadmap), self.nx * self.ny, dtype=voxel_features.dtype,
                                  device=voxel_features.device)
-
+            #print(road_map.shape)
+            road_mask = road_map[batch_itt]
             # Only include non-empty pillars
             batch_mask = coords[:, 0] == batch_itt
             this_coords = coords[batch_mask, :]
@@ -178,8 +181,9 @@ def forward(self, voxel_features, coords, batch_size):
             voxels = voxels.t()
 
             # Now scatter the blob back to the canvas.
-            canvas[:, indices] = voxels
-
+            canvas[:self.nchannels, indices] = voxels
+            if include_roadmap:
+                canvas[self.nchannels] = torch.from_numpy(np.reshape(road_mask, -1)).float().to('cuda')
             # Append to a list for later stacking.
             batch_canvas.append(canvas)
 
@@ -187,6 +191,7 @@ def forward(self, voxel_features, coords, batch_size):
         batch_canvas = torch.stack(batch_canvas, 0)
 
         # Undo the column stacking to final 4-dim tensor
-        batch_canvas = batch_canvas.view(batch_size, self.nchannels, self.ny, self.nx)
+        batch_canvas = batch_canvas.view(batch_size, self.nchannels + int(include_roadmap), self.ny, self.nx)
+        #print(batch_canvas.shape)
         return batch_canvas
diff --git a/second/pytorch/models/voxelnet.py b/second/pytorch/models/voxelnet.py
index 3e496727..05de2460 100644
--- a/second/pytorch/models/voxelnet.py
+++ b/second/pytorch/models/voxelnet.py
@@ -3,7 +3,7 @@
 from functools import reduce
 
 import numpy as np
-import sparseconvnet as scn
+#import sparseconvnet as scn
 import torch
 from torch import nn
 from torch.nn import functional as F
@@ -188,7 +188,7 @@ def forward(self, features, num_voxels, coors):
         voxelwise = torch.max(features, dim=1)[0]
         return voxelwise
 
-
+'''
 class SparseMiddleExtractor(nn.Module):
     def __init__(self,
                  output_shape,
@@ -258,7 +258,7 @@ def forward(self, voxel_features, coors, batch_size):
         N, C, D, H, W = ret.shape
         ret = ret.view(N, C * D, H, W)
         return ret
-
+'''
 
 class ZeroPad3d(nn.ConstantPad3d):
     def __init__(self, padding):
@@ -314,6 +314,7 @@ def forward(self, voxel_features, coors, batch_size):
 class RPN(nn.Module):
     def __init__(self,
+                 include_roadmap,
                  use_norm=True,
                  num_class=2,
                  layer_nums=[3, 5, 5],
@@ -379,7 +380,7 @@ def __init__(self,
         self.block1 = Sequential(
             nn.ZeroPad2d(1),
             Conv2d(
-                num_input_filters, num_filters[0], 3, stride=layer_strides[0]),
+                num_input_filters + int(include_roadmap), num_filters[0], 3, stride=layer_strides[0]),
             BatchNorm2d(num_filters[0]),
             nn.ReLU(),
         )
@@ -473,6 +474,8 @@ def forward(self, x, bev=None):
             "box_preds": box_preds,
             "cls_preds": cls_preds,
         }
+        #if not self.training:
+        #    ret_dict["final_tensor"] = x.data().cpu()
         if self._use_direction_classifier:
             dir_cls_preds = self.conv_dir_cls(x)
             dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
@@ -488,6 +491,7 @@ class LossNormType(Enum):
 
 class VoxelNet(nn.Module):
     def __init__(self,
+                 include_roadmap,
                  output_shape,
                  num_class=2,
                  num_input_features=4,
@@ -616,6 +620,7 @@ def __init__(self,
         }
         rpn_class = rpn_class_dict[rpn_class_name]
         self.rpn = rpn_class(
+            include_roadmap,
             use_norm=True,
             num_class=num_class,
             layer_nums=rpn_layer_nums,
@@ -670,7 +675,7 @@ def forward(self, example):
             preds_dict = self.sparse_rpn(voxel_features, coors, batch_size_dev)
         else:
             spatial_features = self.middle_feature_extractor(
-                voxel_features, coors, batch_size_dev)
+                voxel_features, coors, batch_size_dev, example["road_map"], example["include_roadmap"])
             if self._use_bev:
                 preds_dict = self.rpn(spatial_features, example["bev_map"])
             else:
@@ -748,9 +753,9 @@ def predict(self, example, preds_dict):
         batch_anchors = example["anchors"].view(batch_size, -1, 7)
         self._total_inference_count += batch_size
-        batch_rect = example["rect"]
-        batch_Trv2c = example["Trv2c"]
-        batch_P2 = example["P2"]
+        #batch_rect = example["rect"]
+        #batch_Trv2c = example["Trv2c"]
+        #batch_P2 = example["P2"]
         if "anchors_mask" not in example:
             batch_anchors_mask = [None] * batch_size
         else:
@@ -778,9 +783,8 @@ def predict(self, example, preds_dict):
             batch_dir_preds = [None] * batch_size
 
         predictions_dicts = []
-        for box_preds, cls_preds, dir_preds, rect, Trv2c, P2, img_idx, a_mask in zip(
-                batch_box_preds, batch_cls_preds, batch_dir_preds, batch_rect,
-                batch_Trv2c, batch_P2, batch_imgidx, batch_anchors_mask
+        for box_preds, cls_preds, dir_preds, img_idx, a_mask in zip(
+                batch_box_preds, batch_cls_preds, batch_dir_preds, batch_imgidx, batch_anchors_mask
         ):
             if a_mask is not None:
                 box_preds = box_preds[a_mask]
@@ -918,6 +922,8 @@ def predict(self, example, preds_dict):
                 final_box_preds = box_preds
                 final_scores = scores
                 final_labels = label_preds
+
+                '''
                 final_box_preds_camera = box_torch_ops.box_lidar_to_camera(
                     final_box_preds, rect, Trv2c)
                 locs = final_box_preds_camera[:, :3]
@@ -938,6 +944,15 @@ def predict(self, example, preds_dict):
                 # box_2d_preds = torch.stack([minx, miny, maxx, maxy], dim=1)
                 box_2d_preds = torch.cat([minxy, maxxy], dim=1)
                 # predictions
+                '''
+                box_2d_preds = [100, 100, 200, 200] * final_box_preds.shape[0]
+                box_2d_preds = torch.from_numpy(np.reshape(box_2d_preds, (-1, 4))).float().to('cuda')
+                final_box_preds_camera = torch.zeros(final_box_preds.shape).float().to('cuda')
+                final_box_preds_camera[:] = final_box_preds.detach()[:]
+                final_box_preds_camera[:, [0, 1, 2]] = final_box_preds_camera[:, [1, 2, 0]]
+                final_box_preds_camera[:, [0, 1]] = -final_box_preds_camera[:, [0, 1]]
+                final_box_preds_camera[:, [3, 4, 5]] = final_box_preds_camera[:, [4, 5, 3]]
+
                 predictions_dict = {
                     "bbox": box_2d_preds,
                     "box3d_camera": final_box_preds_camera,
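The predict() rewrite above has no KITTI camera calibration to work with (Argoverse detections stay in the lidar frame), so box3d_camera is filled with a fixed axis permutation instead. A standalone sketch of that mapping on a toy box (numbers invented for illustration):

```python
import numpy as np

# One box as [x, y, z, w, l, h, yaw] in the lidar frame (toy values).
box_lidar = np.array([[10.0, 2.0, -1.0, 1.9, 4.5, 1.6, 0.3]])

box_cam = box_lidar.copy()
box_cam[:, [0, 1, 2]] = box_cam[:, [1, 2, 0]]  # (x, y, z) <- (y, z, x)
box_cam[:, [0, 1]] = -box_cam[:, [0, 1]]       # negate the new x and y
box_cam[:, [3, 4, 5]] = box_cam[:, [4, 5, 3]]  # permute the size triplet the same way

print(box_cam)  # [[-2.   1.  10.   4.5  1.6  1.9  0.3]]
```

In other words (x_cam, y_cam, z_cam) = (-y_lidar, -z_lidar, x_lidar), which matches KITTI's x-right, y-down, z-forward camera convention; the 2D bbox is just a dummy [100, 100, 200, 200] since there is no image to project onto.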
diff --git a/second/pytorch/train.py b/second/pytorch/train.py
index 4fcf0902..df0ea39e 100644
--- a/second/pytorch/train.py
+++ b/second/pytorch/train.py
@@ -1,10 +1,10 @@
+import sys
 import os
 import pathlib
 import pickle
 import shutil
 import time
 from functools import partial
-
 import fire
 import numpy as np
 import torch
@@ -21,7 +21,22 @@
                             second_builder)
 from second.utils.eval import get_coco_eval_result, get_official_eval_result
 from second.utils.progress_bar import ProgressBar
+from numba.errors import NumbaDeprecationWarning, NumbaWarning, NumbaPerformanceWarning
+import warnings
+
+warnings.simplefilter('ignore', category=NumbaDeprecationWarning)
+warnings.simplefilter('ignore', category=NumbaWarning)
+warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
+
+import argparse
+parser = argparse.ArgumentParser(description="arg parser")
+parser.add_argument('--device', type=int, default=1, help='specify the gpu device for training')
+#parser.add_argument('--config_path', type=str, default="abc", help='specify the gpu device for training')
+#parser.add_argument('--model_dir', type=str, default="def", help='specify the gpu device for training')
+#parser.add_argument('--include_roadmap', type=bool, default=False, help='specify the gpu device for training')
+args, unknown = parser.parse_known_args()
+
+torch.cuda.set_device(args.device)
 
 def _get_pos_neg_loss(cls_loss, labels):
     # cls_loss: [N, num_anchors, num_class]
@@ -62,7 +77,7 @@ def flat_nested_json_dict(json_dict, sep=".") -> dict:
 
 def example_convert_to_torch(example, dtype=torch.float32,
                              device=None) -> dict:
-    device = device or torch.device("cuda:0")
+    device = device or torch.device("cuda:" + str(args.device))
     example_torch = {}
     float_names = [
         "voxels", "anchors", "reg_targets", "reg_weights", "bev_map", "rect",
@@ -89,7 +104,12 @@ def train(config_path,
           create_folder=False,
           display_step=50,
           summary_step=5,
-          pickle_result=True):
+          pickle_result=True,
+          include_roadmap=False,
+          dr_area=False,
+          include_roi=True,
+          include_road_points=False,
+          device=1):
    """train a VoxelNet model specified by a config file.
    """
    if create_folder:
@@ -130,9 +150,9 @@ def train(config_path,
     # BUILD NET
     ######################
     center_limit_range = model_cfg.post_center_limit_range
-    net = second_builder.build(model_cfg, voxel_generator, target_assigner)
+    net = second_builder.build(model_cfg, voxel_generator, target_assigner, include_roadmap)
     net.cuda()
-    # net_train = torch.nn.DataParallel(net).cuda()
+    #net = torch.nn.DataParallel(net).cuda()
     print("num_trainable parameters:", len(list(net.parameters())))
     # for n, p in net.named_parameters():
     #     print(n, p.shape)
@@ -165,16 +185,25 @@ def train(config_path,
     ######################
     # PREPARE INPUT
     ######################
+    options = {}
+    options["include_roadmap"] = include_roadmap
+    options["include_roi"] = include_roi
+    options["dr_area"] = dr_area
+    options["include_road_points"] = include_road_points
+    #print(options)
+
     dataset = input_reader_builder.build(
         input_cfg,
         model_cfg,
+        options,
         training=True,
         voxel_generator=voxel_generator,
         target_assigner=target_assigner)
     eval_dataset = input_reader_builder.build(
         eval_input_cfg,
         model_cfg,
+        options,
         training=False,
         voxel_generator=voxel_generator,
         target_assigner=target_assigner)
@@ -243,7 +272,6 @@ def _worker_init_fn(worker_id):
             example_torch = example_convert_to_torch(example, float_dtype)
 
             batch_size = example["anchors"].shape[0]
-
             ret_dict = net(example_torch)
 
             # box_preds = ret_dict["box_preds"]
@@ -305,7 +333,7 @@ def _worker_init_fn(worker_id):
                     metrics["num_anchors"] = int(num_anchors)
                     metrics["lr"] = float(
                         mixed_optimizer.param_groups[0]['lr'])
-                    metrics["image_idx"] = example['image_idx'][0]
+                    #metrics["image_idx"] = example['image_idx'][0]
                     flatted_metrics = flat_nested_json_dict(metrics)
                     flatted_summarys = flat_nested_json_dict(metrics, "/")
                     for k, v in flatted_summarys.items():
@@ -331,15 +359,15 @@ def _worker_init_fn(worker_id):
                     print(log_str)
                 ckpt_elasped_time = time.time() - ckpt_start_time
                 if ckpt_elasped_time > train_cfg.save_checkpoints_secs:
-                    torchplus.train.save_models(model_dir, [net, optimizer],
-                                                net.get_global_step())
+                    # torchplus.train.save_models(model_dir, [net, optimizer],
+                    #                             net.get_global_step())
                     ckpt_start_time = time.time()
         total_step_elapsed += steps
         torchplus.train.save_models(model_dir, [net, optimizer],
                                     net.get_global_step())
 
         # Ensure that all evaluation points are saved forever
-        torchplus.train.save_models(eval_checkpoint_dir, [net, optimizer], net.get_global_step(), max_to_keep=100)
+        #torchplus.train.save_models(eval_checkpoint_dir, [net, optimizer], net.get_global_step(), max_to_keep=100)
 
         net.eval()
         result_path_step = result_path / f"step_{net.get_global_step()}"
@@ -381,10 +409,11 @@ def _worker_init_fn(worker_id):
             f'generate label finished({sec_per_ex:.2f}/s). start eval:',
             file=logf)
         gt_annos = [
-            info["annos"] for info in eval_dataset.dataset.kitti_infos
+            info for info in eval_dataset.dataset.kitti_infos
         ]
         if not pickle_result:
             dt_annos = kitti.get_label_annos(result_path_step)
+        #print("dt_annos", dt_annos)
         result, mAPbbox, mAPbev, mAP3d, mAPaos = get_official_eval_result(gt_annos, dt_annos, class_names,
                                                                           return_data=True)
         print(result, file=logf)
@@ -394,10 +423,10 @@ def _worker_init_fn(worker_id):
         for i, class_name in enumerate(class_names):
             writer.add_scalar('bev_ap:{}'.format(class_name), mAPbev[i, 1, 0], global_step)
             writer.add_scalar('3d_ap:{}'.format(class_name), mAP3d[i, 1, 0], global_step)
-            writer.add_scalar('aos_ap:{}'.format(class_name), mAPaos[i, 1, 0], global_step)
+            #writer.add_scalar('aos_ap:{}'.format(class_name), mAPaos[i, 1, 0], global_step)
         writer.add_scalar('bev_map', np.mean(mAPbev[:, 1, 0]), global_step)
         writer.add_scalar('3d_map', np.mean(mAP3d[:, 1, 0]), global_step)
-        writer.add_scalar('aos_map', np.mean(mAPaos[:, 1, 0]), global_step)
+        #writer.add_scalar('aos_map', np.mean(mAPaos[:, 1, 0]), global_step)
 
         result = get_coco_eval_result(gt_annos, dt_annos, class_names)
         print(result, file=logf)
@@ -471,7 +500,7 @@ def _predict_kitti_to_file(net,
                 result_lines.append(result_line)
         else:
             result_lines = []
-        result_file = f"{result_save_path}/{kitti.get_image_index_str(img_idx)}.txt"
+        result_file = f"{result_save_path}/{img_idx}.txt"
         result_str = '\n'.join(result_lines)
         with open(result_file, 'w') as f:
             f.write(result_str)
@@ -504,17 +533,21 @@ def predict_kitti_to_anno(net,
             for box, box_lidar, bbox, score, label in zip(
                     box_preds, box_preds_lidar, box_2d_preds, scores,
                     label_preds):
+                '''
                 if not lidar_input:
                     if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:
                         continue
                     if bbox[2] < 0 or bbox[3] < 0:
                         continue
+                # print(img_shape)
+
                 if center_limit_range is not None:
                     limit_range = np.array(center_limit_range)
                     if (np.any(box_lidar[:3] < limit_range[:3])
                             or np.any(box_lidar[:3] > limit_range[3:])):
                         continue
+                '''
                 bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
                 bbox[:2] = np.maximum(bbox[:2], [0, 0])
                 anno["name"].append(class_names[int(label)])
@@ -545,7 +578,7 @@ def predict_kitti_to_anno(net,
             annos.append(kitti.empty_result_anno())
         num_example = annos[-1]["name"].shape[0]
         annos[-1]["image_idx"] = np.array(
-            [img_idx] * num_example, dtype=np.int64)
+            [img_idx] * num_example)
     return annos
 
 
@@ -555,7 +588,10 @@ def evaluate(config_path,
              predict_test=False,
              ckpt_path=None,
              ref_detfile=None,
-             pickle_result=True):
+             pickle_result=True,
+             include_roadmap=False,
+             device=1):
+    print(config_path)
     model_dir = pathlib.Path(model_dir)
     if predict_test:
         result_name = 'predict_test'
@@ -600,6 +636,7 @@ def evaluate(config_path,
     eval_dataset = input_reader_builder.build(
         input_cfg,
         model_cfg,
+        include_roadmap,
         training=False,
         voxel_generator=voxel_generator,
         target_assigner=target_assigner)
@@ -643,9 +680,10 @@ def evaluate(config_path,
     print(f"avg forward time per example: {net.avg_forward_time:.3f}")
     print(f"avg postprocess time per example: {net.avg_postprocess_time:.3f}")
     if not predict_test:
-        gt_annos = [info["annos"] for info in eval_dataset.dataset.kitti_infos]
+        gt_annos = [info for info in eval_dataset.dataset.kitti_infos]
         if not pickle_result:
             dt_annos = kitti.get_label_annos(result_path_step)
+            #print(dt_annos)
         result = get_official_eval_result(gt_annos, dt_annos, class_names)
         print(result)
         result = get_coco_eval_result(gt_annos, dt_annos, class_names)
diff --git a/second/pytorch/utils.py b/second/pytorch/utils.py
index e49cbadc..fddd117b 100644
--- a/second/pytorch/utils.py
+++ b/second/pytorch/utils.py
@@ -11,7 +11,6 @@ def get_paddings_indicator(actual_num, max_num, axis=0):
     Returns:
         [type]: [description]
     """
-
     actual_num = torch.unsqueeze(actual_num, axis + 1)
     # tiled_actual_num: [N, M, 1]
     max_num_shape = [1] * len(actual_num.shape)
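For reference, get_paddings_indicator (touched above) builds the boolean mask that PFNLayer uses to zero out padded slots in each pillar. A simplified restatement of its behavior with toy tensors (axis 0 only; a sketch, not the repository code):

```python
import torch

def paddings_indicator(actual_num, max_num):
    # True for the first actual_num[i] slots of each row, False for padding.
    actual_num = torch.unsqueeze(actual_num, 1)  # [N, 1]
    idx = torch.arange(max_num).view(1, -1)      # [1, max_num]
    return idx < actual_num                      # [N, max_num] bool

print(paddings_indicator(torch.tensor([1, 3]), 4))
# tensor([[ True, False, False, False],
#         [ True,  True,  True, False]])
```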
diff --git a/second/sample.trch b/second/sample.trch
new file mode 100644
index 0000000000000000000000000000000000000000..d455d8b4efcec957a1a4f6e32789783e13bb618d
GIT binary patch
literal 1241
[base85 payload omitted]

diff --git a/second/train.trch b/second/train.trch
new file mode 100644
index 0000000000000000000000000000000000000000..e15dd305b4cac9eded24eed2e7c56ff5bd6ae631
GIT binary patch
literal 40011
[base85 payload omitted]

diff --git a/second/val_full.trch b/second/val_full.trch
new file mode 100644
index 0000000000000000000000000000000000000000..3d0cd7f5f8cc8e5bc8ba8c3a90acb8658d250127
GIT binary patch
literal 75092
[base85 payload omitted; the binary patches for the remaining new *.trch index files added by this commit are likewise omitted, as they carry no human-readable content]

diff --git a/torchplus/train/checkpoint.py b/torchplus/train/checkpoint.py
index 4db43ccf..b922e17d 100644
--- a/torchplus/train/checkpoint.py
+++ b/torchplus/train/checkpoint.py
@@ -104,7 +104,8 @@ def save(model_dir,
         min_step = min([get_step(name) for name in all_ckpts])
         ckpt_to_delete = "{}-{}.tckpt".format(model_name, min_step)
         all_ckpts.remove(ckpt_to_delete)
-        os.remove(str(Path(model_dir) / ckpt_to_delete))
+        if os.path.exists(str(Path(model_dir) / ckpt_to_delete)):
+            os.remove(str(Path(model_dir) / ckpt_to_delete))
     all_ckpts_filename = _ordered_unique([Path(f).name for f in all_ckpts])
     ckpt_info_dict['all_ckpts'][model_name] = all_ckpts_filename
     with open(ckpt_info_path, 'w') as f:
@@ -142,6 +143,7 @@ def try_restore_latest_checkpoints(model_dir, models):
     name_to_model = _get_name_to_model_map(models)
     for name, model in name_to_model.items():
         latest_ckpt = latest_checkpoint(model_dir, name)
+        print(latest_ckpt)
         if latest_ckpt is not None:
             restore(latest_ckpt, model)
From f4661dd543f5bc3104558f2a38d7a735cedd8bfc Mon Sep 17 00:00:00 2001
From: Gunjan Kholapure
Date: Wed, 19 Feb 2020 22:10:15 +0530
Subject: [PATCH 2/7] updated readme

---
 README.md | 172 +++---
 1 file changed, 8 insertions(+), 164 deletions(-)

diff --git a/README.md b/README.md
index f74b0ae8..02be3903 100644
--- a/README.md
+++ b/README.md
@@ -1,170 +1,14 @@
-# PointPillars
+For point_pillars installation refer to https://github.com/nutonomy/second.pytorch.
 
-Welcome to PointPillars.
+The code has been modified to run on Argoverse 3D dataset.
 
-This repo demonstrates how to reproduce the results from
-[_PointPillars: Fast Encoders for Object Detection from Point Clouds_](https://arxiv.org/abs/1812.05784) (to be published at CVPR 2019) on the
-[KITTI dataset](http://www.cvlibs.net/datasets/kitti/) by making the minimum required changes from the preexisting
-open source codebase [SECOND](https://github.com/traveller59/second.pytorch).
+Command to train final model (in point_pillars/second/) -
+python ./pytorch/train.py train --config_path=./configs/pointpillars/car/xyres_20_argo_upper.proto --model_dir=./models --device=0 --include_roi=True --dr_area=False --include_road_points=False
 
-This is not an official nuTonomy codebase, but it can be used to match the published PointPillars results.
+For inference on test set (in point_pillars/second) -
 
-**WARNING: This code is not being actively maintained. This code can be used to reproduce the results in the first version of the paper, https://arxiv.org/abs/1812.05784v1. For an actively maintained repository that can also reproduce PointPillars results on nuScenes, we recommend using [SECOND](https://github.com/traveller59/second.pytorch). We are not the owners of the repository, but we have worked with the author and endorse his code.**
+python pp_inference.py --config_path=./configs/pointpillars/car/xyres_20_argo_upper.proto --model_dir=./models --device=0 --model_path="path_to_model/voxelnet-xxx.tckpt" --save_path="path_to_save/xxx.pkl" --include_roi=1 --include_road_points=0 --dr_area=0
 
-![Example Results](https://raw.githubusercontent.com/nutonomy/second.pytorch/master/images/pointpillars_kitti_results.png)
+Command to get results in AB3DMOT format (in point_pillars/second) -
+python get_tracking_result.py --model_path=path_to_model --sv_dir=path_to_AB3DMOT/data/argo/car_3d_det_val_upper/ --set=val (or test)
-
-## Getting Started
-
-This is a fork of [SECOND for KITTI object detection](https://github.com/traveller59/second.pytorch) and the relevant
-subset of the original README is reproduced here.
-
-### Code Support
-
-ONLY supports python 3.6+, pytorch 0.4.1+. Code has only been tested on Ubuntu 16.04/18.04.
-
-### Install
-
-#### 1. Clone code
-
-```bash
-git clone https://github.com/nutonomy/second.pytorch.git
-```
-
-#### 2. Install Python packages
-
-It is recommend to use the Anaconda package manager.
-
-First, use Anaconda to configure as many packages as possible.
-```bash
-conda create -n pointpillars python=3.7 anaconda
-source activate pointpillars
-conda install shapely pybind11 protobuf scikit-image numba pillow
-conda install pytorch torchvision -c pytorch
-conda install google-sparsehash -c bioconda
-```
-
-Then use pip for the packages missing from Anaconda.
-```bash
-pip install --upgrade pip
-pip install fire tensorboardX
-```
-
-Finally, install SparseConvNet. This is not required for PointPillars, but the general SECOND code base expects this
-to be correctly configured.
-```bash
-git clone git@github.com:facebookresearch/SparseConvNet.git
-cd SparseConvNet/
-bash build.sh
-# NOTE: if bash build.sh fails, try bash develop.sh instead
-```
-
-Additionally, you may need to install Boost geometry:
-
-```bash
-sudo apt-get install libboost-all-dev
-```
-
-
-#### 3. Setup cuda for numba
-
-You need to add following environment variables for numba to ~/.bashrc:
-
-```bash
-export NUMBAPRO_CUDA_DRIVER=/usr/lib/x86_64-linux-gnu/libcuda.so
-export NUMBAPRO_NVVM=/usr/local/cuda/nvvm/lib64/libnvvm.so
-export NUMBAPRO_LIBDEVICE=/usr/local/cuda/nvvm/libdevice
-```
-
-#### 4. PYTHONPATH
-
-Add second.pytorch/ to your PYTHONPATH.
-
-### Prepare dataset
-
-#### 1. Dataset preparation
-
-Download KITTI dataset and create some directories first:
-
-```plain
-└── KITTI_DATASET_ROOT
-    ├── training    <-- 7481 train data
-    |   ├── image_2 <-- for visualization
-    |   ├── calib
-    |   ├── label_2
-    |   ├── velodyne
-    |   └── velodyne_reduced <-- empty directory
-    └── testing     <-- 7580 test data
-        ├── image_2 <-- for visualization
-        ├── calib
-        ├── velodyne
-        └── velodyne_reduced <-- empty directory
-```
-
-Note: PointPillar's protos use ```KITTI_DATASET_ROOT=/data/sets/kitti_second/```.
-
-#### 2. Create kitti infos:
-
-```bash
-python create_data.py create_kitti_info_file --data_path=KITTI_DATASET_ROOT
-```
-
-#### 3. Create reduced point cloud:
-
-```bash
-python create_data.py create_reduced_point_cloud --data_path=KITTI_DATASET_ROOT
-```
-
-#### 4. Create groundtruth-database infos:
-
-```bash
-python create_data.py create_groundtruth_database --data_path=KITTI_DATASET_ROOT
-```
-
-#### 5. Modify config file
-
-The config file needs to be edited to point to the above datasets:
-
-```bash
-train_input_reader: {
-  ...
-  database_sampler {
-    database_info_path: "/path/to/kitti_dbinfos_train.pkl"
-    ...
-  }
-  kitti_info_path: "/path/to/kitti_infos_train.pkl"
-  kitti_root_path: "KITTI_DATASET_ROOT"
-}
-...
-eval_input_reader: {
-  ...
-  kitti_info_path: "/path/to/kitti_infos_val.pkl"
-  kitti_root_path: "KITTI_DATASET_ROOT"
-}
-```
-
-
-### Train
-
-```bash
-cd ~/second.pytorch/second
-python ./pytorch/train.py train --config_path=./configs/pointpillars/car/xyres_16.proto --model_dir=/path/to/model_dir
-```
-
-* If you want to train a new model, make sure "/path/to/model_dir" doesn't exist.
-* If "/path/to/model_dir" does exist, training will be resumed from the last checkpoint.
-* Training only supports a single GPU.
-* Training uses a batchsize=2 which should fit in memory on most standard GPUs.
-* On a single 1080Ti, training xyres_16 requires approximately 20 hours for 160 epochs.
-
-
-### Evaluate
-
-
-```bash
-cd ~/second.pytorch/second/
-python pytorch/train.py evaluate --config_path= configs/pointpillars/car/xyres_16.proto --model_dir=/path/to/model_dir
-```
-
-* Detection result will saved in model_dir/eval_results/step_xxx.
-* By default, results are stored as a result.pkl file. To save as official KITTI label format use --pickle_result=False.
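The pp_inference.py command in the new README writes detections to the --save_path pickle. A quick sanity check before converting them for the tracker (a sketch only; the exact layout of the pickled object is an assumption, following the result.pkl convention mentioned above):

```python
import pickle

# "path_to_save/xxx.pkl" is the placeholder path from the inference command above.
with open("path_to_save/xxx.pkl", "rb") as f:
    detections = pickle.load(f)

print(type(detections), len(detections))  # expecting one entry per frame
print(detections[0])                      # inspect the first frame's fields
```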
-```bash
-git clone git@github.com:facebookresearch/SparseConvNet.git
-cd SparseConvNet/
-bash build.sh
-# NOTE: if bash build.sh fails, try bash develop.sh instead
-```
-
-Additionally, you may need to install Boost geometry:
-
-```bash
-sudo apt-get install libboost-all-dev
-```
-
-
-#### 3. Setup cuda for numba
-
-You need to add following environment variables for numba to ~/.bashrc:
-
-```bash
-export NUMBAPRO_CUDA_DRIVER=/usr/lib/x86_64-linux-gnu/libcuda.so
-export NUMBAPRO_NVVM=/usr/local/cuda/nvvm/lib64/libnvvm.so
-export NUMBAPRO_LIBDEVICE=/usr/local/cuda/nvvm/libdevice
-```
-
-#### 4. PYTHONPATH
-
-Add second.pytorch/ to your PYTHONPATH.
-
-### Prepare dataset
-
-#### 1. Dataset preparation
-
-Download KITTI dataset and create some directories first:
-
-```plain
-└── KITTI_DATASET_ROOT
-    ├── training    <-- 7481 train data
-    |   ├── image_2 <-- for visualization
-    |   ├── calib
-    |   ├── label_2
-    |   ├── velodyne
-    |   └── velodyne_reduced <-- empty directory
-    └── testing     <-- 7580 test data
-        ├── image_2 <-- for visualization
-        ├── calib
-        ├── velodyne
-        └── velodyne_reduced <-- empty directory
-```
-
-Note: PointPillar's protos use ```KITTI_DATASET_ROOT=/data/sets/kitti_second/```.
-
-#### 2. Create kitti infos:
-
-```bash
-python create_data.py create_kitti_info_file --data_path=KITTI_DATASET_ROOT
-```
-
-#### 3. Create reduced point cloud:
-
-```bash
-python create_data.py create_reduced_point_cloud --data_path=KITTI_DATASET_ROOT
-```
-
-#### 4. Create groundtruth-database infos:
-
-```bash
-python create_data.py create_groundtruth_database --data_path=KITTI_DATASET_ROOT
-```
-
-#### 5. Modify config file
-
-The config file needs to be edited to point to the above datasets:
-
-```bash
-train_input_reader: {
-  ...
-  database_sampler {
-    database_info_path: "/path/to/kitti_dbinfos_train.pkl"
-    ...
-  }
-  kitti_info_path: "/path/to/kitti_infos_train.pkl"
-  kitti_root_path: "KITTI_DATASET_ROOT"
-}
-...
-eval_input_reader: {
-  ...
-  kitti_info_path: "/path/to/kitti_infos_val.pkl"
-  kitti_root_path: "KITTI_DATASET_ROOT"
-}
-```
-
-
-### Train
-
-```bash
-cd ~/second.pytorch/second
-python ./pytorch/train.py train --config_path=./configs/pointpillars/car/xyres_16.proto --model_dir=/path/to/model_dir
-```
-
-* If you want to train a new model, make sure "/path/to/model_dir" doesn't exist.
-* If "/path/to/model_dir" does exist, training will be resumed from the last checkpoint.
-* Training only supports a single GPU.
-* Training uses a batchsize=2 which should fit in memory on most standard GPUs.
-* On a single 1080Ti, training xyres_16 requires approximately 20 hours for 160 epochs.
-
-
-### Evaluate
-
-
-```bash
-cd ~/second.pytorch/second/
-python pytorch/train.py evaluate --config_path= configs/pointpillars/car/xyres_16.proto --model_dir=/path/to/model_dir
-```
-
-* Detection result will saved in model_dir/eval_results/step_xxx.
-* By default, results are stored as a result.pkl file. To save as official KITTI label format use --pickle_result=False.
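The rewritten README above drives training and inference through the text-format protobuf configs under second/configs/. A minimal sketch of loading one of these configs programmatically, assuming the pipeline proto of the upstream second.pytorch codebase (`pipeline_pb2.TrainEvalPipelineConfig` is taken from that codebase, not from this patch series):

```python
from google.protobuf import text_format

# Assumption: upstream second.pytorch ships this pipeline proto.
from second.protos import pipeline_pb2

config = pipeline_pb2.TrainEvalPipelineConfig()
with open("./configs/pointpillars/car/xyres_20_argo_upper.proto", "r") as f:
    # The configs are protobuf text format, like the KITTI snippet quoted above.
    text_format.Merge(f.read(), config)

# The reader paths referenced throughout these patches live here:
print(config.train_input_reader.kitti_info_path)
print(config.train_input_reader.kitti_root_path)
```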
From fe6bba267d15e8502a1243a3137fdbf5ea1cd9ba Mon Sep 17 00:00:00 2001
From: Gunjan Kholapure
Date: Wed, 19 Feb 2020 22:12:53 +0530
Subject: [PATCH 3/7] updated readme

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 02be3903..8a48d011 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,7 @@ For point_pillars installation refer to https://github.com/nutonomy/second.pytor
 The code has been modified to run on Argoverse 3D dataset.
 Command to train the final model (in point_pillars/second/) -
+
 python ./pytorch/train.py train --config_path=./configs/pointpillars/car/xyres_20_argo_upper.proto --model_dir=./models --device=0 --include_roi=True --dr_area=False --include_road_points=False
 For inference on the test set (in point_pillars/second) -
@@ -10,5 +11,6 @@ For inference on the test set (in point_pillars/second) -
 python pp_inference.py --config_path=./configs/pointpillars/car/xyres_20_argo_upper.proto --model_dir=./models --device=0 --model_path="path_to_model/voxelnet-xxx.tckpt" --save_path="path_to_save/xxx.pkl" --include_roi=1 --include_road_points=0 --dr_area=0
 Command to get results in AB3DMOT format (in point_pillars/second) -
+
 python get_tracking_result.py --model_path=path_to_model --sv_dir=path_to_AB3DMOT/data/argo/car_3d_det_val_upper/ --set=val (or test)

From 528324c609ffcd785855b6c41a4f660727f25371 Mon Sep 17 00:00:00 2001
From: Gunjan Kholapure
Date: Wed, 19 Feb 2020 22:16:10 +0530
Subject: [PATCH 4/7] updated readme

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 8a48d011..fbaea398 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 For point_pillars installation refer to https://github.com/nutonomy/second.pytorch.
-The code has been modified to run on Argoverse 3D dataset.
+The code has been modified to run on the Argoverse 3D dataset. Changes are mainly in the data loading code in point_pillars/second/data/preprocess.py
 Command to train the final model (in point_pillars/second/) -

From 430c38d29329bc884eca823b6e8a866f1758dd47 Mon Sep 17 00:00:00 2001
From: GunjanKholapure
Date: Mon, 9 Mar 2020 16:46:16 +0530
Subject: [PATCH 5/7] Update README.md

---
 README.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/README.md b/README.md
index fbaea398..3622e665 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,10 @@ Command to train the final model (in point_pillars/second/) -
 python ./pytorch/train.py train --config_path=./configs/pointpillars/car/xyres_20_argo_upper.proto --model_dir=./models --device=0 --include_roi=True --dr_area=False --include_road_points=False
+A pre-trained model can be downloaded from -
+https://drive.google.com/file/d/1ebtmoQSJdfIKVVJ93GSCdeGy-ZGqGXyN/view?usp=sharing
+
+
 For inference on the test set (in point_pillars/second) -

From c0ce336ec5f5f592ea2976f1202bbadc05253d6d Mon Sep 17 00:00:00 2001
From: GunjanKholapure
Date: Mon, 16 Mar 2020 11:06:32 +0530
Subject: [PATCH 6/7] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 3622e665..8c705ac3 100644
--- a/README.md
+++ b/README.md
@@ -16,5 +16,5 @@ python pp_inference.py --config_path=./configs/pointpillars/car/xyres_20_argo_up
 Command to get results in AB3DMOT format (in point_pillars/second) -
-python get_tracking_result.py --model_path=path_to_model --sv_dir=path_to_AB3DMOT/data/argo/car_3d_det_val_upper/ --set=val (or test)
+python get_tracking_result.py --model_path=path_to_model --sv_dir=path_to_AB3DMOT/data/argo/car_3d_det_val/ --set=val (or test)
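pp_inference.py writes its detections to the pickle file given by --save_path, and get_tracking_result.py reshapes them into the directory layout AB3DMOT expects. A minimal sketch for inspecting such a result file (the path is a placeholder, and the per-record layout is not specified in these patches, so only container-level introspection is shown):

```python
import pickle

# Placeholder path: whatever was passed to pp_inference.py as --save_path.
with open("path_to_save/xxx.pkl", "rb") as f:
    detections = pickle.load(f)

# Inspect the container without assuming a particular per-record schema.
print(type(detections))
if hasattr(detections, "__len__"):
    print("number of records:", len(detections))
```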
From c66488f0a59bdd6239a97caaa997f8150390d42c Mon Sep 17 00:00:00 2001
From: Gunjan Kholapure
Date: Mon, 16 Mar 2020 11:46:46 +0530
Subject: [PATCH 7/7] bev representation of results

---
 second/core/non_max_suppression/nms_gpu.py |   8 +-
 second/result_vis.py                       | 205 +++++++++++++++++++++
 2 files changed, 209 insertions(+), 4 deletions(-)
 create mode 100644 second/result_vis.py

diff --git a/second/core/non_max_suppression/nms_gpu.py b/second/core/non_max_suppression/nms_gpu.py
index 18337137..7b574fcb 100644
--- a/second/core/non_max_suppression/nms_gpu.py
+++ b/second/core/non_max_suppression/nms_gpu.py
@@ -166,7 +166,7 @@ def nms_gpu(dets, nms_overlap_thresh, device_id=1):
     return list(order[keep])

-def nms_gpu_cc(dets, nms_overlap_thresh, device_id=0):
+def nms_gpu_cc(dets, nms_overlap_thresh, device_id=1):
     boxes_num = dets.shape[0]
     keep = np.zeros(boxes_num, dtype=np.int32)
     scores = dets[:, 4]
@@ -452,7 +452,7 @@ def rotate_nms_kernel(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask):
             dev_mask[cur_box_idx * col_blocks + col_start] = t

-def rotate_nms_gpu(dets, nms_overlap_thresh, device_id=0):
+def rotate_nms_gpu(dets, nms_overlap_thresh, device_id=1):
     """nms in gpu. WARNING: this function can provide right result but its performance isn't be tested
@@ -523,7 +523,7 @@ def rotate_iou_kernel(N, K, dev_boxes, dev_query_boxes, dev_iou):
                 block_boxes[tx * 5:tx * 5 + 5])

-def rotate_iou_gpu(boxes, query_boxes, device_id=0):
+def rotate_iou_gpu(boxes, query_boxes, device_id=1):
     """rotated box iou running in gpu. 500x faster than cpu version (take 5ms in one example with numba.cuda code). convert from [this project](
@@ -617,7 +617,7 @@ def rotate_iou_kernel_eval(N,
               criterion)

-def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
+def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=1):
     """rotated box iou running in gpu. 500x faster than cpu version (take 5ms in one example with numba.cuda code). convert from [this project](
diff --git a/second/result_vis.py b/second/result_vis.py
new file mode 100644
index 00000000..d51560c2
--- /dev/null
+++ b/second/result_vis.py
@@ -0,0 +1,205 @@
+import json
+import cv2
+import argoverse
+from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader
+
+import os
+import os.path
+import numpy as np
+import time
+import torch
+from torch.utils.data import Dataset, DataLoader
+from torchvision import transforms
+import math
+import pickle
+import sys
+from pprint import pprint
+import operator
+
+
+from argoverse.map_representation.map_api import ArgoverseMap
+from shapely.geometry.polygon import Polygon
+
+import matplotlib.pyplot as plt
+#%matplotlib inline
+am = ArgoverseMap()
+
+def quaternion_to_euler(quat):
+    w,x,y,z = quat
+
+    t0 = +2.0 * (w * x + y * z)
+    t1 = +1.0 - 2.0 * (x * x + y * y)
+    roll = math.atan2(t0, t1)
+    t2 = +2.0 * (w * y - z * x)
+    t2 = +1.0 if t2 > +1.0 else t2
+    t2 = -1.0 if t2 < -1.0 else t2
+    pitch = math.asin(t2)
+    t3 = +2.0 * (w * z + x * y)
+    t4 = +1.0 - 2.0 * (y * y + z * z)
+    yaw = math.atan2(t3, t4)
+    return yaw, pitch, roll
+
+def center_to_argo_2d_box(location,dimensions,yaw):
+    crnrs = []
+    for i in range(len(location)):
+        x,y,z = location[i]
+        l,h,w = dimensions[i]
+        corners = np.array([ [l/2,w/2],[l/2,-w/2],[-l/2,-w/2],[-l/2,w/2] ])
+
+        rot_mat = np.array( [[ np.cos(yaw[i]) ,-np.sin(yaw[i]) ],
+                             [np.sin(yaw[i]), np.cos(yaw[i])] ] )
+        rot_corners = np.dot(rot_mat,corners.T).T
+        rot_corners = rot_corners + [x,y]
+        crnrs.append(rot_corners)
+    return crnrs
+
+
+track_id = {}
+gt_track_id = {}
+cnt,gt_cnt = 1,1
+argoverse_loader = ArgoverseTrackingLoader("./../../argodataset/argoverse-tracking/val")
+
+for seq in range(len(argoverse_loader)):
+    argoverse_data = argoverse_loader[seq]
+    seq_name = argoverse_loader.log_list[seq]
+    print(seq_name)
+    nlf = argoverse_data.num_lidar_frame
+    gt_to_pred = {}
+    cur_time = time.time()
+    for frame in range(nlf):
+        points = argoverse_data.get_lidar(frame)
+        gt_objects = argoverse_data.get_label_object(frame)
+
+
+        points[:,[0,1,2]] = points[:,[1,2,0]]
+        points[:,[0,1]] = -points[:,[0,1]]
+
+        bev = np.ones((5000,5000,3))
+        for pt in points:
+            x,z = int(np.ceil((pt[0]+250)/0.1)), int(np.ceil((pt[2]+250)/0.1))
+            if 0<=x<5000 and 0<=z<5000:
+                bev[x,z] = 0
+
+
+        lt = argoverse_data.lidar_timestamp_list[frame]
+        path = "./../../AB3DMOT/argo_results/val_final757k_t25.0_a22_h3_ioudot10.0_ka3_v0.3_d2.0/"+seq_name +"/per_sweep_annotations_amodal/tracked_object_labels_" + str(lt) + ".json"
+
+
+        print(time.time()-cur_time)
+        cur_time = time.time()
+        objects = json.load(open(path,'r'))
+        save_corners = []
+
+        obj_centers,obj_ids,obj_corners = [],[],[]
+
+        tp_cnt,fp_cnt,fn_cnt,sw_cnt = 0,0,0,0
+        for obj in objects:
+            if obj['label_class'] == "VEHICLE":
+
+                center = [[ obj['center']['x'], obj['center']['y'], obj['center']['z'] ]]
+                dimensions = [[obj['length'],obj['height'],obj['width']]]
+                quat = (obj['rotation']['w'], obj['rotation']['x'], obj['rotation']['y'], obj['rotation']['z'])
+
+                yaw,pitch,roll = quaternion_to_euler(quat)
+                corners = center_to_argo_2d_box(center,dimensions,[yaw])[0]
+                corners = corners[:,[0,1]]
+                corners[:,[1]] = -corners[:,[1]]
+
+                if obj['track_label_uuid'] not in track_id:
+                    track_id[obj['track_label_uuid']] = cnt
+                    if cnt==3:
+                        print(obj['track_label_uuid'])
+                    cnt += 1
+
+                obj_centers.append(center[0])
+                obj_ids.append(track_id[ obj['track_label_uuid'] ] )
+                obj_corners.append(corners)
+
+
+        obj_centers = np.array(obj_centers)
+        mark_obj = [False]*len(obj_centers)
+        for gt_obj in gt_objects:
+            if gt_obj.label_class == "VEHICLE":
+                bbox_pts = gt_obj.as_3d_bbox()
+                centroid = np.reshape(np.mean(bbox_pts,axis=0), (1,3) )
+                test_vec = (centroid - obj_centers)
+                #print(centroid.shape,obj_centers.shape,test_vec.shape)
+                #print(centroid,obj_centers,test_vec)
+                l2_dist = np.linalg.norm(test_vec,axis=1)
+                closest_det,closest_id = np.min( l2_dist),np.argmin(l2_dist)
+                #print(closest_det,closest_id,obj_ids[closest_id],centroid, test_vec[closest_id])
+                #print(dir(gt_obj))
+
+                if gt_obj.track_id not in gt_track_id:
+                    gt_track_id[gt_obj.track_id] = gt_cnt
+                    gt_cnt += 1
+
+
+
+                corners = gt_obj.as_2d_bbox()
+                corners = corners[:,[0,1]]
+                corners[:,[1]] = -corners[:,[1]]
+                corners[[2,3],:] = corners[[3,2],:]
+
+                save_corners.append(corners)
+                corners = (corners +[250,250])/0.1
+                xp,yp = np.mean(corners,axis=0)
+                font = cv2.FONT_HERSHEY_SIMPLEX
+                bottomLeftCornerOfText = (int(xp-30),int(yp-10))
+                fontScale = 1.2
+                fontColor = (0,0,255)
+                lineType = 8
+
+                if closest_det<2 and not mark_obj[closest_id]:
+                    if gt_obj.track_id not in gt_to_pred:
+                        gt_to_pred[ gt_obj.track_id ] = obj_ids[closest_id]
+
+                    mark_obj[closest_id] = True
+
+                    if gt_to_pred[gt_obj.track_id] != obj_ids[closest_id]:
+                        sw_cnt += 1
+                        cv2.polylines(bev, np.int32([corners]), True, (255, 255, 0), 2)
+                        cv2.putText(bev,f"SW", bottomLeftCornerOfText, font, fontScale,fontColor,lineType)
+                    else:
+                        tp_cnt+=1
+                        cv2.putText(bev,f"TP", bottomLeftCornerOfText, font, fontScale,fontColor,lineType)
+                        cv2.polylines(bev, np.int32([corners]), True, (255, 0, 0), 2)
+
+                else:
+                    fn_cnt+=1
+                    cv2.putText(bev,f"FN", bottomLeftCornerOfText, font, fontScale,fontColor,lineType)
+                    cv2.polylines(bev, np.int32([corners]), True, (0, 0, 255), 2)
+
+
+        #if not all(mark_obj):
+        #    print("yes",seq,frame)
+        for fp_id,tps in enumerate(mark_obj):
+            if not tps:
+                fp_cnt += 1
+                fp_corners = obj_corners[fp_id]
+                fp_corners = (fp_corners +[250,250])/0.1
+                xp,yp = np.mean(fp_corners,axis=0)
+                cv2.polylines(bev, np.int32([fp_corners]), True, (0, 255, 0), 2)
+                font = cv2.FONT_HERSHEY_SIMPLEX
+                bottomLeftCornerOfText = (int(xp-30),int(yp-10))
+                fontScale = 1.2
+                fontColor = (0,0,255)
+                lineType = 8
+
+                cv2.putText(bev,f"FP", bottomLeftCornerOfText, font, fontScale,fontColor,lineType)
+
+        fontScale=1.5
+        cv2.putText(bev,f"TP:{int(tp_cnt)}",(10,100), font, fontScale,(0,0,0),lineType)
+        cv2.putText(bev,f"FN:{fn_cnt}", (10,150), font, fontScale,(0,0,0),lineType)
+        cv2.putText(bev,f"FP:{fp_cnt}", (10,200), font, fontScale,(0,0,0),lineType)
+        cv2.putText(bev,f"SW:{sw_cnt}", (10,250), font, fontScale,(0,0,0),lineType)
+
+
+        plt.figure(figsize=(20,20))
+        plt.imshow(bev)
+        if not os.path.exists(f"./track_analysis/{seq_name}/"):
+            os.makedirs(f"./track_analysis/{seq_name}/")
+        plt.savefig(f"./track_analysis/{seq_name}/pred_"+str(frame)+ ".png")
+        plt.close()
+
+print(cnt,gt_cnt)
\ No newline at end of file
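result_vis.py rasterizes each lidar sweep into the 5000x5000 BEV canvas one point at a time, which is slow in pure Python. A vectorized NumPy equivalent of that inner loop (same +/-250 m extent and 0.1 m cell size as the script; the function wrapper is illustrative, not part of the patch):

```python
import numpy as np

def rasterize_bev(points, extent=250.0, resolution=0.1, size=5000):
    """Mark occupied BEV cells, mirroring the per-point loop in result_vis.py."""
    bev = np.ones((size, size, 3))
    # Same mapping as the script: ceil((coordinate + extent) / resolution).
    x = np.ceil((points[:, 0] + extent) / resolution).astype(np.int64)
    z = np.ceil((points[:, 2] + extent) / resolution).astype(np.int64)
    # Keep only points that land on the canvas, like the in-bounds guard above.
    valid = (x >= 0) & (x < size) & (z >= 0) & (z < size)
    bev[x[valid], z[valid]] = 0
    return bev
```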