This repository has been archived by the owner on Apr 25, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 14
/
test_evaluation.py
164 lines (142 loc) · 5.13 KB
/
test_evaluation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
import sys
import cv2
import os
import datetime
import json
import codecs
import numpy as np
from tqdm import tqdm
import tensorflow as tf
# Enable on-demand GPU memory growth so TensorFlow does not grab all VRAM up
# front. Guard the list access: on a machine with no visible GPU the original
# unconditional gpu[0] raised IndexError at import time.
gpu = tf.config.experimental.list_physical_devices('GPU')
if gpu:
    tf.config.experimental.set_memory_growth(gpu[0], True)
from model.siamese.model_generator import base_models
from model.detection_model.detection_model import DefaultDetectionModel
from model.siamese.siamese_model import DefaultSiameseModel
from model.siamese.classification_model import ClassificationModel
from model.tracker.default_tracker import DefaultTracker
from model.tracker.simple_siamese_tracker import SimpleSiameseTracker
from model.tracker.tracker import Tracker
from model.tracker.avg_embedding_tracker import AvgEmbeddingTracker
from model.tracker.default_tracker_with_path_correction import (
DefaultTrackerWithPathCorrection,
)
from model.model import Model
from data.evaluator import Evaluator
from data.names import names
from helpers.score_processing import extract_scores, print_path_comparison
def generate_test_dir(basemodel, tracker, video):
    """Create (if needed) and return a timestamped output directory.

    Layout: experiments/tracking/<basemodel>/<tracker>/<video>_<timestamp>

    Args:
        basemodel: name of the embedding backbone (e.g. "MobileNetV2").
        tracker: name of the tracker variant being evaluated.
        video: identifier of the evaluated video file.

    Returns:
        The cwd-relative path of the created directory.
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    out = os.path.join(
        "experiments", "tracking", basemodel, tracker, video + "_" + timestamp
    )
    # makedirs builds the whole chain in one call and, with exist_ok, is safe
    # against races. The original chain of isdir()/mkdir() pairs failed
    # outright when the "experiments" or "experiments/tracking" parents did
    # not already exist.
    os.makedirs(out, exist_ok=True)
    return out
# Resolve all resource paths relative to this file so the script can be run
# from any working directory.
dirname = os.path.dirname(__file__)
# Trained siamese-network weights for the MobileNetV2 backbone.
weights_dir = os.path.join(
    dirname, "model/siamese/weights", "MobileNetV2", "siam-118-0.0001-1.0a_0.0633.h5"
)
# Precomputed embedding vectors (TSV) consumed by AvgEmbeddingTracker.
vectors_dir = os.path.join(
    dirname, "model/siamese/vectors", "vecs-conc-MobileNetV2.tsv"
)
# Row-aligned metadata (labels) for the vectors file above.
meta_dir = os.path.join(
    dirname, "model/siamese/vectors", "meta-conc-MobileNetV2.tsv"
)
# First key of base_models is the backbone name — relies on dict insertion
# order putting MobileNetV2 first; TODO confirm against model_generator.
base_model = list(base_models.keys())[0] # MobileNetV2
# model = Model(DefaultDetectionModel(), DefaultSiameseModel(), DefaultTracker(names))
# model = Model(DefaultDetectionModel(), DefaultSiameseModel(), Tracker(7))
# Test videos; index i of each parallel list below refers to the same
# recording (video, annotation file, frame offset, pig count).
videos_paths = [
    "11_nursery_high_activity_day-cropped.mp4",
    "12_nursery_low_activity_day-cropped.mp4",
    "13_nursery_low_activity_night-cropped.mp4",
    "14_nursery_medium_activity_day-cropped.mp4",
    "15_nursery_medium_activity_night-cropped.mp4"
]
# Ground-truth tracking annotations, one file per video above.
annotations_paths = [
    "data/tracking/11/pigs_tracking.json",
    "data/tracking/12/pigs_tracking.json",
    "data/tracking/13/pigs_tracking.json",
    "data/tracking/14/pigs_tracking.json",
    "data/tracking/15/pigs_tracking.json",
]
# Frame offset at which evaluation starts for each video.
start_times = [6000, 6000, 6000, 6000, 6000]
# Number of animals per video — used to size the Kalman tracker.
num_of_pigs_per_video = [16, 15, 16, 16, 16]
# Detection and siamese models are built once and shared across all runs.
detection_obj = DefaultDetectionModel()
siamese_obj = DefaultSiameseModel(weights_path=weights_dir, base_model=base_model)
# Tracker variants to evaluate. NOTE: the original list was missing a comma
# between "AvgEmbeddingTracker" and "KalmanTracker", so implicit string
# concatenation merged them into one bogus entry
# ("AvgEmbeddingTrackerKalmanTracker") — KalmanTracker was never run and the
# merged name matched no branch, leaving selectedTracker as None.
trackers = [
    "DefaultTracker",
    "AvgEmbeddingTracker",
    "KalmanTracker",
]
# Evaluate every tracker variant on every test video and write annotations,
# predicted paths, per-object comparisons and scores into a timestamped
# experiment directory.
for idx in range(0, len(videos_paths)):
    for tracker in trackers:
        # Map the tracker name to a concrete instance.
        # NOTE(review): the "DefaultTracker" label maps to
        # DefaultTrackerWithPathCorrection, not DefaultTracker — confirm
        # this is intentional.
        if tracker == "DefaultTracker":
            selectedTracker = DefaultTrackerWithPathCorrection(names)
        elif tracker == "AvgEmbeddingTracker":
            selectedTracker = AvgEmbeddingTracker(
                names, vectors_path=vectors_dir, meta_path=meta_dir
            )
        elif tracker == "KalmanTracker":
            selectedTracker = Tracker(num_of_pigs_per_video[idx])
        else:
            # Previously an unrecognised name fell through and passed
            # selectedTracker=None into Model, failing far from the root
            # cause. Skip loudly instead.
            print("Unknown tracker '%s', skipping" % tracker)
            continue
        video_path = videos_paths[idx]
        annotation_path = annotations_paths[idx]
        offset_val = start_times[idx]
        model = Model(
            detection_obj,
            siamese_obj,
            selectedTracker,
        )
        evaluator = Evaluator(
            model,
            videos_paths,
            annotations_paths,
        )
        scores, annotations, paths = evaluator.run_evaluation_for_video(
            video_path,
            annotation_path,
            "tracking_only",
            video_frame_offset=offset_val,
            compare_parts=True,
            compare_part_interval=5,
            video_out_path=None,
        )
        scores = extract_scores(scores, paths)
        # generate_test_dir already creates the directory, so the extra
        # isdir()/mkdir() pair the original had here was redundant.
        out_dir = generate_test_dir("MobileNetV2", tracker, video_path)
        # Per-object comparison plots between ground truth (aligned to the
        # evaluation offset) and the predicted path.
        for obj_id, annotation in annotations.items():
            print_path_comparison(
                out_dir,
                annotation[offset_val: offset_val + len(paths[obj_id])],
                paths[obj_id],
                obj_id,
                interval=scores[obj_id]["intervals"]["interval"],
                parts=scores[obj_id]["intervals"]["parts"],
            )
        # Use context managers so each JSON file is flushed and closed even
        # if a later iteration raises; the original codecs.open handles were
        # never closed.
        with codecs.open(
            os.path.join(out_dir, "annotations.json"), "w", encoding="utf-8"
        ) as f:
            json.dump(annotations, f, sort_keys=False, separators=(",", ":"))
        with codecs.open(
            os.path.join(out_dir, "out.json"), "w", encoding="utf-8"
        ) as f:
            json.dump(paths, f, sort_keys=False, separators=(",", ":"))
        with codecs.open(
            os.path.join(out_dir, "scores.json"), "w", encoding="utf-8"
        ) as f:
            json.dump(scores, f, indent=2, sort_keys=False, separators=(",", ":"))