-
Notifications
You must be signed in to change notification settings - Fork 1
/
ProcessVideos.m
343 lines (319 loc) · 12.6 KB
/
ProcessVideos.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
%% Track Probe in Video
% Detect and localize the probe in video frames.
%
% ## Usage
% Modify the parameters and the paths to input data in the first code
% section below, then run.
%
% ## Probe Detection Assumptions and Limitations
% - See notes on this topic in 'DetectProbeScript.m'.
%
% ## Input
%
% ### Probe detection model
% A '.mat' file containing several variables, which is the output of
% 'CreateProbeDetectionModel.m'. Refer to the documentation of
% 'CreateProbeDetectionModel.m' for details.
%
% ### Colour noise parameters
% Required if at least one of the parameters 'uniform_background_initial'
% or 'uniform_background_final' is false.
%
% A '.mat' file containing a 'rgb_sigma_polyfit' variable, as output by the
% script '.\EstimateRGBStandardDeviations.m'. 'rgb_sigma_polyfit' describes
% the variation in RGB channel standard deviations with RGB values in the
% image. This information should be computed from images taken
% under the same conditions and with the same camera parameters as the
% videos in which the probe is to be detected.
%
% 'rgb_sigma_polyfit' is used to compute a colour distribution for the
% image background. Instead, if 'uniform_background_initial' is true, a
% uniform distribution is used during the first colour detection pass.
% Likewise, if 'uniform_background_final' is true, a uniform distribution
% is used during the second colour pass.
%
% ### Camera calibration
% A '.mat' file containing a 'cameraParams' variable. The variable is a
% structure of class 'cameraParameters' describing the camera, such as
% output by the MATLAB Camera Calibrator app.
%
% Note: The camera should be calibrated using the same units of measurement
% as used for the measurements of the probe in the file referred to by
% the 'model_filename' variable in the file referred to by
% 'detection_model_filename' below.
%
% ### Videos containing the probe
%
% This script can process either live video, or one or more saved videos.
% Refer to the MATLAB documentation for the 'vision.VideoFileReader' system
% object concerning compatible video file formats. The videos should have
% been captured under the same camera parameters and, ideally, the same
% lighting conditions, as used during probe detection model creation.
%
% Video frames will be corrected for lens distortion during processing.
%
% ## Output
%
% One output file of each of the following types will be generated for each
% input video source, except in the case of CSV files, if 'concatenate_csv'
% is `true` (see below). Output files will be saved in the directories
% referred to by the 'output_*_directory' variables below. If an output
% directory variable is empty, no output data files of the corresponding
% type will be produced.
%
% ### Input videos
%
% Copies of the input videos, or videos captured from cameras, will be
% saved in the directory referred to by 'output_raw_video_directory'
% below. These videos are not corrected for lens distortion.
%
% ### Annotated videos
%
% The input videos will be annotated with the reprojection of the probe,
% and saved in the directory referred to by
% 'output_annotated_video_directory' below. These videos are corrected for
% lens distortion.
%
% The annotations on the videos are those described in the documentation of
% 'trackInVideo()' (refer to 'trackInVideo.m').
%
% ### Point clouds
%
% A CSV file will be generated containing frame numbers, the corresponding
% 3D locations of the probe tip, and the corresponding 3D unit direction
% vectors from the probe tip to its other end. Additionally, the first
% column of the CSV file will be the index of the video (starting from 1)
% in the list of videos processed by this script. For videos captured
% directly from cameras, the index is one greater than the variable
% 'video_index', if it exists, or is 1, if 'video_index' does not exist.
% 'video_index' is left in the workspace for use by future executions of
% this script.
%
% The CSV file will be saved in the directory referred to by
% 'output_point_cloud_directory' below. If 'concatenate_csv' is `true`, the
% CSV files for all input videos, or for live videos captured until the
% 'video_index' variable is cleared from the workspace, will be combined
% into one. The name of the CSV file will be the name ordinarily given to
% the CSV file for only the first video.
%
% The output point cloud is also described in the documentation of the
% `out_filenames.out_csv` input argument of 'trackInVideo()' (refer to
% 'trackInVideo.m').
%
% ### Probe detection and localization results
%
% A '.mat' file containing the following variables will be saved in the
% directory referred to by 'output_data_directory' below:
%
% - 'model_filename': The path to the file containing user-provided
% measurements of the probe in the structure 'probe'. The 'probe'
% structure is provided to this script via the output of
% 'CreateProbeDetectionModel.m', and so 'model_filename' is a direct
% parameter of 'CreateProbeDetectionModel.m' and an indirect parameter of
% this script. It is retrieved from the output of
% 'CreateProbeDetectionModel.m', and copied to the output of this script
% for completeness.
%
% - 'localizations': The `localizations` output argument of
% 'trackInVideo()'. Refer to the documentation of 'trackInVideo.m' for
% details.
%
% - 'detections': The `detections` output argument of 'trackInVideo()'.
% Refer to the documentation of 'trackInVideo.m' for details.
%
% Additionally, the file contains the values of all parameters in the first
% section of the script below, for reference. (Specifically, those listed
% in `parameters_list`, which should be updated if the set of parameters is
% changed.)
%
% ## References
% - M.-C. Chuang, J.-N. Hwang, K. Williams and R. Towler. "Tracking Live
% Fish from Low-Contrast and Low-Frame-Rate Stereo Videos". IEEE
% Transactions on Circuits and Systems for Video Technology, vol. 25, no.
% 1, pp. 167-179, Jan. 2015.
% - T. Gevers and H. Stokman. "Robust Histogram Construction from Color
% Invariants for Object Recognition". IEEE Transactions on Pattern
% Analysis and Machine Intelligence, vol. 26, no. 1, pp. 113-118, Jan.
% 2004.
% - R. Hartley and A. Zisserman. Multiple View Geometry in Computer Vision,
% 2nd Edition. Cambridge, UK: Cambridge University Press, 2003.
% Bernard Llanos
% Supervised by Dr. Y.H. Yang
% University of Alberta, Department of Computing Science
% File created January 19, 2018
%% Input data and parameters

% Names of the parameters to save with the results (see the 'Output'
% section in the header comments above)
parameters_list = {...
    'detection_model_filename',...
    'rgb_sigma_filename',...
    'camera_params_filename',...
    'concatenate_csv',...
    'first_video_index',...
    'video_filenames',...
    'use_kernel_estimators',...
    'detectionParams',...
    'localizationParams',...
    'options'...
};

% Probe detection model ('.mat' output of 'CreateProbeDetectionModel.m')
detection_model_filename = '';
% RGB noise parameters ('.mat' file containing 'rgb_sigma_polyfit')
rgb_sigma_filename = '';
% Camera calibration ('.mat' file containing 'cameraParams')
camera_params_filename = '';

% Wildcard for 'ls()' to find the videos to process.
% Leave empty (`[]`) to read live video
input_video_wildcard = '';

% Output directories; leave any of them empty (`[]`) to disable the
% corresponding type of output file
output_raw_video_directory = [];       % Raw (uncorrected) copies of input videos
output_annotated_video_directory = []; % Annotated, distortion-corrected videos
output_point_cloud_directory = [];     % CSV-format point clouds
output_data_directory = [];            % Comprehensive numerical results

% Combine CSV files across videos into a single file
concatenate_csv = true;

% Video processing options
% Refer to the documentation of the `options` parameter of 'trackInVideo()'
% in 'trackInVideo.m'. Some fields of this parameter structure will be
% filled automatically later in this script.
options = struct(...
    'video_mode', 'webcam',... % Determines which camera and video mode to use
    'silent', false,...
    'frame_rate', 30,...
    'record_only', false,...
    'show_errors', true...
);

% Parameters which do not usually need to be changed
run('SetFixedParameters.m')
%% Find the videos

% Enumerate the video files matching the wildcard, or set up a single
% placeholder entry (`[]`) representing live video capture.
if ~isempty(input_video_wildcard)
    % Find all filenames matching the wildcard
    video_filenames = listFiles(input_video_wildcard);
    n_videos = length(video_filenames);
else
    % Live video: one "video", with an empty filename as a sentinel
    video_filenames = {[]};
    n_videos = 1;
end
%% Load calibration data
model_variables_required = { 'probe', 'model_filename' };
if use_kernel_estimators
model_variables_required(end + 1) = {'probe_color_distributions_kernel'};
else
model_variables_required(end + 1) = {'probe_color_distributions_gaussian'};
end
load(detection_model_filename, model_variables_required{:});
if ~all(ismember(model_variables_required, who))
error('One or more of the probe detection model variables is not loaded.')
end
if use_kernel_estimators
probe_color_distributions = probe_color_distributions_kernel;
else
probe_color_distributions = probe_color_distributions_gaussian;
end
if ~uniform_background_initial || ~uniform_background_final
load(rgb_sigma_filename, 'rgb_sigma_polyfit');
if ~exist('rgb_sigma_polyfit', 'var')
error('No variable called ''rgb_sigma_polyfit'' is loaded (which would contain the camera RGB noise model).')
end
else
rgb_sigma_polyfit = [];
end
load(camera_params_filename, 'cameraParams');
if ~exist('cameraParams', 'var')
error('No camera calibration (`cameraParams`) found in ''%s''.', camera_params_filename)
end
%% Process each video (or live video)

% Variables to store in the output '.mat' data file
save_variables_list = [ parameters_list, {...
    'model_filename',...
    'localizations',...
    'detections'...
} ];

% True if at least one type of output file is to be produced
any_output_files = ~(...
    isempty(output_raw_video_directory) &&...
    isempty(output_annotated_video_directory) &&...
    isempty(output_point_cloud_directory) &&...
    isempty(output_data_directory)...
);

% 'video_index' persists in the workspace between runs of this script, so
% that consecutive live video captures are numbered consecutively
if ~exist('video_index', 'var')
    video_index = 1;
else
    video_index = video_index + 1;
end
first_video_index = video_index;

% The point cloud filename also persists between runs, so that CSV output
% can be concatenated across live video captures
if ~exist('point_cloud_filename', 'var')
    point_cloud_filename = [];
end
% Process each input video (or the single live video placeholder) in turn:
% generate its output filenames, run probe tracking, and save the results.
for i = 1:n_videos
    % Generate output filenames
    % Live video (empty filename sentinel) uses a timestamp as the base
    % name; video files use their own base name. Neither 'cdate' nor 'name'
    % is defined when no output files are requested, but in that case they
    % are never read below.
    if any_output_files && isempty(video_filenames{i})
        % Filesystem-safe timestamp derived from datestr format 31
        % ('yyyy-mm-dd HH:MM:SS'), with separators replaced by underscores
        cdate = replace(datestr(now, 31), {'-',' ',':'},'_');
    elseif any_output_files
        % NOTE(review): 'filepath' is captured but never used below
        [filepath, name] = fileparts(video_filenames{i});
    end
    % Raw (uncorrected) copy of the input video
    if isempty(output_raw_video_directory)
        raw_video_filename = [];
    else
        if isempty(video_filenames{i})
            raw_video_filename = ['live_' cdate '.avi'];
        else
            raw_video_filename = [name '_copy.avi'];
        end
        raw_video_filename = fullfile(output_raw_video_directory, raw_video_filename);
    end
    % Annotated, distortion-corrected video
    if isempty(output_annotated_video_directory)
        annotated_video_filename = [];
    else
        if isempty(video_filenames{i})
            annotated_video_filename = ['live_' cdate '_annotated.avi'];
        else
            annotated_video_filename = [name '_annotated.avi'];
        end
        annotated_video_filename = fullfile(output_annotated_video_directory, annotated_video_filename);
    end
    % CSV point cloud. When 'concatenate_csv' is true and a filename was
    % already chosen (for an earlier video, or on a previous run of this
    % script), the existing filename is deliberately kept so that all rows
    % are appended to the same file.
    if isempty(output_point_cloud_directory)
        point_cloud_filename = [];
    elseif ~concatenate_csv || isempty(point_cloud_filename)
        if isempty(video_filenames{i})
            point_cloud_filename = ['live_' cdate '_pointCloud.csv'];
        else
            point_cloud_filename = [name '_pointCloud.csv'];
        end
        point_cloud_filename = fullfile(output_point_cloud_directory, point_cloud_filename);
    end
    % Comprehensive '.mat' results file
    if ~isempty(output_data_directory)
        if isempty(video_filenames{i})
            save_data_filename = ['live_' cdate '_fullResults.mat'];
        else
            save_data_filename = [name '_fullResults.mat'];
        end
        save_data_filename = fullfile(output_data_directory, save_data_filename);
    end
    % Bundle the output filenames for 'trackInVideo()' (empty fields
    % disable the corresponding output; see 'trackInVideo.m')
    out_filenames = struct(...
        'raw_video', raw_video_filename,...
        'out_video', annotated_video_filename,...
        'out_csv', point_cloud_filename...
    );
    % Video processing
    % Per-video copy of the options, extended with the global video index
    % and the CSV append flag (append to the shared CSV file for every
    % video after the first when concatenating)
    options_i = options;
    options_i.video_index = video_index;
    options_i.append_csv = (video_index ~= 1) && concatenate_csv;
    if ~isempty(output_data_directory)
        % Capture 'localizations' and 'detections' so they can be saved
        % along with the parameters in 'save_variables_list'
        [ localizations, detections ] = trackInVideo(...
            video_filenames{i}, out_filenames,...
            probe, probe_color_distributions, rgb_sigma_polyfit,...
            cameraParams, detectionParams, localizationParams, options_i...
        );
        save(save_data_filename, save_variables_list{:});
    else
        % No data file requested: discard the numerical outputs
        trackInVideo(...
            video_filenames{i}, out_filenames,...
            probe, probe_color_distributions, rgb_sigma_polyfit,...
            cameraParams, detectionParams, localizationParams, options_i...
        );
    end
    % Advance the global video index ('video_index' remains in the
    % workspace for future runs of this script; see the header comments)
    video_index = video_index + 1;
end