diff --git a/pupil_src/launchables/marker_detectors.py b/pupil_src/launchables/marker_detectors.py
index cd8d4cd80d..53334a977b 100644
--- a/pupil_src/launchables/marker_detectors.py
+++ b/pupil_src/launchables/marker_detectors.py
@@ -34,11 +34,12 @@ def circle_detector(ipc_push_url, pair_url, source_path, batch_size=20):
     # imports
     from time import sleep
-    from video_capture import File_Source, EndofVideoFileError
+    from video_capture import init_playback_source, EndofVideoError
     from circle_detector import CircleTracker
 
     try:
-        src = File_Source(Empty(), source_path, timed_playback=False)
+        src = init_playback_source(Empty(), source_path, timed_playback=False)
+        frame = src.get_frame()
 
         logger.info('Starting calibration marker detection...')
         frame_count = src.get_frame_count()
@@ -77,7 +78,7 @@ def circle_detector(ipc_push_url, pair_url, source_path, batch_size=20):
 
             frame = src.get_frame()
 
-    except EndofVideoFileError:
+    except EndofVideoError:
         process_pipe.send(topic='progress', payload={'data': queue})
         process_pipe.send(topic='finished', payload={})
         logger.debug("Process finished")
diff --git a/pupil_src/launchables/player.py b/pupil_src/launchables/player.py
index 9901fe01b5..4692d86bee 100644
--- a/pupil_src/launchables/player.py
+++ b/pupil_src/launchables/player.py
@@ -70,7 +70,7 @@ def player(rec_dir, ipc_pub_url, ipc_sub_url,
     from pyglui.cygl.utils import Named_Texture, RGBA
     import gl_utils
     # capture
-    from video_capture import File_Source, EndofVideoFileError
+    from video_capture import init_playback_source, EndofVideoError
     # helpers/utils
     from version_utils import VersionFormat
@@ -183,8 +183,6 @@ def on_drop(window, count, paths):
     def get_dt():
         return next(tick)
 
-    video_path = [f for f in glob(os.path.join(rec_dir, "world.*"))
-                  if os.path.splitext(f)[1] in ('.mp4', '.mkv', '.avi', '.h264', '.mjpeg')][0]
     pupil_data_path = os.path.join(rec_dir, "pupil_data")
 
     meta_info = load_meta_info(rec_dir)
@@ -208,8 +206,10 @@ def get_dt():
     g_pool.plugin_by_name = {p.__name__: p for p in plugins}
     g_pool.camera_render_size = None
 
-    # sets itself to g_pool.capture
-    File_Source(g_pool, video_path)
+    valid_ext = ('.mp4', '.mkv', '.avi', '.h264', '.mjpeg', '.fake')
+    video_path = [f for f in glob(os.path.join(rec_dir, "world.*"))
+                  if os.path.splitext(f)[1] in valid_ext][0]
+    init_playback_source(g_pool, source_path=video_path)
 
     # load session persistent settings
     session_settings = Persistent_Dict(os.path.join(user_dir, "user_settings_player"))
@@ -319,7 +319,7 @@ def toggle_general_settings(collapsed):
     g_pool.gui = ui.UI()
     g_pool.gui_user_scale = session_settings.get('gui_scale', 1.)
     g_pool.menubar = ui.Scrolling_Menu("Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos='left')
-    g_pool.iconbar = ui.Scrolling_Menu("Icons", pos=(-icon_bar_width,0),size=(0,0),header_pos='hidden')
+    g_pool.iconbar = ui.Scrolling_Menu("Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos='hidden')
     g_pool.timelines = ui.Container((0, 0), (0, 0), (0, 0))
     g_pool.timelines.horizontal_constraint = g_pool.menubar
     g_pool.user_timelines = ui.Timeline_Menu('User Timelines', pos=(0., -150.),
@@ -437,10 +437,10 @@ def handle_notifications(n):
             g_pool.new_seek = False
 
         try:
             new_frame = g_pool.capture.get_frame()
-        except EndofVideoFileError:
+        except EndofVideoError:
             # end of video logic: pause at last frame.
             g_pool.capture.play = False
-            logger.warning("end of video")
+            logger.warning("End of video")
 
         frame = new_frame.copy()
         events = {}
@@ -636,8 +636,12 @@ def on_drop(window, count, paths):
         glfw.glfwSwapBuffers(window)
 
         if rec_dir:
-            update_recording_to_recent(rec_dir)
-            glfw.glfwSetWindowShouldClose(window, True)
+            try:
+                update_recording_to_recent(rec_dir)
+            except AssertionError as err:
+                logger.error(str(err))
+            else:
+                glfw.glfwSetWindowShouldClose(window, True)
 
         glfw.glfwPollEvents()
diff --git a/pupil_src/shared_modules/background_helper.py b/pupil_src/shared_modules/background_helper.py
index c7748afab1..51f42328f8 100644
--- a/pupil_src/shared_modules/background_helper.py
+++ b/pupil_src/shared_modules/background_helper.py
@@ -57,6 +57,9 @@ def _wrapper(self, pipe, _should_terminate_flag, generator, *args, **kwargs):
 
     def fetch(self):
         '''Fetches progress and available results from background'''
+        if self.completed or self.canceled:
+            return
+
         while self.pipe.poll(0):
             try:
                 datum = self.pipe.recv()
diff --git a/pupil_src/shared_modules/exporter.py b/pupil_src/shared_modules/exporter.py
index 3a434bcce5..f99299112b 100644
--- a/pupil_src/shared_modules/exporter.py
+++ b/pupil_src/shared_modules/exporter.py
@@ -20,8 +20,7 @@
 import os
 from time import time
 from glob import glob
-import numpy as np
-from video_capture import File_Source, EndofVideoFileError
+from video_capture import init_playback_source, EndofVideoError
 from player_methods import update_recording_to_recent, load_meta_info
 from av_writer import AV_Writer
 from file_methods import load_object
@@ -74,8 +73,6 @@ def export(rec_dir, user_dir, min_data_confidence, start_frame=None, end_frame=N
     update_recording_to_recent(rec_dir)
 
-    video_path = [f for f in glob(os.path.join(rec_dir, "world.*"))
-                  if os.path.splitext(f)[-1] in ('.mp4', '.mkv', '.avi', '.mjpeg')][0]
     pupil_data_path = os.path.join(rec_dir, "pupil_data")
     audio_path = os.path.join(rec_dir, "audio.mp4")
@@ -84,7 +81,12 @@ def export(rec_dir, user_dir, min_data_confidence, start_frame=None, end_frame=N
     g_pool = Global_Container()
     g_pool.app = 'exporter'
     g_pool.min_data_confidence = min_data_confidence
-    cap = File_Source(g_pool, video_path)
+
+    valid_ext = ('.mp4', '.mkv', '.avi', '.h264', '.mjpeg', '.fake')
+    video_path = [f for f in glob(os.path.join(rec_dir, "world.*"))
+                  if os.path.splitext(f)[1] in valid_ext][0]
+    cap = init_playback_source(g_pool, source_path=video_path)
+
     timestamps = cap.timestamps
 
     # Out file path verification, we do this before but if one uses a separate tool, this will kick in.
@@ -155,7 +157,7 @@ def export(rec_dir, user_dir, min_data_confidence, start_frame=None, end_frame=N
     while frames_to_export > current_frame:
         try:
             frame = cap.get_frame()
-        except EndofVideoFileError:
+        except EndofVideoError:
             break
         events = {'frame': frame}
diff --git a/pupil_src/shared_modules/gaze_producers.py b/pupil_src/shared_modules/gaze_producers.py
index 62a819a6e2..aeab587815 100644
--- a/pupil_src/shared_modules/gaze_producers.py
+++ b/pupil_src/shared_modules/gaze_producers.py
@@ -347,7 +347,7 @@ def trim(format_only=False):
                 minutes = ts // 60
                 seconds = ts - (minutes * 60.)
                 time_fmt += ' {:02.0f}:{:02.0f} -'.format(abs(minutes), seconds)
-            button.outer_label = time_fmt[:-2]  # remove final ' - '
+            button.outer_label = time_fmt[:-2]  # remove final ' -'
             button.function = trim
             section_menu.append(ui.Text_Input('label', sec, label='Label'))
@@ -569,9 +569,9 @@ def toggle_marker_detection(self):
             self.start_marker_detection()
 
     def start_marker_detection(self):
-        self.process_pipe = zmq_tools.Msg_Pair_Server(self.g_pool.zmq_ctx)
         self.circle_marker_positions = []
         source_path = self.g_pool.capture.source_path
+        self.process_pipe = zmq_tools.Msg_Pair_Server(self.g_pool.zmq_ctx)
         self.notify_all({'subject': 'circle_detector_process.should_start',
                          'source_path': source_path, "pair_url": self.process_pipe.url})
diff --git a/pupil_src/shared_modules/marker_detector_cacher.py b/pupil_src/shared_modules/marker_detector_cacher.py
index fca9437bb0..19801e9e3c 100644
--- a/pupil_src/shared_modules/marker_detector_cacher.py
+++ b/pupil_src/shared_modules/marker_detector_cacher.py
@@ -9,11 +9,12 @@
 ---------------------------------------------------------------------------~(*)
 '''
 
+
 class Global_Container(object):
     pass
 
 
-def fill_cache(visited_list, video_file_path, q, seek_idx, run,min_marker_perimeter, invert_image):
+def fill_cache(visited_list, video_file_path, q, seek_idx, run, min_marker_perimeter, invert_image):
     '''
     this function is part of marker_detector it is run as a seperate process.
     it must be kept in a seperate file for namespace sanatisation
@@ -22,11 +23,11 @@ def fill_cache(visited_list, video_file_path, q, seek_idx, run,min_marker_perime
     import logging
     logger = logging.getLogger(__name__+' with pid: '+str(os.getpid()))
     logger.debug('Started cacher process for Marker Detector')
-    from video_capture import File_Source, EndofVideoFileError, FileSeekError
+    from video_capture import init_playback_source, EndofVideoError, FileSeekError
    from square_marker_detect import detect_markers_robust
     aperture = 9
     markers = []
-    cap = File_Source(Global_Container(), video_file_path)
+    cap = init_playback_source(Global_Container(), video_file_path)
 
     def next_unvisited_idx(frame_idx):
         try:
@@ -67,7 +68,7 @@ def handle_frame(next_frame):
 
         try:
             frame = cap.get_frame()
-        except EndofVideoFileError:
+        except EndofVideoError:
             logger.debug("Video File's last frame(s) not accesible")
 
             # could not read frame
diff --git a/pupil_src/shared_modules/offline_surface_tracker.py b/pupil_src/shared_modules/offline_surface_tracker.py
index 39bc24ed96..0f0e1f7fb5 100644
--- a/pupil_src/shared_modules/offline_surface_tracker.py
+++ b/pupil_src/shared_modules/offline_surface_tracker.py
@@ -283,19 +283,24 @@ def invalidate_marker_cache(self):
         self.init_marker_cacher()
 
     def init_marker_cacher(self):
+        from marker_detector_cacher import fill_cache
         visited_list = [False if x is False else True for x in self.cache]
-        video_file_path = self.g_pool.capture.source_path
         self.cache_queue = mp.Queue()
-        self.cacher_seek_idx = mp.Value('i',0)
-        self.cacher_run = mp.Value(c_bool,True)
-        self.cacher = mp.Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher,self.invert_image))
+        self.cacher_seek_idx = mp.Value('i', 0)
+        self.cacher_run = mp.Value(c_bool, True)
+
+        video_file_path = self.g_pool.capture.source_path
+        args = (visited_list, video_file_path, self.cache_queue,
+                self.cacher_seek_idx, self.cacher_run,
+                self.min_marker_perimeter_cacher, self.invert_image)
+        self.cacher = mp.Process(target=fill_cache, args=args)
         self.cacher.start()
 
     def update_marker_cache(self):
         while not self.cache_queue.empty():
-            idx,c_m = self.cache_queue.get()
-            self.cache.update(idx,c_m)
+            idx, c_m = self.cache_queue.get()
+            self.cache.update(idx, c_m)
             for s in self.surfaces:
                 s.update_cache(self.cache, min_marker_perimeter=self.min_marker_perimeter,
diff --git a/pupil_src/shared_modules/player_methods.py b/pupil_src/shared_modules/player_methods.py
index 0e13498f5a..cca9dca2f6 100644
--- a/pupil_src/shared_modules/player_methods.py
+++ b/pupil_src/shared_modules/player_methods.py
@@ -121,6 +121,8 @@ def update_recording_to_recent(rec_dir):
         update_recording_v0913_to_v0915(rec_dir)
     if rec_version < VersionFormat('1.3'):
         update_recording_v0915_v13(rec_dir)
+    if rec_version < VersionFormat('1.4'):
+        update_recording_v13_v14(rec_dir)
 
     # How to extend:
     # if rec_version < VersionFormat('FUTURE FORMAT'):
@@ -456,6 +458,43 @@ def update_recording_v0915_v13(rec_dir):
     update_meta_info(rec_dir, meta_info)
 
 
+def update_recording_v13_v14(rec_dir):
+    logger.info("Updating recording from v1.3 to v1.4")
+    valid_ext = ('.mp4', '.mkv', '.avi', '.h264', '.mjpeg')
+    existing_videos = [f for f in glob.glob(os.path.join(rec_dir, 'world.*'))
+                       if os.path.splitext(f)[1] in valid_ext]
+
+    if not existing_videos:
+        min_ts = np.inf
+        max_ts = -np.inf
+        for f in glob.glob(os.path.join(rec_dir, "eye*_timestamps.npy")):
+            try:
+                eye_ts = np.load(f)
+                assert len(eye_ts.shape) == 1
+                assert eye_ts.shape[0] > 1
+                min_ts = min(min_ts, eye_ts[0])
+                max_ts = max(max_ts, eye_ts[-1])
+            except (FileNotFoundError, AssertionError):
+                pass
+
+        error_msg = 'Could not generate world timestamps from eye timestamps. This is an invalid recording.'
+        assert -np.inf < min_ts < max_ts < np.inf, error_msg
+
+        logger.warning('No world video found. Constructing an artificial replacement.')
+
+        frame_rate = 30
+        timestamps = np.arange(min_ts, max_ts, 1/frame_rate)
+        np.save(os.path.join(rec_dir, 'world_timestamps'), timestamps)
+        save_object({'frame_rate': frame_rate, 'frame_size': (1280, 720), 'version': 0},
+                    os.path.join(rec_dir, 'world.fake'))
+
+    meta_info_path = os.path.join(rec_dir, "info.csv")
+    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
+        meta_info = csv_utils.read_key_value_file(csvfile)
+        meta_info['Data Format Version'] = 'v1.4'
+    update_meta_info(rec_dir, meta_info)
+
+
 def update_recording_bytes_to_unicode(rec_dir):
     logger.info("Updating recording from bytes to unicode.")
diff --git a/pupil_src/shared_modules/seek_control.py b/pupil_src/shared_modules/seek_control.py
index 8c47fd8d63..39cb18dc4e 100644
--- a/pupil_src/shared_modules/seek_control.py
+++ b/pupil_src/shared_modules/seek_control.py
@@ -67,10 +67,13 @@ def play(self):
     def play(self, new_state):
         if new_state and self.current_ts == self.trim_right_ts:
             self.g_pool.capture.seek_to_frame(self.trim_left)
+            self.g_pool.new_seek = True
         elif new_state and self.current_ts >= self.g_pool.timestamps[-10]:
-            self.g_pool.capture.seek_to_frame(0)  # avoid pause set by hitting trimmark pause.
+            self.g_pool.capture.seek_to_frame(0)
+            self.g_pool.new_seek = True
             logger.warning("End of video - restart at beginning.")
-        self.g_pool.capture.play = new_state
+        else:
+            self.g_pool.capture.play = new_state
 
     @property
     def trim_left_ts(self):
@@ -119,7 +122,6 @@ def forwards(self, x):
             self.g_pool.capture.playback_speed = speeds[new_idx]
         else:
             # frame-by-frame mode, seek one frame forward
-            self.g_pool.capture.seek_to_next_frame()
             self.g_pool.new_seek = True
 
     @property
diff --git a/pupil_src/shared_modules/video_capture/__init__.py b/pupil_src/shared_modules/video_capture/__init__.py
index 4ab9fceef0..6e422b4baa 100644
--- a/pupil_src/shared_modules/video_capture/__init__.py
+++ b/pupil_src/shared_modules/video_capture/__init__.py
@@ -18,17 +18,22 @@
 These backends are available:
 - UVC: Local USB sources
 - NDSI: Remote Pupil Mobile sources
-- Fake: Fallback, static random image
+- Fake: Fallback, static grid image
 - File: Loads video from file
 '''
 
+import os
+import numpy as np
+from glob import glob
+from camera_models import load_intrinsics
+
 import logging
 logger = logging.getLogger(__name__)
 
-from .base_backend import InitialisationError, StreamError
+from .base_backend import InitialisationError, StreamError, EndofVideoError
 from .base_backend import Base_Source, Base_Manager
 from .fake_backend import Fake_Source, Fake_Manager
-from .file_backend import FileCaptureError, EndofVideoFileError, FileSeekError
+from .file_backend import FileCaptureError, FileSeekError
 from .file_backend import File_Source, File_Manager
 from .uvc_backend import UVC_Source, UVC_Manager
@@ -50,3 +55,10 @@
 else:
     source_classes.append(Realsense_Source)
     manager_classes.append(Realsense_Manager)
+
+
+def init_playback_source(g_pool, source_path=None, *args, **kwargs):
+    if source_path is None or os.path.splitext(source_path)[1] == '.fake':
+        return Fake_Source(g_pool, source_path=source_path, *args, **kwargs)
+    else:
+        return File_Source(g_pool, source_path=source_path, *args, **kwargs)
diff --git a/pupil_src/shared_modules/video_capture/base_backend.py b/pupil_src/shared_modules/video_capture/base_backend.py
index facc1d766d..04aded902c 100644
--- a/pupil_src/shared_modules/video_capture/base_backend.py
+++ b/pupil_src/shared_modules/video_capture/base_backend.py
@@ -9,6 +9,7 @@
 ---------------------------------------------------------------------------~(*)
 '''
 
+from time import time, sleep
 from plugin import Plugin
 import gl_utils
@@ -21,15 +22,17 @@
 
 class InitialisationError(Exception):
-    def __init__(self, msg=None):
-        super().__init__()
-        self.message = msg
+    pass
 
 
 class StreamError(Exception):
     pass
 
 
+class EndofVideoError(Exception):
+    pass
+
+
 class Base_Source(Plugin):
     """Abstract source class
@@ -191,3 +194,38 @@ def replace_backend_manager(manager_class):
         # here is where you add all your menu entries.
         self.menu.label = "Backend Manager"
+
+
+class Playback_Source(Base_Source):
+    allowed_speeds = [.25, .5, 1., 1.5, 2., 4.]
+
+    def __init__(self, g_pool, timed_playback=False, playback_speed=1., *args, **kwargs):
+        super().__init__(g_pool)
+        self.playback_speed = playback_speed
+        self.timed_playback = timed_playback
+        self.time_discrepancy = 0.
+        self._recent_wait_idx = -1
+        self.play = True
+
+    def seek_to_frame(self, frame_idx):
+        raise NotImplementedError()
+
+    def get_frame_index(self):
+        raise NotImplementedError()
+
+    def seek_to_prev_frame(self):
+        raise NotImplementedError()
+
+    def get_frame(self):
+        raise NotImplementedError()
+
+    def wait(self, frame):
+        if frame.index == self._recent_wait_idx:
+            sleep(1/60)  # 60 fps on Player pause
+        elif self.time_discrepancy:
+            wait_time = frame.timestamp - self.time_discrepancy - time()
+            wait_time /= self.playback_speed
+            if 1 > wait_time > 0:
+                sleep(wait_time)
+        self._recent_wait_idx = frame.index
+        self.time_discrepancy = frame.timestamp - time()
diff --git a/pupil_src/shared_modules/video_capture/fake_backend.py b/pupil_src/shared_modules/video_capture/fake_backend.py
index c3c269972d..ad0afd55ae 100644
--- a/pupil_src/shared_modules/video_capture/fake_backend.py
+++ b/pupil_src/shared_modules/video_capture/fake_backend.py
@@ -9,29 +9,31 @@
 ---------------------------------------------------------------------------~(*)
 '''
 
-from .base_backend import Base_Source, Base_Manager
+from .base_backend import Playback_Source, Base_Manager, EndofVideoError
 
+import os
 import cv2
 import numpy as np
-from time import time,sleep
+from time import time, sleep
 from pyglui import ui
 from camera_models import Dummy_Camera
+from file_methods import load_object
 
-#logging
+# logging
 import logging
 logger = logging.getLogger(__name__)
 
 
 class Frame(object):
     """docstring of Frame"""
-    def __init__(self, timestamp,img,index):
+    def __init__(self, timestamp, img, index):
         self.timestamp = timestamp
         self._img = img
         self.bgr = img
-        self.height,self.width,_ = img.shape
+        self.height, self.width, _ = img.shape
         self._gray = None
         self.index = index
-        #indicate that the frame does not have a native yuv or jpeg buffer
+        # indicate that the frame does not have a native yuv or jpeg buffer
         self.yuv_buffer = None
         self.jpeg_buffer = None
@@ -42,14 +44,14 @@ def img(self):
     @property
     def gray(self):
         if self._gray is None:
-            self._gray = cv2.cvtColor(self._img,cv2.COLOR_BGR2GRAY)
+            self._gray = cv2.cvtColor(self._img, cv2.COLOR_BGR2GRAY)
         return self._gray
 
-    @gray.setter
-    def gray(self, value):
-        raise Exception('Read only.')
+    def copy(self):
+        return Frame(self.timestamp, self._img.copy(), self.index)
 
-class Fake_Source(Base_Source):
+
+class Fake_Source(Playback_Source):
     """Simple source which shows random, static image.
 
     It is used as falback in case the original source fails. `preferred_source`
@@ -57,51 +59,126 @@ class Fake_Source(Base_Source):
     it becomes accessible again.
 
     Attributes:
-        frame_count (int): Sequence counter
+        current_frame_idx (int): Sequence counter
         frame_rate (int)
         frame_size (tuple)
     """
-    def __init__(self, g_pool, name,frame_size,frame_rate):
-        super().__init__(g_pool)
+    def __init__(self, g_pool, source_path=None, frame_size=None,
+                 frame_rate=None, name='Fake Source', *args, **kwargs):
+        super().__init__(g_pool, *args, **kwargs)
+        if source_path:
+            meta = load_object(source_path)
+            frame_size = meta['frame_size']
+            frame_rate = meta['frame_rate']
+            self.timestamps = np.load(os.path.splitext(source_path)[0] + '_timestamps.npy')
+        else:
+            self.timestamps = None
+
         self.fps = frame_rate
         self._name = name
-        self.presentation_time = time()
         self.make_img(tuple(frame_size))
-        self.frame_count = 0
+        self.source_path = source_path
+        self.current_frame_idx = 0
+        self.target_frame_idx = 0
 
     def init_ui(self):
         self.add_menu()
         self.menu.label = "Static Image Source"
-        from pyglui import ui
         text = ui.Info_Text("Fake capture source streaming test images.")
         self.menu.append(text)
 
     def deinit_ui(self):
         self.remove_menu()
 
-    def make_img(self,size):
-        c_w ,c_h = max(1,size[0]/30),max(1,size[1]/30)
-        coarse = np.random.randint(0,200,size=(int(c_h),int(c_w),3)).astype(np.uint8)
-        # coarse[:,:,1] /=5
-        # coarse[:,:,2] *=0
-        # coarse[:,:,1] /=30
-        # self._img = np.ones((size[1],size[0],3),dtype=np.uint8)
-        self._img = cv2.resize(coarse,size,interpolation=cv2.INTER_LANCZOS4)
+    def make_img(self, size):
+        # Generate Pupil Labs colored gradient
+        self._img = np.zeros((size[1], size[0], 3), dtype=np.uint8)
+        self._img[:, :, 0] += np.linspace(91, 157, self.frame_size[0], dtype=np.uint8)
+        self._img[:, :, 1] += np.linspace(165, 161, self.frame_size[0], dtype=np.uint8)
+        self._img[:, :, 2] += np.linspace(35, 112, self.frame_size[0], dtype=np.uint8)
+
         self._intrinsics = Dummy_Camera(size, self.name)
 
-    def recent_events(self,events):
-        now = time()
-        spent = now - self.presentation_time
-        wait = max(0, 1./self.fps - spent)
-        sleep(wait)
-        self.presentation_time = time()
-        self.frame_count += 1
-        timestamp = self.g_pool.get_timestamp()
-        frame = Frame(timestamp,self._img.copy(),self.frame_count)
-        cv2.putText(frame.img, "Fake Source Frame %s"%self.frame_count,(20,20), cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,100,100))
-        events['frame'] = frame
-        self._recent_frame = frame
+    def recent_events(self, events):
+        try:
+            frame = self.get_frame()
+        except IndexError:
+            logger.info('Recording has ended.')
+            self.play = False
+        else:
+            self.wait(frame)
+            self._recent_frame = frame
+            events['frame'] = frame
+
+    def get_frame(self):
+        try:
+            timestamp = self.timestamps[self.target_frame_idx]
+        except IndexError:
+            raise EndofVideoError('Reached end of timestamps list.')
+        except TypeError:
+            timestamp = self.g_pool.get_timestamp()
+
+        frame = Frame(timestamp, self._img.copy(), self.target_frame_idx)
+
+        frame_txt_font_name = cv2.FONT_HERSHEY_SIMPLEX
+        frame_txt_font_scale = 1.
+        frame_txt_thickness = 1
+
+        # first line: frame index
+        frame_txt = "Fake source frame {}".format(frame.index)
+        frame_txt_size = cv2.getTextSize(frame_txt, frame_txt_font_name,
+                                         frame_txt_font_scale,
+                                         frame_txt_thickness)[0]
+
+        frame_txt_loc = (self.frame_size[0] // 2 - frame_txt_size[0] // 2,
+                         self.frame_size[1] // 2 - frame_txt_size[1])
+
+        cv2.putText(frame.img, frame_txt, frame_txt_loc, frame_txt_font_name,
+                    frame_txt_font_scale, (255, 255, 255),
+                    thickness=frame_txt_thickness, lineType=cv2.LINE_8)
+
+        # second line: resolution @ fps
+        frame_txt = "{}x{} @ {} fps".format(*self.frame_size, self.frame_rate)
+        frame_txt_size = cv2.getTextSize(frame_txt, frame_txt_font_name,
+                                         frame_txt_font_scale,
+                                         frame_txt_thickness)[0]
+
+        frame_txt_loc = (self.frame_size[0] // 2 - frame_txt_size[0] // 2,
+                         self.frame_size[1] // 2 + frame_txt_size[1])
+
+        cv2.putText(frame.img, frame_txt, frame_txt_loc, frame_txt_font_name,
+                    frame_txt_font_scale, (255, 255, 255),
+                    thickness=frame_txt_thickness, lineType=cv2.LINE_8)
+
+        self.current_frame_idx = self.target_frame_idx
+        self.target_frame_idx += 1
+
+        if self.timed_playback:
+            now = time()
+            spent = now - self.time_discrepancy
+            wait = max(0, 1./self.fps - spent)
+            wait /= self.playback_speed
+            sleep(wait)
+            self.time_discrepancy = time()
+
+        return frame
+
+    def get_frame_count(self):
+        try:
+            return len(self.timestamps)
+        except TypeError:
+            return self.current_frame_idx + 1
+
+    def seek_to_frame(self, frame_idx):
+        self.target_frame_idx = max(0, min(frame_idx, self.get_frame_count() - 1))
+        self.time_discrepancy = 0
+
+    def get_frame_index(self):
+        return self.current_frame_idx
+
+    def seek_to_prev_frame(self):
+        self.seek_to_frame(self.current_frame_idx - 1)
 
     @property
     def name(self):
@@ -123,7 +200,7 @@ def frame_size(self):
     @frame_size.setter
     def frame_size(self, new_size):
         # closest match for size
-        sizes = [abs(r[0]-new_size[0]) for r in self.frame_sizesp]
+        sizes = [abs(r[0]-new_size[0]) for r in self.frame_sizes]
         best_size_idx = sizes.index(min(sizes))
         size = self.frame_sizes[best_size_idx]
         if size != new_size:
@@ -132,22 +209,23 @@ def frame_size(self, new_size):
     @property
     def frame_rates(self):
-        return (30,60,90,120)
+        return (30, 60, 90, 120)
 
     @property
     def frame_sizes(self):
-        return ((640,480),(1280,720),(1920,1080))
+        return ((640, 480), (1280, 720), (1920, 1080))
 
     @property
     def frame_rate(self):
         return self.fps
+
     @frame_rate.setter
-    def frame_rate(self,new_rate):
-        rates = [ abs(r-new_rate) for r in self.frame_rates ]
+    def frame_rate(self, new_rate):
+        rates = [abs(r-new_rate) for r in self.frame_rates]
         best_rate_idx = rates.index(min(rates))
         rate = self.frame_rates[best_rate_idx]
         if rate != new_rate:
-            logger.warning("%sfps capture mode not available at (%s) on 'Fake Source'. Selected %sfps. "%(new_rate,self.frame_size,rate))
+            logger.warning("%sfps capture mode not available at (%s) on 'Fake Source'. Selected %sfps. "%(new_rate, self.frame_size, rate))
         self.fps = rate
 
     @property
@@ -180,13 +258,14 @@ def init_ui(self):
         text = ui.Info_Text('Convenience manager to select a fake source explicitly.')
 
         def activate():
-            #a capture leaving is a must stop for recording.
-            self.notify_all( {'subject':'recording.should_stop'} )
+            # a capture leaving is a must stop for recording.
+            self.notify_all({'subject': 'recording.should_stop'})
             settings = {}
+            settings['timed_playback'] = True
             settings['frame_rate'] = self.g_pool.capture.frame_rate
             settings['frame_size'] = self.g_pool.capture.frame_size
             settings['name'] = self.g_pool.capture.name
-            #if the user set fake capture, we dont want it to auto jump back to the old capture.
+            # if the user set fake capture, we dont want it to auto jump back to the old capture.
             if self.g_pool.process == 'world':
                 self.notify_all({'subject':'start_plugin',"name":"Fake_Source",'args':settings})
             else:
diff --git a/pupil_src/shared_modules/video_capture/file_backend.py b/pupil_src/shared_modules/video_capture/file_backend.py
index 2543c6fbf1..1fd6c841cc 100644
--- a/pupil_src/shared_modules/video_capture/file_backend.py
+++ b/pupil_src/shared_modules/video_capture/file_backend.py
@@ -9,45 +9,32 @@
 ---------------------------------------------------------------------------~(*)
 '''
 
-import os,sys
+import os
 import av
-assert av.__version__ >= '0.2.5'
-
+from time import sleep
 
-from .base_backend import Base_Source, Base_Manager
+from .base_backend import Playback_Source, Base_Manager, EndofVideoError
 from camera_models import load_intrinsics
 
 import numpy as np
-from time import time,sleep
-from fractions import Fraction
-from multiprocessing import cpu_count
+
+from multiprocessing import cpu_count
 import os.path
 
-#logging
+# logging
 import logging
 logger = logging.getLogger(__name__)
 
+assert av.__version__ >= '0.2.5'
 av.logging.set_level(av.logging.ERROR)
 logging.getLogger('libav').setLevel(logging.ERROR)
 
 
-class FileCaptureError(Exception):
-    """General Exception for this module"""
-    def __init__(self, arg):
-        super().__init__()
-        self.arg = arg
-
-class EndofVideoFileError(Exception):
-    """docstring for EndofVideoFileError"""
-    def __init__(self, arg):
-        super().__init__()
-        self.arg = arg
+class FileCaptureError(Exception):
+    pass
 
 
 class FileSeekError(Exception):
-    """docstring for EndofVideoFileError"""
-    def __init__(self):
-        super().__init__()
+    pass
 
 
 class Frame(object):
@@ -88,7 +75,7 @@ def gray(self):
         return self._gray
 
 
-class File_Source(Base_Source):
+class File_Source(Playback_Source):
     """Simple file capture.
 
     Attributes:
@@ -96,17 +83,13 @@ class File_Source(Base_Source):
         timestamps (str): Path to timestamps file
     """
-    allowed_speeds = [.25, .5, 1., 1.5, 2., 4.]
-
-    def __init__(self, g_pool, source_path=None, timed_playback=False, loop=False, playback_speed=1.):
-        super().__init__(g_pool)
+    def __init__(self, g_pool, source_path=None, loop=False, *args, **kwargs):
+        super().__init__(g_pool, *args, **kwargs)
 
         # minimal attribute set
         self._initialised = True
-        self.playback_speed = playback_speed
         self.source_path = source_path
         self.timestamps = None
-        self.timed_playback = timed_playback
         self.loop = loop
 
         if not source_path or not os.path.isfile(source_path):
@@ -136,8 +119,6 @@ def __init__(self, g_pool, source_path=None, timed_playback=False, loop=False, p
             self._initialised = False
             return
 
-        self.time_discrepancy = 0.
-        self._recent_wait_idx = -1
 
         self.target_frame_idx = 0
         self.current_frame_idx = 0
@@ -171,7 +152,6 @@ def __init__(self, g_pool, source_path=None, timed_playback=False, loop=False, p
         loc, name = os.path.split(os.path.splitext(source_path)[0])
         self._intrinsics = load_intrinsics(loc, name, self.frame_size)
-        self.play = True
 
     def ensure_initialisation(fallback_func=None, requires_playback=False):
         from functools import wraps
@@ -265,36 +245,25 @@ def get_frame(self):
                 return self.get_frame()
             else:
                 logger.info("End of videofile %s %s"%(self.current_frame_idx,len(self.timestamps)))
-                raise EndofVideoFileError('Reached end of videofile')
+                raise EndofVideoError('Reached end of video file')
         try:
             timestamp = self.timestamps[index]
         except IndexError:
             logger.info("Reached end of timestamps list.")
-            raise EndofVideoFileError("Reached end of timestamps list.")
+            raise EndofVideoError("Reached end of timestamps list.")
         self.show_time = timestamp
         self.target_frame_idx = index+1
         self.current_frame_idx = index
         return Frame(timestamp, frame, index=index)
 
-    def wait(self, frame):
-        if frame.index == self._recent_wait_idx:
-            sleep(1/60)  # 60 fps on Player pause
-        elif self.time_discrepancy:
-            wait_time = frame.timestamp - self.time_discrepancy - time()
-            wait_time /= self.playback_speed
-            if 1 > wait_time > 0:
-                sleep(wait_time)
-        self._recent_wait_idx = frame.index
-        self.time_discrepancy = frame.timestamp - time()
-
     @ensure_initialisation(fallback_func=lambda evt: sleep(0.05), requires_playback=True)
     def recent_events(self, events):
         try:
             frame = self.get_frame()
-        except EndofVideoFileError:
+        except EndofVideoError:
             logger.info('Video has ended.')
-            self.notify_all({"subject":'file_source.video_finished', 'source_path': self.source_path})
+            self.notify_all({"subject": 'file_source.video_finished', 'source_path': self.source_path})
             self.play = False
         else:
             if self.timed_playback:
@@ -304,9 +273,9 @@ def recent_events(self, events):
 
     @ensure_initialisation()
     def seek_to_frame(self, seek_pos):
-        ###frame accurate seeking
+        # frame accurate seeking
         try:
-            self.video_stream.seek(self.idx_to_pts(seek_pos),mode='time')
+            self.video_stream.seek(self.idx_to_pts(seek_pos), mode='time')
         except av.AVError as e:
             raise FileSeekError()
         else:
@@ -334,9 +303,6 @@ def on_notify(self, notification):
         elif notification['subject'] == 'file_source.should_pause' and notification.get('source_path') == self.source_path:
             self.play = False
 
-    def seek_to_next_frame(self):
-        self.seek_to_frame(min(self.current_frame_idx + 1, self.get_frame_count() - 1))
-
     def seek_to_prev_frame(self):
         self.seek_to_frame(max(0, self.current_frame_idx - 1))
diff --git a/pupil_src/shared_modules/vis_eye_video_overlay.py b/pupil_src/shared_modules/vis_eye_video_overlay.py
index 5fb57527c7..f743336a22 100644
--- a/pupil_src/shared_modules/vis_eye_video_overlay.py
+++ b/pupil_src/shared_modules/vis_eye_video_overlay.py
@@ -20,7 +20,7 @@
 from plugin import Visualizer_Plugin_Base
 from player_methods import transparent_image_overlay
 from methods import normalize, denormalize
-from video_capture import EndofVideoFileError, FileCaptureError, File_Source
+from video_capture import EndofVideoError, FileCaptureError, File_Source
 # logging
 import logging
@@ -111,7 +111,7 @@ def visualize(self, frame, alpha, scale, show_ellipses, pupil_positions):
             try:
                 self.current_eye_frame = self.source.get_frame()
-            except EndofVideoFileError:
+            except EndofVideoError:
                 logger.info("Reached the end of the eye video for eye video {}.".format(self.eyeid))
             # 2. dragging image
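
The patch above routes every offline consumer (player, exporter, marker cacher, circle detector) through the new init_playback_source() factory, which returns a Fake_Source for the world.fake stub written by update_recording_v13_v14() and a File_Source for real world videos; both raise EndofVideoError when playback ends. A minimal usage sketch of that pattern follows. It is illustrative only and not part of the patch: the Empty container mirrors the bare g_pool stand-in used in launchables/marker_detectors.py above, and iterate_world_frames is a hypothetical helper name.

    import os
    from glob import glob

    from video_capture import init_playback_source, EndofVideoError


    class Empty(object):
        pass  # bare stand-in for g_pool, as in the circle_detector launchable


    def iterate_world_frames(rec_dir):
        # Same extension whitelist as player.py / exporter.py in this patch;
        # '.fake' admits the artificial world video written by update_recording_v13_v14().
        valid_ext = ('.mp4', '.mkv', '.avi', '.h264', '.mjpeg', '.fake')
        video_path = [f for f in glob(os.path.join(rec_dir, "world.*"))
                      if os.path.splitext(f)[1] in valid_ext][0]

        # init_playback_source() dispatches on the extension: Fake_Source for
        # 'world.fake', File_Source for everything else.
        cap = init_playback_source(Empty(), source_path=video_path)

        while True:
            try:
                yield cap.get_frame()  # both sources raise EndofVideoError at the end
            except EndofVideoError:
                break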