From e25c7c373dec837fa9989d8bb52aafd917102ef1 Mon Sep 17 00:00:00 2001
From: Agustín Castro
Date: Mon, 26 Feb 2024 18:39:41 -0300
Subject: [PATCH] More consistent naming: always coord_transformations

---
 demos/camera_motion/src/demo.py |  2 +-
 norfair/drawing/fixed_camera.py | 42 ++++++++++++++++++++-------------
 norfair/drawing/path.py         | 20 ++++++++--------
 3 files changed, 36 insertions(+), 28 deletions(-)

diff --git a/demos/camera_motion/src/demo.py b/demos/camera_motion/src/demo.py
index 07cd96b5..b90edc58 100644
--- a/demos/camera_motion/src/demo.py
+++ b/demos/camera_motion/src/demo.py
@@ -286,7 +286,7 @@ def run():
 
         if args.draw_paths:
             frame = path_drawer.draw(
-                frame, tracked_objects, coord_transform=coord_transformations
+                frame, tracked_objects, coord_transformations=coord_transformations
             )
 
         if use_fixed_camera:
diff --git a/norfair/drawing/fixed_camera.py b/norfair/drawing/fixed_camera.py
index 28f4f791..4abfce89 100644
--- a/norfair/drawing/fixed_camera.py
+++ b/norfair/drawing/fixed_camera.py
@@ -58,7 +58,7 @@ class FixedCamera:
     >>> video.write(bigger_frame)
     """
 
-    def __init__(self, scale: float = 2, attenuation: float = 0.05):
+    def __init__(self, scale: float = None, attenuation: float = 0.05):
         self.scale = scale
         self._background = None
         self._attenuation_factor = 1 - attenuation
@@ -66,7 +66,7 @@ def __init__(self, scale: float = 2, attenuation: float = 0.05):
     def adjust_frame(
         self,
         frame: np.ndarray,
-        coord_transformation: Union[
+        coord_transformations: Union[
            HomographyTransformation, TranslationTransformation
        ],
    ) -> np.ndarray:
@@ -77,7 +77,7 @@ def adjust_frame(
        ----------
        frame : np.ndarray
            The OpenCV frame.
-        coord_transformation : TranslationTransformation
+        coord_transformations : Union[TranslationTransformation, HomographyTransformation]
            The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]
 
        Returns
@@ -88,6 +88,12 @@ def adjust_frame(
 
        # initialize background if necessary
        if self._background is None:
+            if self.scale is None:
+                if coord_transformations is None:
+                    self.scale = 1
+                else:
+                    self.scale = 3
+
            original_size = (
                frame.shape[1],
                frame.shape[0],
@@ -113,12 +119,12 @@ def adjust_frame(
 
        # warp the frame with the following composition:
        # top_left_translation o rel_to_abs
-        if isinstance(coord_transformation, HomographyTransformation):
+        if isinstance(coord_transformations, HomographyTransformation):
            top_left_translation = np.array(
                [[1, 0, self.top_left[0]], [0, 1, self.top_left[1]], [0, 0, 1]]
            )
            full_transformation = (
-                top_left_translation @ coord_transformation.inverse_homography_matrix
+                top_left_translation @ coord_transformations.inverse_homography_matrix
            )
            background_with_current_frame = cv2.warpPerspective(
                frame,
                full_transformation,
                self.background_size,
                self._background,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=(0, 0, 0),
            )
-        elif isinstance(coord_transformation, TranslationTransformation):
+        elif isinstance(coord_transformations, TranslationTransformation):
            full_transformation = np.array(
                [
-                    [1, 0, self.top_left[0] - coord_transformation.movement_vector[0]],
-                    [0, 1, self.top_left[1] - coord_transformation.movement_vector[1]],
+                    [1, 0, self.top_left[0] - coord_transformations.movement_vector[0]],
+                    [0, 1, self.top_left[1] - coord_transformations.movement_vector[1]],
                ]
            )
            background_with_current_frame = cv2.warpAffine(
                frame,
                full_transformation,
                self.background_size,
                self._background,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=(0, 0, 0),
            )
-
-        self._background = cv2.addWeighted(
-            self._background,
-            0.5,
-            background_with_current_frame,
-            0.5,
-            0.0,
-        )
-        return self._background
+        try:
+            self._background = cv2.addWeighted(
+                self._background,
+                0.5,
+                background_with_current_frame,
+                0.5,
+                0.0,
+            )
+            return self._background
+        except UnboundLocalError:
+            return frame
diff --git a/norfair/drawing/path.py b/norfair/drawing/path.py
index 83a8917a..3b5a0554 100644
--- a/norfair/drawing/path.py
+++ b/norfair/drawing/path.py
@@ -216,7 +216,7 @@ def get_points_to_draw(obj):
        self.path_blend_factor = path_blend_factor
        self.frame_blend_factor = frame_blend_factor
 
-    def draw(self, frame, tracked_objects, coord_transform=None):
+    def draw(self, frame, tracked_objects, coord_transformations=None):
        """
        the objects have a relative frame: frame_det
        the objects have an absolute frame: frame_one
@@ -226,19 +226,19 @@ def draw(self, frame, tracked_objects, coord_transform=None):
        1. top_left is an arbitrary coordinate of some pixel inside background
        logic:
        1. draw track.get_estimate(absolute=True) + top_left, in background
-        2. transform background with the composition (coord_transform.abs_to_rel o minus_top_left_translation). If coord_transform is None, only use minus_top_left_translation.
+        2. transform background with the composition (coord_transformations.abs_to_rel o minus_top_left_translation). If coord_transformations is None, only use minus_top_left_translation.
        3. crop [:frame.width, :frame.height] from the result
        4. overlay that over frame
 
        Remark:
-        In any case, coord_transform should be the coordinate transformation between the tracker absolute coords (as abs) and frame coords (as rel)
+        In any case, coord_transformations should be the coordinate transformation between the tracker absolute coords (as abs) and frame coords (as rel)
        """
 
        # initialize background if necessary
        if self._background is None:
            if self.scale is None:
-                # set the default scale, depending if coord_transform is provided or not
-                if coord_transform is None:
+                # set the default scale, depending if coord_transformations is provided or not
+                if coord_transformations is None:
                    self.scale = 1
                else:
                    self.scale = 3
@@ -293,12 +293,12 @@ def draw(self, frame, tracked_objects, coord_transform=None):
            )
 
        # apply warp to self._background with composition abs_to_rel o -top_left_translation to background, and crop [:width, :height] to get frame overdrawn
-        if isinstance(coord_transform, HomographyTransformation):
+        if isinstance(coord_transformations, HomographyTransformation):
            minus_top_left_translation = np.array(
                [[1, 0, -self.top_left[0]], [0, 1, -self.top_left[1]], [0, 0, 1]]
            )
            full_transformation = (
-                coord_transform.homography_matrix @ minus_top_left_translation
+                coord_transformations.homography_matrix @ minus_top_left_translation
            )
            background_size_frame = cv2.warpPerspective(
                self._background,
                full_transformation,
                self.background_size,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=(0, 0, 0),
            )
-        elif isinstance(coord_transform, TranslationTransformation):
+        elif isinstance(coord_transformations, TranslationTransformation):
            full_transformation = np.array(
                [
-                    [1, 0, coord_transform.movement_vector[0] - self.top_left[0]],
-                    [0, 1, coord_transform.movement_vector[1] - self.top_left[1]],
+                    [1, 0, coord_transformations.movement_vector[0] - self.top_left[0]],
+                    [0, 1, coord_transformations.movement_vector[1] - self.top_left[1]],
                ]
            )
            background_size_frame = cv2.warpAffine(
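
Note (not part of the patch): the sketch below illustrates how the renamed keyword flows through a typical tracking loop after this change — the same coord_transformations object returned by MotionEstimator.update is handed to the tracker, the path drawer and FixedCamera.adjust_frame. The detector stub, the video path and the tracker settings are placeholders chosen for illustration only.

# Minimal usage sketch under the assumptions stated above.
from norfair import Tracker, Video
from norfair.camera_motion import MotionEstimator
from norfair.drawing.fixed_camera import FixedCamera


def detect(frame):
    # Placeholder detector: should return a list of norfair.Detection objects.
    return []


video = Video(input_path="video.mp4")  # placeholder input path
tracker = Tracker(distance_function="euclidean", distance_threshold=30)
motion_estimator = MotionEstimator()
# With this patch, scale defaults to None and is resolved on the first frame:
# 1 when no coord_transformations are available, 3 when they are.
fixed_camera = FixedCamera()

for frame in video:
    detections = detect(frame)
    coord_transformations = motion_estimator.update(frame)
    tracked_objects = tracker.update(
        detections=detections, coord_transformations=coord_transformations
    )
    # After the rename, the drawing helpers take the same keyword as the tracker,
    # e.g. path_drawer.draw(frame, tracked_objects,
    #                       coord_transformations=coord_transformations)
    bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)
    video.write(bigger_frame)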