diff --git a/moten/extras.py b/moten/extras.py
index e202e3f..c26e481 100644
--- a/moten/extras.py
+++ b/moten/extras.py
@@ -14,6 +14,36 @@
 except ImportError:
     with_tqdm = False
 
+def process_motion_energy_from_files(filenames,
+                                     size=None,
+                                     nimages=np.inf,
+                                     batch_size=1000,
+                                     dtype='float32',
+                                     mask=None,
+                                     ):
+    '''Accumulate the pixel-by-pixel covariance of frame differences across one or more video files.
+    '''
+    import moten.io
+    if not isinstance(filenames, (list, tuple)):
+        filenames = [filenames]
+
+    XTX = 0
+    NFRAMES = 0
+    for fl in filenames:
+        generator = moten.io.generate_frame_difference_from_greyvideo(
+            fl, size=size, nimages=nimages, dtype=dtype)
+
+        if mask is not None:
+            generator = moten.io.apply_mask(mask, generator)
+
+        nframes, xtx = pixbypix_covariance_from_frames_generator(generator,
+                                                                 batch_size=batch_size,
+                                                                 dtype=dtype)
+        XTX += xtx
+        NFRAMES += nframes
+    return NFRAMES, XTX
+
+
 def pixbypix_covariance_from_frames_generator(data_generator,
                                               batch_size=1000,
                                               output_nonlinearity=pointwise_square,
@@ -37,12 +67,13 @@
     >>> fdiffgen = moten.io.generate_frame_difference_from_greyvideo(video_file, size=small_size, nimages=333)
     >>> nimages, XTX = moten.extras.pixbypix_covariance_from_frames_generator(fdiffgen) # doctest: +SKIP
     '''
-    first_frame = data_generator.__next__()
+    first_frame = next(data_generator)
+
     vdim, hdim = first_frame.shape
     npixels = vdim*hdim
 
     framediff_buffer = np.zeros((batch_size, npixels), dtype=dtype)
-    XTX = np.zeros((npixels, npixels), dtype=np.float64)
+    XTX = np.zeros((npixels, npixels), dtype=dtype)
     nframes = 0
 
     if with_tqdm:
@@ -57,7 +88,8 @@
         framediff_buffer *= 0.0     # clear buffer
         try:
             for batch_frame_idx in range(batch_size):
-                frame_difference = data_generator.__next__().reshape(1, -1)
+                frame_difference = next(data_generator).reshape(1, -1)
+
                 framediff_buffer[batch_frame_idx] = output_nonlinearity(frame_difference)
         except StopIteration:
             RUN = False
@@ -139,11 +171,14 @@ def __init__(self,
                  video_file,
                  size=None,
                  nimages=np.inf,
-                 batch_size=100,
+                 batch_size=1000,
                  output_nonlinearity=pointwise_square,
-                 dtype='float32'):
+                 dtype='float32',
+                 mask=None,
+                 ):
         '''
         '''
+        self.mask = mask
         self.size = size
         self.dtype = dtype
         self.nimages = nimages
@@ -159,6 +194,9 @@
     def get_frame_difference_generator(self):
         generator = moten.io.generate_frame_difference_from_greyvideo(
             self.video_file, size=self.size, nimages=self.nimages, dtype=self.dtype)
+        if self.mask is not None:
+            generator = moten.io.apply_mask(self.mask, generator)
+
         return generator
 
     def compute_pixel_by_pixel_covariance(self,
@@ -258,7 +296,7 @@ def compute_temporal_pcs(self, generator=None, skip_first=False):
         if skip_first:
             # drop the first frame b/c the difference is with 0's
             # and so projection is with itself
-            generator.__next__()
+            next(generator)
 
         self.decomposition_temporal_pcs = []
         ## TODO: batch for faster performance
diff --git a/moten/io.py b/moten/io.py
index cd594d0..7b13333 100644
--- a/moten/io.py
+++ b/moten/io.py
@@ -297,3 +297,36 @@ def load_image_luminance(image_files, hdim=None, vdim=None):
         stimulus = rgb2lab(stimulus/255.)[...,0]
         stimuli.append(stimulus)
     return np.asarray(stimuli)
+
+
+def apply_mask(mask, generator):
+    '''
+    Parameters
+    ----------
+    mask : 2D np.ndarray
+    generator : generator
+        Yields a video frame
+
+    Yields
+    ------
+    masked_image : 2D np.ndarray
+        Masked image of each frame (i.e. ``original_image[mask]``)
+
+    Examples
+    --------
+    >>> import moten
+    >>> video_file = 'http://anwarnunez.github.io/downloads/avsnr150s24fps_tiny.mp4'
+    >>> small_size = (36, 64) # downsample to (vdim, hdim) 16:9 aspect ratio
+    >>> oim = next(moten.io.generate_frame_difference_from_greyvideo(video_file, size=small_size))
+    >>> mask = np.zeros(small_size, dtype=bool)
+    >>> mask[16:, :40] = True
+    >>> nim = next(moten.io.apply_mask(mask, moten.io.generate_frame_difference_from_greyvideo(video_file, size=small_size)))
+    >>> np.allclose(oim[16:, :40], nim)  # doctest: +SKIP
+    '''
+    assert mask.ndim == 2
+    vshape = np.unique(mask.sum(0)).max()
+    hshape = np.unique(mask.sum(1)).max()
+    shape = (vshape, hshape)
+    print('mask size:', shape)
+    for im in generator:
+        yield im[mask].reshape(shape)
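
Usage sketch: a minimal example of the masking workflow introduced in this patch, based only on the signatures added above. The video filename and mask geometry are placeholders, and the mask must cover a rectangular region because apply_mask reshapes the masked pixels back to 2D.

import numpy as np
import moten.extras

# Placeholder greyscale video and downsampled frame size; any file readable by
# moten.io.generate_frame_difference_from_greyvideo should work here.
video_file = 'stimulus.mp4'
small_size = (36, 64)  # (vdim, hdim)

# Rectangular boolean mask over the resized frames (keep the lower-left region).
mask = np.zeros(small_size, dtype=bool)
mask[16:, :40] = True

# Accumulate the masked pixel-by-pixel frame-difference covariance
# across one or more video files.
nframes, XTX = moten.extras.process_motion_energy_from_files(
    [video_file], size=small_size, batch_size=1000, mask=mask)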