diff --git a/.circleci/config.yml b/.circleci/config.yml index 3795a54a1..2499e83a0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -105,9 +105,7 @@ version: 2.1 workflows: main: jobs: - # disable lint build, for merge to default branch - # turn back on when rc/2.8.0 is started - # - lint + - lint - linux-py36 - linux-py37 # disabled osx build. covered by github-actions diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c67a046b..4ae4b3a6b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,12 @@ # Change Log All notable changes to this project will be documented in this file. -## [2.7.0] = TBD +## [2.8.0] = TBD +- Created lookup table to get monitor_delay for cases where calculation from data fails +- If sync timestamp file has more timestamps than the eye tracking movie has frames, trim excess timestamps (up to 15) +- Session API returns both warped and unwarped stimulus images, and both are written to NWB + +## [2.7.0] = 2021-02-19 - Refactored behavior and ophys session and data APIs to remove a circular inheritance issue - Fixed segmentation mask and roi_mask misregistration in 'BehaviorOphysSession' - Replaces BehaviorOphysSession.get_roi_masks() method with roi_masks property diff --git a/allensdk/__init__.py b/allensdk/__init__.py index 33f5a7d03..be865ce81 100644 --- a/allensdk/__init__.py +++ b/allensdk/__init__.py @@ -35,10 +35,7 @@ # import logging - - - -__version__ = '2.7.0' +__version__ = '2.8.0' try: @@ -61,7 +58,8 @@ def one(x): except TypeError: return x if xlen != 1: - raise OneResultExpectedError('Expected length one result, received: {} results from query'.format(x)) + raise OneResultExpectedError("Expected length one result, received: " + f"{x} results from query") if isinstance(x, set): return list(x)[0] else: @@ -75,6 +73,7 @@ def one(x): 'allensdk.api.api.retrieve_file_over_http') file_download_log.setLevel(logging.INFO) console = logging.StreamHandler() - formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s 
%(message)s') + formatter = logging.Formatter("%(asctime)s %(name)-12s " + "%(levelname)-8s %(message)s") console.setFormatter(formatter) file_download_log.addHandler(console) diff --git a/allensdk/brain_observatory/behavior/behavior_ophys_session.py b/allensdk/brain_observatory/behavior/behavior_ophys_session.py index b14165878..3399e0d6c 100644 --- a/allensdk/brain_observatory/behavior/behavior_ophys_session.py +++ b/allensdk/brain_observatory/behavior/behavior_ophys_session.py @@ -234,7 +234,7 @@ def events(self) -> pd.DataFrame: @events.setter def events(self, value): - self_events = value + self._events = value @property def cell_specimen_table(self) -> pd.DataFrame: diff --git a/allensdk/brain_observatory/behavior/behavior_session.py b/allensdk/brain_observatory/behavior/behavior_session.py index 87780200a..9aed9fb0c 100644 --- a/allensdk/brain_observatory/behavior/behavior_session.py +++ b/allensdk/brain_observatory/behavior/behavior_session.py @@ -174,7 +174,7 @@ def get_performance_metrics(self, engaged_trial_reward_rate_threshold=2): performance_metrics['total_reward_count'] = len(self.rewards) performance_metrics['total_reward_volume'] = self.rewards.volume.sum() - rpdf = self.get_rpdf() + rpdf = self.get_rolling_performance_df() engaged_trial_mask = ( rpdf['reward_rate'] > engaged_trial_reward_rate_threshold) @@ -313,24 +313,17 @@ def stimulus_presentations(self, value): self._stimulus_presentations = value @property - def stimulus_templates(self) -> StimulusTemplate: + def stimulus_templates(self) -> pd.DataFrame: """Get stimulus templates (movies, scenes) for behavior session. Returns ------- - StimulusTemplate - A StimulusTemplate object containing the stimulus images for the - experiment. Relevant properties include: - image_set_name: The name of the image set that the - StimulusTemplate encapsulates - image_names: A list of individual image names in the image set - images: A list of StimulusImage (inherits from np.ndarray) - objects. 
- Also has a to_dataframe() method to convert to a dataframe - where indices are image names, an 'image' column contains image - arrays, and the df.name is the image set. + pd.DataFrame + A pandas DataFrame object containing the stimulus images for the + experiment. Indices are image names, 'warped' and 'unwarped' + columns contains image arrays, and the df.name is the image set. """ - return self._stimulus_templates + return self._stimulus_templates.to_dataframe() @stimulus_templates.setter def stimulus_templates(self, value): diff --git a/allensdk/brain_observatory/behavior/eye_tracking_processing.py b/allensdk/brain_observatory/behavior/eye_tracking_processing.py index 08f0108ea..733348779 100644 --- a/allensdk/brain_observatory/behavior/eye_tracking_processing.py +++ b/allensdk/brain_observatory/behavior/eye_tracking_processing.py @@ -44,7 +44,7 @@ def load_eye_tracking_hdf(eye_tracking_file: Path) -> pd.DataFrame: # Values in the hdf5 may be complex (likely an artifact of the ellipse # fitting process). Take only the real component. - eye_tracking_data = eye_tracking_data.apply(lambda x: np.real(x.to_numpy())) + eye_tracking_data = eye_tracking_data.apply(lambda x: np.real(x.to_numpy())) # noqa: E501 return eye_tracking_data.astype(float) @@ -186,9 +186,23 @@ def process_eye_tracking_data(eye_data: pd.DataFrame, eye tracking frames. """ - if len(frame_times) != len(eye_data.index): + n_sync = len(frame_times) + n_eye_frames = len(eye_data.index) + + # If n_sync exceeds n_eye_frames by <= 15, + # just trim the excess sync pulses from the end + # of the timestamps array. + # + # This solution was discussed in + # https://github.com/AllenInstitute/AllenSDK/issues/1545 + + if n_sync > n_eye_frames and n_sync <= n_eye_frames+15: + frame_times = frame_times[:n_eye_frames] + n_sync = len(frame_times) + + if n_sync != n_eye_frames: raise RuntimeError(f"Error! 
The number of sync file frame times " - f"({len(frame_times)} does not match the " + f"({len(frame_times)}) does not match the " f"number of eye tracking frames " f"({len(eye_data.index)})!") @@ -217,7 +231,7 @@ def process_eye_tracking_data(eye_data: pd.DataFrame, cr_areas[likely_blinks] = np.nan eye_areas[likely_blinks] = np.nan - eye_data.insert(0, "time", frame_times) + eye_data.insert(0, "timestamps", frame_times) eye_data.insert(1, "cr_area", cr_areas) eye_data.insert(2, "eye_area", eye_areas) eye_data.insert(3, "pupil_area", pupil_areas) diff --git a/allensdk/brain_observatory/behavior/metadata_processing.py b/allensdk/brain_observatory/behavior/metadata_processing.py index 429942169..1513627c4 100644 --- a/allensdk/brain_observatory/behavior/metadata_processing.py +++ b/allensdk/brain_observatory/behavior/metadata_processing.py @@ -1,4 +1,6 @@ +from typing import Dict import re +import numpy as np description_dict = { # key is a regex and value is returned on match @@ -55,25 +57,82 @@ def get_expt_description(session_type: str) -> str: return match.popitem()[1] -def get_task_parameters(data): +def get_task_parameters(data: Dict) -> Dict: + """ + Read task_parameters metadata from the behavior stimulus pickle file. + + Parameters + ---------- + data: dict + The nested dict read in from the behavior stimulus pickle file. + All of the data expected by this method lives under + data['items']['behavior'] + + Returns + ------- + dict + A dict containing the task_parameters associated with this session. 
+ """ behavior = data["items"]["behavior"] + stimuli = behavior['stimuli'] + config = behavior["config"] + doc = config["DoC"] task_parameters = {} + task_parameters['blank_duration_sec'] = \ - [float(x) for x in behavior['config']['DoC']['blank_duration_range']] - task_parameters['stimulus_duration_sec'] = \ - behavior['config']['DoC']['stimulus_window'] + [float(x) for x in doc['blank_duration_range']] + + if 'images' in stimuli: + stim_key = 'images' + elif 'grating' in stimuli: + stim_key = 'grating' + else: + msg = "Cannot get stimulus_duration_sec\n" + msg += "'images' and/or 'grating' not a valid " + msg += "key in pickle file under " + msg += "['items']['behavior']['stimuli']\n" + msg += f"keys: {list(stimuli.keys())}" + raise RuntimeError(msg) + + stim_duration = stimuli[stim_key]['flash_interval_sec'] + + # from discussion in + # https://github.com/AllenInstitute/AllenSDK/issues/1572 + # + # 'flash_interval' contains (stimulus_duration, gray_screen_duration) + # (as @matchings said above). That second value is redundant with + # 'blank_duration_range'. I'm not sure what would happen if they were + # set to be conflicting values in the params. But it looks like + # they're always consistent. It should always be (0.25, 0.5), + # except for TRAINING_0 and TRAINING_1, which have statically + # displayed stimuli (no flashes). 
+ + if stim_duration is None: + stim_duration = np.NaN + else: + stim_duration = stim_duration[0] + + task_parameters['stimulus_duration_sec'] = stim_duration + task_parameters['omitted_flash_fraction'] = \ behavior['params'].get('flash_omit_probability', float('nan')) task_parameters['response_window_sec'] = \ - [float(x) for x in behavior["config"]["DoC"]["response_window"]] - task_parameters['reward_volume'] = \ - behavior["config"]["reward"]["reward_volume"] - task_parameters['stage'] = behavior["params"]["stage"] + [float(x) for x in doc["response_window"]] + task_parameters['reward_volume'] = config["reward"]["reward_volume"] + task_parameters['auto_reward_volume'] = doc['auto_reward_volume'] + task_parameters['session_type'] = behavior["params"]["stage"] task_parameters['stimulus'] = next(iter(behavior["stimuli"])) - task_parameters['stimulus_distribution'] = \ - behavior["config"]["DoC"]["change_time_dist"] - task_parameters['task'] = behavior["config"]["behavior"]["task_id"] + task_parameters['stimulus_distribution'] = doc["change_time_dist"] + + task_id = config['behavior']['task_id'] + if 'DoC' in task_id: + task_parameters['task'] = 'change detection' + else: + msg = "metadata_processing.get_task_parameters does not " + msg += f"know how to parse 'task_id' = {task_id}" + raise RuntimeError(msg) + n_stimulus_frames = 0 for stim_type, stim_table in behavior["stimuli"].items(): n_stimulus_frames += sum(stim_table.get("draw_log", [])) diff --git a/allensdk/brain_observatory/behavior/rewards_processing.py b/allensdk/brain_observatory/behavior/rewards_processing.py index de13d9d8e..81b66bb9d 100644 --- a/allensdk/brain_observatory/behavior/rewards_processing.py +++ b/allensdk/brain_observatory/behavior/rewards_processing.py @@ -1,7 +1,6 @@ from typing import Dict import numpy as np import pandas as pd -from collections import defaultdict def get_rewards(data: Dict, @@ -31,13 +30,14 @@ def get_rewards(data: Dict, trial_df = 
pd.DataFrame(data["items"]["behavior"]["trial_log"]) rewards_dict = {"volume": [], "timestamps": [], "autorewarded": []} for idx, trial in trial_df.iterrows(): - rewards = trial["rewards"] # as i write this there can only ever be one reward per trial + rewards = trial["rewards"] + # as i write this there can only ever be one reward per trial if rewards: rewards_dict["volume"].append(rewards[0][0]) rewards_dict["timestamps"].append(timestamps[rewards[0][2]]) auto_rwrd = trial["trial_params"]["auto_reward"] rewards_dict["autorewarded"].append(auto_rwrd) - df = pd.DataFrame(rewards_dict).set_index("timestamps", drop=True) + df = pd.DataFrame(rewards_dict) return df diff --git a/allensdk/brain_observatory/behavior/schemas.py b/allensdk/brain_observatory/behavior/schemas.py index 919eba83d..5c48b8b2b 100644 --- a/allensdk/brain_observatory/behavior/schemas.py +++ b/allensdk/brain_observatory/behavior/schemas.py @@ -215,6 +215,7 @@ class BehaviorTaskParametersSchema(RaisingSchema): stimulus_duration_sec = fields.Float( doc='Duration of each stimulus presentation in seconds', required=True, + allow_nan=True ) omitted_flash_fraction = fields.Float( doc='Fraction of flashes/image presentations that were omitted', @@ -232,7 +233,11 @@ class BehaviorTaskParametersSchema(RaisingSchema): doc='Volume of water (in mL) delivered as reward', required=True, ) - stage = fields.String( + auto_reward_volume = fields.Float( + doc='Volume of water (in mL) delivered as an automatic reward', + required=True, + ) + session_type = fields.String( doc='Stage of behavioral task', required=True, ) diff --git a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py index 3af2a89fc..4f34fc0b8 100644 --- a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py +++ b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py @@ -18,7 +18,7 @@ from 
allensdk.brain_observatory.behavior.schemas import ( BehaviorTaskParametersSchema, OphysBehaviorMetadataSchema) from allensdk.brain_observatory.behavior.stimulus_processing import \ - StimulusTemplate + StimulusTemplate, StimulusTemplateFactory from allensdk.brain_observatory.behavior.trials_processing import ( TRIAL_COLUMN_DESCRIPTION_DICT ) @@ -75,12 +75,14 @@ def save(self, session_object): from_dataframe=True) # Add stimulus template data to NWB in-memory object: - # Not all sessions will have stimulus_templates (e.g. gratings) - if session_object.stimulus_templates: - self._add_stimulus_templates( - nwbfile=nwbfile, - stimulus_templates=session_object.stimulus_templates, - stimulus_presentations=session_object.stimulus_presentations) + # Use the semi-private _stimulus_templates attribute because it is + # a StimulusTemplate object. The public stimulus_templates property + # of the session_object returns a DataFrame. + session_stimulus_templates = session_object._stimulus_templates + self._add_stimulus_templates( + nwbfile=nwbfile, + stimulus_templates=session_stimulus_templates, + stimulus_presentations=session_object.stimulus_presentations) # search for omitted rows and add stop_time before writing to NWB file set_omitted_stop_time( @@ -200,12 +202,10 @@ def get_stimulus_templates(self, **kwargs) -> Optional[StimulusTemplate]: image_attributes = [{'image_name': image_name} for image_name in image_data.control_description] - stimulus_templates = StimulusTemplate( - image_set_name=image_set_name, - image_attributes=image_attributes, - images=image_data.data[:] + return StimulusTemplateFactory.from_processed( + image_set_name=image_set_name, image_attributes=image_attributes, + warped=image_data.data[:], unwarped=image_data.unwarped[:] ) - return stimulus_templates def get_stimulus_timestamps(self) -> np.ndarray: stim_module = self.nwbfile.processing['stimulus'] @@ -224,7 +224,7 @@ def get_licks(self) -> np.ndarray: licks = 
lick_module.get_data_interface('licks') return pd.DataFrame({ - 'time': licks.timestamps[:], + 'timestamps': licks.timestamps[:], 'frame': licks.data[:] }) else: @@ -238,11 +238,11 @@ def get_rewards(self) -> np.ndarray: volume = rewards.get_data_interface('volume').data[:] return pd.DataFrame({ 'volume': volume, 'timestamps': time, - 'autorewarded': autorewarded}).set_index('timestamps') + 'autorewarded': autorewarded}) else: return pd.DataFrame({ 'volume': [], 'timestamps': [], - 'autorewarded': []}).set_index('timestamps') + 'autorewarded': []}) def get_metadata(self) -> dict: diff --git a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_json_api.py b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_json_api.py index 32b345623..c870f30b7 100644 --- a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_json_api.py +++ b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_json_api.py @@ -14,9 +14,10 @@ class BehaviorOphysJsonApi(BehaviorOphysDataTransforms): a specified raw data source (extractor). 
Contains all methods needed to fill a BehaviorOphysSession.""" - def __init__(self, data): + def __init__(self, data: dict, skip_eye_tracking: bool = False): extractor = BehaviorOphysJsonExtractor(data=data) - super().__init__(extractor=extractor) + super().__init__(extractor=extractor, + skip_eye_tracking=skip_eye_tracking) class BehaviorOphysJsonExtractor(BehaviorJsonExtractor, diff --git a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_lims_api.py b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_lims_api.py index 7e5a186c0..4a2b91d72 100644 --- a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_lims_api.py +++ b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_lims_api.py @@ -27,7 +27,8 @@ def __init__(self, ophys_experiment_id: Optional[int] = None, lims_credentials: Optional[DbCredentials] = None, mtrain_credentials: Optional[DbCredentials] = None, - extractor: Optional[BehaviorOphysDataExtractorBase] = None): + extractor: Optional[BehaviorOphysDataExtractorBase] = None, + skip_eye_tracking: bool = False): if extractor is None: if ophys_experiment_id is not None: @@ -40,7 +41,8 @@ def __init__(self, "BehaviorOphysLimsApi must be provided either an " "instantiated 'extractor' or an 'ophys_experiment_id'!") - super().__init__(extractor=extractor) + super().__init__(extractor=extractor, + skip_eye_tracking=skip_eye_tracking) class BehaviorOphysLimsExtractor(OphysLimsExtractor, BehaviorLimsExtractor, diff --git a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py index 22cedef12..8a72ed6ec 100644 --- a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py +++ b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py @@ -93,12 +93,14 @@ def save(self, session_object): from_dataframe=True) # Add 
stimulus template data to NWB in-memory object: - # Not all sessions will have stimulus_templates (e.g. gratings) - if session_object.stimulus_templates: - self._add_stimulus_templates( - nwbfile=nwbfile, - stimulus_templates=session_object.stimulus_templates, - stimulus_presentations=session_object.stimulus_presentations) + # Use the semi-private _stimulus_templates attribute because it is + # a StimulusTemplate object. The public stimulus_templates property + # of the session_object returns a DataFrame. + session_stimulus_templates = session_object._stimulus_templates + self._add_stimulus_templates( + nwbfile=nwbfile, + stimulus_templates=session_stimulus_templates, + stimulus_presentations=session_object.stimulus_presentations) # search for omitted rows and add stop_time before writing to NWB file set_omitted_stop_time( @@ -154,11 +156,13 @@ def save(self, session_object): # Add motion correction to NWB in-memory object: nwb.add_motion_correction(nwbfile, session_object.motion_correction) - # Add eye tracking and rig geometry to NWB in-memory object. - self.add_eye_tracking_data_to_nwb( - nwbfile=nwbfile, - eye_tracking_df=session_object.eye_tracking, - eye_tracking_rig_geometry=session_object.eye_tracking_rig_geometry) + # Add eye tracking and rig geometry to NWB in-memory object + # if eye_tracking data exists. 
+ if session_object.eye_tracking is not None: + self.add_eye_tracking_data_to_nwb( + nwbfile, + session_object.eye_tracking, + session_object.eye_tracking_rig_geometry) # Add events self.add_events(nwbfile=nwbfile, events=session_object.events) @@ -229,7 +233,7 @@ def get_eye_tracking(self, eye_tracking_acquisition.corneal_reflection_tracking eye_tracking_dict = { - "time": eye_tracking.timestamps[:], + "timestamps": eye_tracking.timestamps[:], "cr_area": corneal_reflection_tracking.area_raw[:], "eye_area": eye_tracking.area_raw[:], "pupil_area": pupil_tracking.area_raw[:], @@ -476,7 +480,7 @@ def add_eye_tracking_data_to_nwb(self, nwbfile: NWBFile, width=eye_tracking_df['eye_width'].values, height=eye_tracking_df['eye_height'].values, angle=eye_tracking_df['eye_phi'].values, - timestamps=eye_tracking_df['time'].values + timestamps=eye_tracking_df['timestamps'].values ) pupil_tracking = EllipseSeries( diff --git a/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py b/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py index a5b4b224d..317cfccc8 100644 --- a/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py +++ b/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py @@ -4,6 +4,7 @@ from typing import Any, Dict, Optional import warnings +import imageio import numpy as np import pandas as pd import pytz @@ -19,8 +20,7 @@ get_stimulus_metadata, get_stimulus_presentations, get_stimulus_templates, StimulusTemplate) from allensdk.brain_observatory.behavior.trials_processing import ( - get_extended_trials, get_trials) -from allensdk.brain_observatory.running_speed import RunningSpeed + get_extended_trials, get_trials_from_data_transform) from allensdk.core.exceptions import DataFrameIndexError @@ -66,6 +66,7 @@ def get_behavior_session_uuid(self) -> Optional[str]: return behavior_pkl_uuid + @memoize def 
get_licks(self) -> pd.DataFrame: """Get lick data from pkl file. This function assumes that the first sensor in the list of @@ -90,9 +91,28 @@ def get_licks(self) -> pd.DataFrame: stimulus_timestamps = self.get_stimulus_timestamps() lick_frames = (data["items"]["behavior"]["lick_sensors"][0] ["lick_events"]) + + # there's an occasional bug where the number of logged + # frames is one greater than the number of vsync intervals. + # If the animal licked on this last frame it will cause an + # error here. This fixes the problem. + # see: https://github.com/AllenInstitute/visual_behavior_analysis/issues/572 # noqa: E501 + # & https://github.com/AllenInstitute/visual_behavior_analysis/issues/379 # noqa:E501 + # + # This bugfix copied from + # https://github.com/AllenInstitute/visual_behavior_analysis/blob/master/visual_behavior/translator/foraging2/extract.py#L640-L647 + + if len(lick_frames) > 0: + if lick_frames[-1] == len(stimulus_timestamps): + lick_frames = lick_frames[:-1] + self.logger.error('removed last lick - ' + 'it fell outside of stimulus_timestamps ' + 'range') + lick_times = [stimulus_timestamps[frame] for frame in lick_frames] - return pd.DataFrame({"time": lick_times, "frame": lick_frames}) + return pd.DataFrame({"timestamps": lick_times, "frame": lick_frames}) + @memoize def get_rewards(self) -> pd.DataFrame: """Get reward data from pkl file, based on pkl file timestamps (not sync file). @@ -224,8 +244,62 @@ def get_stimulus_templates(self) -> Optional[StimulusTemplate]: ------- StimulusTemplate or None if there are no images for the experiment """ + + # TODO: Eventually the `grating_images_dict` should be provided by the + # BehaviorLimsExtractor/BehaviorJsonExtractor classes. 
+ # - NJM 2021/2/23 + grating_images_dict = { + "gratings_0.0": { + "warped": np.asarray(imageio.imread( + "/allen/programs/braintv/production/visualbehavior" + "/prod5/project_VisualBehavior/warped_grating_0.png")), + "unwarped": np.asarray(imageio.imread( + "/allen/programs/braintv/production/visualbehavior" + "/prod5/project_VisualBehavior/" + "masked_unwarped_grating_0.png")) + }, + "gratings_90.0": { + "warped": np.asarray(imageio.imread( + "/allen/programs/braintv/production/visualbehavior" + "/prod5/project_VisualBehavior/warped_grating_90.png")), + "unwarped": np.asarray(imageio.imread( + "/allen/programs/braintv/production/visualbehavior" + "/prod5/project_VisualBehavior" + "/masked_unwarped_grating_90.png")) + }, + "gratings_180.0": { + "warped": np.asarray(imageio.imread( + "/allen/programs/braintv/production/visualbehavior" + "/prod5/project_VisualBehavior/warped_grating_180.png")), + "unwarped": np.asarray(imageio.imread( + "/allen/programs/braintv/production/visualbehavior" + "/prod5/project_VisualBehavior/" + "masked_unwarped_grating_180.png")) + }, + "gratings_270.0": { + "warped": np.asarray(imageio.imread( + "/allen/programs/braintv/production/visualbehavior" + "/prod5/project_VisualBehavior/warped_grating_270.png")), + "unwarped": np.asarray(imageio.imread( + "/allen/programs/braintv/production/visualbehavior" + "/prod5/project_VisualBehavior" + "/masked_unwarped_grating_270.png")) + } + } + pkl = self._behavior_stimulus_file() - return get_stimulus_templates(pkl=pkl) + return get_stimulus_templates(pkl=pkl, + grating_images_dict=grating_images_dict) + + def get_monitor_delay(self) -> float: + """ + Return monitor delay for behavior only sessions + (in seconds) + """ + # This is the median estimate across all rigs + # as discussed in + # https://github.com/AllenInstitute/AllenSDK/issues/1318 + return 0.02115 def get_stimulus_timestamps(self) -> np.ndarray: """Get stimulus timestamps (vsyncs) from pkl file. 
Align to the @@ -257,6 +331,7 @@ def get_task_parameters(self) -> dict: data = self._behavior_stimulus_file() return get_task_parameters(data) + @memoize def get_trials(self) -> pd.DataFrame: """Get trials from pkl file @@ -266,15 +341,8 @@ def get_trials(self) -> pd.DataFrame: A dataframe containing behavioral trial start/stop times, and trial data """ - timestamps = self.get_stimulus_timestamps() - licks = self.get_licks() - data = self._behavior_stimulus_file() - rewards = self.get_rewards() - trial_df = get_trials(data, - licks, - rewards, - timestamps) + trial_df = get_trials_from_data_transform(self) return trial_df diff --git a/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_ophys_data_transforms.py b/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_ophys_data_transforms.py index 808824d04..5302a98e4 100644 --- a/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_ophys_data_transforms.py +++ b/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_ophys_data_transforms.py @@ -9,6 +9,8 @@ import xarray as xr import pandas as pd +import warnings + from allensdk.api.cache import memoize from allensdk.brain_observatory.behavior.event_detection import \ filter_events_array @@ -20,7 +22,6 @@ from allensdk.brain_observatory import sync_utilities from allensdk.internal.brain_observatory.time_sync import OphysTimeAligner from allensdk.brain_observatory.behavior.rewards_processing import get_rewards -from allensdk.brain_observatory.behavior.trials_processing import get_trials from allensdk.brain_observatory.behavior.eye_tracking_processing import ( load_eye_tracking_hdf, process_eye_tracking_data) from allensdk.brain_observatory.behavior.image_api import ImageApi @@ -36,14 +37,16 @@ class BehaviorOphysDataTransforms(BehaviorDataTransforms, BehaviorOphysBase): populating a BehaviorOphysSession. 
""" - def __init__(self, extractor: BehaviorOphysDataExtractorBase): + def __init__(self, + extractor: BehaviorOphysDataExtractorBase, + skip_eye_tracking: bool): super().__init__(extractor=extractor) # Type checker not able to resolve that self.extractor is a # BehaviorOphysDataExtractorBase. Explicitly adding as instance # attribute fixes the issue. self.extractor = extractor - + self._skip_eye_tracking = skip_eye_tracking self.logger = logging.getLogger(self.__class__.__name__) def get_ophys_session_id(self): @@ -52,8 +55,11 @@ def get_ophys_session_id(self): def get_ophys_experiment_id(self): return self.extractor.get_ophys_experiment_id() - def get_eye_tracking_rig_geometry(self) -> dict: - return self.extractor.get_eye_tracking_rig_geometry() + def get_eye_tracking_rig_geometry(self) -> Optional[dict]: + if self._skip_eye_tracking: + return None + else: + return self.extractor.get_eye_tracking_rig_geometry() @memoize def get_cell_specimen_table(self): @@ -138,12 +144,63 @@ def get_sync_data(self): sync_path = self.extractor.get_sync_file() return get_sync_data(sync_path) - @memoize - def get_stimulus_timestamps(self): + def _load_stimulus_timestamps_and_delay(self): + """ + Load the stimulus timestamps (uncorrected for + monitor delay) and the monitor delay + """ sync_path = self.extractor.get_sync_file() - timestamps, _, _ = (OphysTimeAligner(sync_file=sync_path) - .corrected_stim_timestamps) - return np.array(timestamps) + aligner = OphysTimeAligner(sync_file=sync_path) + (self._stimulus_timestamps, + delta) = aligner.clipped_stim_timestamps + + try: + delay = aligner.monitor_delay + except ValueError as ee: + rig_name = self.get_metadata()['rig_name'] + + warning_msg = 'Monitory delay calculation failed ' + warning_msg += 'with ValueError\n' + warning_msg += f' "{ee}"' + warning_msg += '\nlooking monitor delay up from table ' + warning_msg += f'for rig: {rig_name} ' + + # see + # https://github.com/AllenInstitute/AllenSDK/issues/1318 + # 
https://github.com/AllenInstitute/AllenSDK/issues/1916 + delay_lookup = {'CAM2P.1': 0.020842, + 'CAM2P.2': 0.037566, + 'CAM2P.3': 0.021390, + 'CAM2P.4': 0.021102, + 'CAM2P.5': 0.021192, + 'MESO.1': 0.03613} + + if rig_name not in delay_lookup: + msg = warning_msg + msg += f'\nrig_name {rig_name} not in lookup table' + raise RuntimeError(msg) + delay = delay_lookup[rig_name] + warning_msg += f'\ndelay: {delay} seconds' + warnings.warn(warning_msg) + + self._monitor_delay = delay + + def get_stimulus_timestamps(self): + """ + Return a numpy array of stimulus timestamps uncorrected + for monitor delay (in seconds) + """ + if not hasattr(self, '_stimulus_timestamps'): + self._load_stimulus_timestamps_and_delay() + return self._stimulus_timestamps + + def get_monitor_delay(self): + """ + Return the monitor delay (in seconds) + """ + if not hasattr(self, '_monitor_delay'): + self._load_stimulus_timestamps_and_delay() + return self._monitor_delay @staticmethod def _process_ophys_plane_timestamps( @@ -274,46 +331,12 @@ def get_sync_licks(self): lick_times = self.get_sync_data()['lick_times'] return pd.DataFrame({'time': lick_times}) - @memoize - def get_licks(self) -> pd.DataFrame: - """ - Returns - ------- - pd.DataFrame - Two columns: "time", which contains the sync time - of the licks that occurred in this session and "frame", - the frame numbers of licks that occurred in this session - """ - data = self._behavior_stimulus_file() - timestamps = self.get_stimulus_timestamps() - # Get licks from pickle file (need to add an offset to align with - # the trial_log time stream) - lick_frames = (data["items"]["behavior"]["lick_sensors"][0] - ["lick_events"]) - lick_times = timestamps[lick_frames] - return pd.DataFrame({"time": lick_times, "frame": lick_frames}) - @memoize def get_rewards(self): data = self._behavior_stimulus_file() timestamps = self.get_stimulus_timestamps() return get_rewards(data, timestamps) - @memoize - def get_trials(self): - - timestamps = 
self.get_stimulus_timestamps() - licks = self.get_licks() - rewards = self.get_rewards() - data = self._behavior_stimulus_file() - - trial_df = get_trials(data, - licks, - rewards, - timestamps) - - return trial_df - @memoize def get_corrected_fluorescence_traces(self): demix_file = self.extractor.get_demix_file() @@ -379,7 +402,7 @@ def get_motion_correction(self): @memoize def get_eye_tracking(self, z_threshold: float = 3.0, - dilation_frames: int = 2): + dilation_frames: int = 2) -> Optional[pd.DataFrame]: """Gets corneal, eye, and pupil ellipse fit data Parameters @@ -394,7 +417,7 @@ def get_eye_tracking(self, Returns ------- - pd.DataFrame + Optional[pd.DataFrame] *_area *_center_x *_center_y @@ -403,12 +426,15 @@ def get_eye_tracking(self, *_width likely_blink where "*" can be "corneal", "pupil" or "eye" + + Will return None if class attr _skip_eye_tracking is True. """ - logger = logging.getLogger("BehaviorOphysLimsApi") + if self._skip_eye_tracking: + return None - logger.info(f"Getting eye_tracking_data with " - f"'z_threshold={z_threshold}', " - f"'dilation_frames={dilation_frames}'") + self.logger.info(f"Getting eye_tracking_data with " + f"'z_threshold={z_threshold}', " + f"'dilation_frames={dilation_frames}'") filepath = Path(self.extractor.get_eye_tracking_filepath()) sync_path = Path(self.extractor.get_sync_file()) diff --git a/allensdk/brain_observatory/behavior/stimulus_processing/__init__.py b/allensdk/brain_observatory/behavior/stimulus_processing/__init__.py index 54ff4d02a..e3fa74852 100644 --- a/allensdk/brain_observatory/behavior/stimulus_processing/__init__.py +++ b/allensdk/brain_observatory/behavior/stimulus_processing/__init__.py @@ -1,12 +1,13 @@ import numpy as np import pandas as pd import pickle +import warnings from typing import Dict, List, Tuple, Union, Optional from allensdk.brain_observatory.behavior.stimulus_processing.util import \ convert_filepath_caseinsensitive, get_image_set_name from 
allensdk.brain_observatory.behavior.stimulus_processing.stimulus_templates import \ - StimulusTemplate + StimulusTemplate, StimulusTemplateFactory def load_pickle(pstream): @@ -25,11 +26,16 @@ def get_stimulus_presentations(data, stimulus_timestamps) -> pd.DataFrame: as what stimuli was presented """ stimulus_table = get_visual_stimuli_df(data, stimulus_timestamps) - # workaround to rename columns to harmonize with visual coding and rebase timestamps to sync time - stimulus_table.insert(loc=0, column='flash_number', value=np.arange(0, len(stimulus_table))) + # workaround to rename columns to harmonize with visual + # coding and rebase timestamps to sync time + stimulus_table.insert(loc=0, column='flash_number', + value=np.arange(0, len(stimulus_table))) stimulus_table = stimulus_table.rename( - columns={'frame': 'start_frame', 'time': 'start_time', 'flash_number': 'stimulus_presentations_id'}) - stimulus_table.start_time = [stimulus_timestamps[int(start_frame)] for start_frame in + columns={'frame': 'start_frame', + 'time': 'start_time', + 'flash_number': 'stimulus_presentations_id'}) + stimulus_table.start_time = [stimulus_timestamps[int(start_frame)] + for start_frame in stimulus_table.start_frame.values] end_time = [] for end_frame in stimulus_table.end_frame.values: @@ -59,15 +65,17 @@ def get_images_dict(pkl) -> Dict: ------- Dict: A dictionary containing keys images, metadata, and image_attributes. - These correspond to paths to images to images presented, metadata + These correspond to paths to image arrays presented, metadata on the whole set of images, and metadata on specific images, respectively. 
""" # Sometimes the source is a zipped pickle: - metadata = {'image_set': pkl["items"]["behavior"]["stimuli"]["images"]["image_path"]} + pkl_stimuli = pkl["items"]["behavior"]["stimuli"] + metadata = {'image_set': pkl_stimuli["images"]["image_path"]} - # Get image file name; these are encoded case-insensitive in the pickle file :/ + # Get image file name; + # These are encoded case-insensitive in the pickle file :/ filename = convert_filepath_caseinsensitive(metadata['image_set']) image_set = load_pickle(open(filename, 'rb')) @@ -107,8 +115,11 @@ def get_gratings_metadata(stimuli: Dict, start_idx: int = 0) -> pd.DataFrame: this experiment it returns an empty dataframe with the expected columns. Parameters ---------- - stimuli: the stimuli loaded from the experiment pkl file - start_idx: the index to start index column + stimuli: + The stimuli field (pkl['items']['behavior']['stimuli']) loaded + from the experiment pkl file. + start_idx: + The index to start index column Returns ------- @@ -152,12 +163,26 @@ def get_gratings_metadata(stimuli: Dict, start_idx: int = 0) -> pd.DataFrame: return grating_df -def get_stimulus_templates(pkl) -> Optional[StimulusTemplate]: +def get_stimulus_templates(pkl: dict, + grating_images_dict: Optional[dict] = None + ) -> Optional[StimulusTemplate]: """ - Gets images presented during experimentation + Gets images presented during experiments from the behavior stimulus file + (*.pkl) + Parameters ---------- - pkl: pkl file containing the data for the presented stimuli + pkl : dict + Loaded pkl dict containing data for the presented stimuli. + grating_images_dict : Optional[dict] + Because behavior pkl files do not contain image versions of grating + stimuli, they must be obtained from an external source. The + grating_images_dict is a nested dictionary where top level keys + correspond to grating image names (e.g. 'gratings_0.0', + 'gratings_270.0') as they would appear in table returned by + get_gratings_metadata(). 
Sub-nested dicts are expected to have 'warped' + and 'unwarped' keys where values are numpy image arrays + of aforementioned warped or unwarped grating stimuli. Returns ------- @@ -166,17 +191,52 @@ def get_stimulus_templates(pkl) -> Optional[StimulusTemplate]: the experiment """ - if 'images' not in pkl['items']['behavior']['stimuli']: - return None - images = get_images_dict(pkl) - image_set_filepath = images['metadata']['image_set'] - image_set_name = get_image_set_name(image_set_path=image_set_filepath) - image_set_name = convert_filepath_caseinsensitive( - image_set_name) - return StimulusTemplate( - image_set_name=image_set_name, - image_attributes=images['image_attributes'], images=images['images']) + pkl_stimuli = pkl['items']['behavior']['stimuli'] + if 'images' in pkl_stimuli: + images = get_images_dict(pkl) + image_set_filepath = images['metadata']['image_set'] + image_set_name = get_image_set_name(image_set_path=image_set_filepath) + image_set_name = convert_filepath_caseinsensitive( + image_set_name) + + return StimulusTemplateFactory.from_unprocessed( + image_set_name=image_set_name, + image_attributes=images['image_attributes'], + images=images['images'] + ) + elif 'grating' in pkl_stimuli: + if (grating_images_dict is None) or (not grating_images_dict): + raise RuntimeError("The 'grating_images_dict' param MUST " + "be provided to get stimulus templates " + "because this pkl data contains " + "gratings presentations.") + gratings_metadata = get_gratings_metadata( + pkl_stimuli).to_dict(orient='records') + + unwarped_images = [] + warped_images = [] + for image_attrs in gratings_metadata: + image_name = image_attrs['image_name'] + grating_imgs_sub_dict = grating_images_dict[image_name] + unwarped_images.append(grating_imgs_sub_dict['unwarped']) + warped_images.append(grating_imgs_sub_dict['warped']) + + return StimulusTemplateFactory.from_processed( + image_set_name='grating', + image_attributes=gratings_metadata, + unwarped=unwarped_images, + 
warped=warped_images + ) + else: + warnings.warn( + "Could not determine stimulus template images from pkl file. " + f"The pkl stimuli nested dict " + "(pkl['items']['behavior']['stimuli']) contained neither " + "'images' nor 'grating' but instead: " + f"'{pkl_stimuli.keys()}'" + ) + return None def get_stimulus_metadata(pkl) -> pd.DataFrame: @@ -202,7 +262,8 @@ def get_stimulus_metadata(pkl) -> pd.DataFrame: if 'images' in stimuli: images = get_images_dict(pkl) stimulus_index_df = pd.DataFrame(images['image_attributes']) - image_set_filename = convert_filepath_caseinsensitive(images['metadata']['image_set']) + image_set_filename = convert_filepath_caseinsensitive( + images['metadata']['image_set']) stimulus_index_df['image_set'] = get_image_set_name( image_set_path=image_set_filename) else: @@ -369,8 +430,8 @@ def get_visual_stimuli_df(data, time) -> pd.DataFrame: for idx, (epoch_start, epoch_end,) in enumerate(draw_epochs): # visual stimulus doesn't actually change until start of - # following frame, so we need to bump the epoch_start & epoch_end - # to get the timing right + # following frame, so we need to bump the + # epoch_start & epoch_end to get the timing right epoch_start += 1 epoch_end += 1 @@ -381,7 +442,8 @@ def get_visual_stimuli_df(data, time) -> pd.DataFrame: "end_frame": epoch_end, "time": time[epoch_start], "duration": time[epoch_end] - time[epoch_start], - # this will always work because an epoch will never occur near the end of time + # this will always work because an epoch + # will never occur near the end of time "omitted": False, }) @@ -396,14 +458,17 @@ def get_visual_stimuli_df(data, time) -> pd.DataFrame: omitted_flash_frame_log = dict() omitted_flash_list = [] - - for stimuli_group_name, omitted_flash_frames in omitted_flash_frame_log.items(): + for _, omitted_flash_frames in omitted_flash_frame_log.items(): stim_frames = visual_stimuli_df['frame'].values omitted_flash_frames = np.array(omitted_flash_frames) - # Test offsets of omitted 
flash frames to see if they are in the stim log + # Test offsets of omitted flash frames + # to see if they are in the stim log offsets = np.arange(-3, 4) - offset_arr = np.add(np.repeat(omitted_flash_frames[:, np.newaxis], offsets.shape[0], axis=1), offsets) + offset_arr = np.add( + np.repeat(omitted_flash_frames[:, np.newaxis], + offsets.shape[0], axis=1), + offsets) matched_any_offset = np.any(np.isin(offset_arr, stim_frames), axis=1) # Remove omitted flashes that also exist in the stimulus log @@ -415,8 +480,11 @@ def get_visual_stimuli_df(data, time) -> pd.DataFrame: omitted = np.ones_like(omitted_flash_list).astype(bool) time = [time[fi] for fi in omitted_flash_list] - omitted_df = pd.DataFrame({'omitted': omitted, 'frame': omitted_flash_list, 'time': time, + omitted_df = pd.DataFrame({'omitted': omitted, + 'frame': omitted_flash_list, + 'time': time, 'image_name': 'omitted'}) - df = pd.concat((visual_stimuli_df, omitted_df), sort=False).sort_values('frame').reset_index() + df = pd.concat((visual_stimuli_df, omitted_df), + sort=False).sort_values('frame').reset_index() return df diff --git a/allensdk/brain_observatory/behavior/stimulus_processing/stimulus_templates.py b/allensdk/brain_observatory/behavior/stimulus_processing/stimulus_templates.py index 434709f7e..bad59e0b9 100644 --- a/allensdk/brain_observatory/behavior/stimulus_processing/stimulus_templates.py +++ b/allensdk/brain_observatory/behavior/stimulus_processing/stimulus_templates.py @@ -1,46 +1,101 @@ from typing import Dict, List -import warnings import numpy as np import pandas as pd from allensdk.brain_observatory.behavior.stimulus_processing.util import \ convert_filepath_caseinsensitive +from allensdk.brain_observatory.stimulus_info import BrainObservatoryMonitor -class StimulusImage(np.ndarray): +class StimulusImage: """Container class for image stimuli""" - def __new__(cls, input_array: np.ndarray, name: str): + + def __init__(self, warped: np.ndarray, unwarped: np.ndarray, name: str): """ 
Parameters ---------- + warped: + The warped stimulus image + unwarped: + The unwarped stimulus image name: - Name of the image - values - The unwarped image values + Name of the stimulus image """ - obj = np.asarray(input_array).view(cls) - obj._name = name - return obj + self._name = name + self.warped = warped + self.unwarped = unwarped @property def name(self): return self._name +class StimulusImageFactory: + """Factory for StimulusImage""" + _monitor = BrainObservatoryMonitor() + + def from_unprocessed(self, input_array: np.ndarray, + name: str) -> StimulusImage: + """Creates a StimulusImage from unprocessed input (usually pkl). + Image needs to be warped and preprocessed""" + resized, unwarped = self._get_unwarped(arr=input_array) + warped = self._get_warped(arr=resized) + image = StimulusImage(name=name, warped=warped, unwarped=unwarped) + return image + + @staticmethod + def from_processed(warped: np.ndarray, unwarped: np.ndarray, + name: str) -> StimulusImage: + """Creates a StimulusImage from processed input (usually nwb). + Image has already been warped and preprocessed""" + image = StimulusImage(name=name, warped=warped, unwarped=unwarped) + return image + + def _get_warped(self, arr: np.ndarray): + """Note: The Stimulus image is warped when shown to the mice to account + "for distance of the flat screen to the eye at each point on + the monitor.""" + return self._monitor.warp_image(img=arr) + + def _get_unwarped(self, arr: np.ndarray): + """This produces the pixels that would be visible in the unwarped image + post-warping""" + # 1. Resize image to the same size as the monitor + resized_array = self._monitor.natural_scene_image_to_screen( + arr, origin='upper') + # 2. Remove unseen pixels + arr = self._exclude_unseen_pixels(arr=resized_array) + + return resized_array, arr + + def _exclude_unseen_pixels(self, arr: np.ndarray): + """After warping, some pixels are not visible on the screen. 
+ This sets those pixels to nan to make downstream analysis easier.""" + mask = self._monitor.get_mask() + arr = arr.astype(np.float) + arr *= mask + arr[arr == 0] = np.nan + return arr + + def _warp(self, arr: np.ndarray) -> np.ndarray: + """The Stimulus image is warped when shown to the mice to account + "for distance of the flat screen to the eye at each point on + the monitor." This applies the warping.""" + return self._monitor.warp_image(img=arr) + + class StimulusTemplate: """Container class for a collection of image stimuli""" - def __init__(self, image_set_name: str, image_attributes: List[dict], - images: List[np.ndarray]): + + def __init__(self, image_set_name: str, images: List[StimulusImage]): """ Parameters ---------- image_set_name: the name of the image set - image_attributes - List of image attributes as returned by the stimulus pkl images - List of images as returned by the stimulus pkl + List of images """ self._image_set_name = image_set_name @@ -50,9 +105,8 @@ def __init__(self, image_set_name: str, image_attributes: List[dict], self._images: Dict[str, StimulusImage] = {} - for attr, image in zip(image_attributes, images): - image_name = attr['image_name'] - self.__add_image(name=image_name, values=image) + for image in images: + self._images[image.name] = image @property def image_set_name(self) -> str: @@ -77,20 +131,30 @@ def items(self): def to_dataframe(self) -> pd.DataFrame: index = pd.Index(self.image_names, name='image_name') - df = pd.DataFrame({'image': self.images}, index=index) + warped = [img.warped for img in self.images] + unwarped = [img.unwarped for img in self.images] + df = pd.DataFrame({'unwarped': unwarped, 'warped': warped}, + index=index) df.name = self._image_set_name return df - def __add_image(self, name: str, values: np.ndarray): + def __add_image(self, warped_values: np.ndarray, + unwarped_values: np.ndarray, name: str): """ Parameters ---------- - name: + name : str Name of the image - values - The unwarped image 
values + warped_values : np.ndarray + The image array corresponding to the 'warped' version of the + stimuli. + unwarped_values : np.ndarray + The image array corresponding to the 'unwarped' version of the + stimuli. """ - image = StimulusImage(input_array=values, name=name) + image = StimulusImage(warped=warped_values, + unwarped=unwarped_values, + name=name) self._images[name] = image def __getitem__(self, item) -> StimulusImage: @@ -118,7 +182,12 @@ def __eq__(self, other: object): for (img_name, self_img) in self.items(): other_img = other._images[img_name] - if not np.array_equal(self_img, other_img): + warped_equal = np.array_equal( + self_img.warped, other_img.warped) + unwarped_equal = np.allclose(self_img.unwarped, + other_img.unwarped, + equal_nan=True) + if not (warped_equal and unwarped_equal): return False return True @@ -126,3 +195,96 @@ def __eq__(self, other: object): raise NotImplementedError( "Cannot compare a StimulusTemplate with an object of type: " f"{type(other)}!") + + +class StimulusTemplateFactory: + """Factory for StimulusTemplate""" + + @staticmethod + def from_unprocessed(image_set_name: str, image_attributes: List[dict], + images: List[np.ndarray]) -> StimulusTemplate: + """Create StimulusTemplate from pkl or unprocessed input. Stimulus + templates created this way need to be processed to acquire unwarped + versions of the images presented. + + NOTE: The ordering of image_attributes and images matter! + + NOTE: Warped images display what was seen on a monitor by a subject. + Unwarped images display a 'diagnostic' version of the stimuli to be + presented. + + Parameters + ---------- + image_set_name : str + The name of the image set. Example: + Natural_Images_Lum_Matched_set_TRAINING_2017.07.14 + image_attributes : List[dict] + A list of dictionaries containing image metadata. 
Must at least + contain the key: + image_name + But will usually also contain: + image_category, orientation, phase, + spatial_frequency, image_index + images : List[np.ndarray] + A list of image arrays + + Returns + ------- + StimulusTemplate + A StimulusTemplate object + """ + stimulus_images = [] + for i, image in enumerate(images): + name = image_attributes[i]['image_name'] + stimulus_image = StimulusImageFactory().from_unprocessed( + name=name, input_array=image) + stimulus_images.append(stimulus_image) + return StimulusTemplate(image_set_name=image_set_name, + images=stimulus_images) + + @staticmethod + def from_processed(image_set_name: str, image_attributes: List[dict], + unwarped: List[np.ndarray], + warped: List[np.ndarray]) -> StimulusTemplate: + """Create StimulusTemplate from nwb or other processed input. + Stimulus templates created this way DO NOT need to be processed + to acquire unwarped versions of the images presented. + + NOTE: The ordering of image_attributes, unwarped, and warped matter! + + NOTE: Warped images display what was seen on a monitor by a subject. + Unwarped images display a 'diagnostic' version of the stimuli to be + presented. + + Parameters + ---------- + image_set_name : str + The name of the image set. Example: + Natural_Images_Lum_Matched_set_TRAINING_2017.07.14 + image_attributes : List[dict] + A list of dictionaries containing image metadata. 
Must at least + contain the key: + image_name + But will usually also contain: + image_category, orientation, phase, + spatial_frequency, image_index + unwarped : List[np.ndarray] + A list of unwarped image arrays + warped : List[np.ndarray] + A list of warped image arrays + + Returns + ------- + StimulusTemplate + A StimulusTemplate object + """ + stimulus_images = [] + for i, attrs in enumerate(image_attributes): + warped_image = warped[i] + unwarped_image = unwarped[i] + name = attrs['image_name'] + stimulus_image = StimulusImageFactory.from_processed( + name=name, warped=warped_image, unwarped=unwarped_image) + stimulus_images.append(stimulus_image) + return StimulusTemplate(image_set_name=image_set_name, + images=stimulus_images) diff --git a/allensdk/brain_observatory/behavior/trials_processing.py b/allensdk/brain_observatory/behavior/trials_processing.py index ec3ef9566..9053e9677 100644 --- a/allensdk/brain_observatory/behavior/trials_processing.py +++ b/allensdk/brain_observatory/behavior/trials_processing.py @@ -13,21 +13,24 @@ TRIAL_COLUMN_DESCRIPTION_DICT = {} EDF_COLUMNS = ['index', 'lick_times', 'auto_rewarded', 'cumulative_volume', - 'cumulative_reward_number', 'reward_volume', 'reward_times', - 'reward_frames', 'rewarded', 'optogenetics', 'response_type', - 'response_time', 'change_time', 'change_frame', 'response_latency', - 'starttime', 'startframe', 'trial_length', 'scheduled_change_time', - 'endtime', 'endframe', 'initial_image_category', 'initial_image_name', - 'change_image_name', 'change_image_category', 'change_ori', - 'change_contrast', 'initial_ori', 'initial_contrast', 'delta_ori', - 'mouse_id', 'response_window', 'task', 'stage', 'session_duration', - 'user_id', 'LDT_mode', 'blank_screen_timeout', 'stim_duration', - 'blank_duration_range', 'prechange_minimum', 'stimulus_distribution', - 'stimulus', 'distribution_mean', 'computer_name', - 'behavior_session_uuid', 'startdatetime', 'date', 'year', 'month', - 'day', 'hour', 'dayofweek', 
'number_of_rewards', 'rig_id', 'trial_type', - 'lick_frames', 'reward_licks', 'reward_lick_count', - 'reward_lick_latency', 'reward_rate', 'response', 'color'] + 'cumulative_reward_number', 'reward_volume', 'reward_times', + 'reward_frames', 'rewarded', 'optogenetics', 'response_type', + 'response_time', 'change_time', 'change_frame', + 'response_latency', 'starttime', 'startframe', 'trial_length', + 'scheduled_change_time', 'endtime', 'endframe', + 'initial_image_category', 'initial_image_name', + 'change_image_name', 'change_image_category', 'change_ori', + 'change_contrast', 'initial_ori', 'initial_contrast', + 'delta_ori', 'mouse_id', 'response_window', 'task', 'stage', + 'session_duration', 'user_id', 'LDT_mode', + 'blank_screen_timeout', 'stim_duration', + 'blank_duration_range', 'prechange_minimum', + 'stimulus_distribution', 'stimulus', 'distribution_mean', + 'computer_name', 'behavior_session_uuid', 'startdatetime', + 'date', 'year', 'month', 'day', 'hour', 'dayofweek', + 'number_of_rewards', 'rig_id', 'trial_type', + 'lick_frames', 'reward_licks', 'reward_lick_count', + 'reward_lick_latency', 'reward_rate', 'response', 'color'] RIG_NAME = { 'W7DTMJ19R2F': 'A1', @@ -119,7 +122,9 @@ def resolve_initial_image(stimuli, start_frame): for set_event in stim_dict["set_log"]: set_frame = set_event[3] if set_frame <= start_frame and set_frame >= max_frame: - initial_image_group = initial_image_name = set_event[1] # hack assumes initial_image_group == initial_image_name, only initial_image_name is present for natual_scenes + # hack assumes initial_image_group == initial_image_name, + # only initial_image_name is present for natual_scenes + initial_image_group = initial_image_name = set_event[1] initial_image_category_name = stim_category_name if initial_image_category_name == 'grating': initial_image_name = f'gratings_{initial_image_name}' @@ -131,7 +136,7 @@ def resolve_initial_image(stimuli, start_frame): def trial_data_from_log(trial): ''' Infer trial logic from 
trial log. Returns a dictionary. - + * reward volume: volume of water delivered on the trial, in mL Each of the following values is boolean: @@ -142,17 +147,20 @@ def trial_data_from_log(trial): stimulus_change/sham_change are mutually exclusive * stimulus_change: did the stimulus change (True on 'go' trials) - * sham_change: stimulus did not change, but response was evaluated (True on 'catch' trials) + * sham_change: stimulus did not change, but response was evaluated + (True on 'catch' trials) Each trial can be one (and only one) of the following: * hit (stimulus changed, animal responded in response window) * miss (stimulus changed, animal did not respond in response window) - * false_alarm (stimulus did not change, animal responded in response window) - * correct_reject (stimulus did not change, animal did not respond in response window) + * false_alarm (stimulus did not change, + animal responded in response window) + * correct_reject (stimulus did not change, + animal did not respond in response window) * aborted (animal responded before change time) - * auto_rewarded (reward was automatically delivered following the change. This will bias the animals choice and should not be categorized as hit/miss) - - + * auto_rewarded (reward was automatically delivered following the change. 
+ This will bias the animals choice and should not be + categorized as hit/miss) ''' trial_event_names = [val[0] for val in trial['events']] hit = 'hit' in trial_event_names @@ -190,21 +198,26 @@ def trial_data_from_log(trial): def validate_trial_condition_exclusivity(trial_index, **trial_conditions): - '''ensure that only one of N possible mutually exclusive trial conditions is True''' + '''ensure that only one of N possible mutually + exclusive trial conditions is True''' on = [] for condition, value in trial_conditions.items(): if value: on.append(condition) - + if len(on) != 1: all_conditions = list(trial_conditions.keys()) - raise AssertionError(f"expected exactly 1 trial condition out of {all_conditions} to be True, instead {on} were True (trial {trial_index})") + msg = f"expected exactly 1 trial condition out of {all_conditions} " + msg += f"to be True, instead {on} were True (trial {trial_index})" + raise AssertionError(msg) -def get_trial_reward_time(rebased_reward_times, start_time, stop_time): +def get_trial_reward_time(rebased_reward_times, + start_time, + stop_time): '''extract reward times in time range''' reward_times = rebased_reward_times[np.where(np.logical_and( - rebased_reward_times >= start_time, + rebased_reward_times >= start_time, rebased_reward_times <= stop_time ))] return float('nan') if len(reward_times) == 0 else one(reward_times) @@ -244,7 +257,8 @@ def get_trial_timing( event_dict: dict, licks: List[float], go: bool, catch: bool, auto_rewarded: bool, hit: bool, false_alarm: bool, aborted: bool, - timestamps: np.ndarray): + timestamps: np.ndarray, + monitor_delay: float): """ Extract a dictionary of trial timing data. See trial_data_from_log for a description of the trial types. 
@@ -273,6 +287,8 @@ def get_trial_timing( timestamps: np.ndarray[1d] Array of ground truth timestamps for the session (sync times, if available) + monitor_delay: float + The monitor delay in seconds associated with the session Returns ======= @@ -324,10 +340,10 @@ def get_trial_timing( if go or auto_rewarded: change_frame = event_dict.get(('stimulus_changed', ''))['frame'] - change_time = timestamps[change_frame] + change_time = timestamps[change_frame] + monitor_delay elif catch: change_frame = event_dict.get(('sham_change', ''))['frame'] - change_time = timestamps[change_frame] + change_time = timestamps[change_frame] + monitor_delay else: change_time = float("nan") change_frame = float("nan") @@ -372,7 +388,10 @@ def get_trial_image_names(trial, stimuli) -> Dict[str, str]: if len(trial["stimulus_changes"]) == 0: change_image_name = initial_image_name else: - (from_set, from_name), (to_set, to_name), _, _ = trial["stimulus_changes"][0] + ((from_set, from_name), + (to_set, to_name), + _, _) = trial["stimulus_changes"][0] + # do this to fix names if the stimuli is a grating if from_set in grating_oris: from_name = f'gratings_{from_name}' @@ -400,10 +419,12 @@ def get_trial_bounds(trial_log: List) -> List: Returns ------- list - Each element in the list is a tuple of the form (start_frame, end_frame) - so that the ith element of the list gives the start and end frames of - the ith trial. The endframe of the last trial will be -1, indicating that - it should map to the last timestamp in the session + Each element in the list is a tuple of the form + (start_frame, end_frame) so that the ith element + of the list gives the start and end frames of + the ith trial. 
The endframe of the last trial will + be -1, indicating that it should map to the last + timestamp in the session """ start_frames = [] @@ -431,41 +452,73 @@ def get_trial_bounds(trial_log: List) -> List: return list([(s, e) for s, e in zip(start_frames, end_frames)]) -def get_trials(data: Dict, - licks_df: pd.DataFrame, - rewards_df: pd.DataFrame, - timestamps: np.ndarray) -> pd.DataFrame: +def get_trials_from_data_transform(input_transform) -> pd.DataFrame: """ Create and return a pandas DataFrame containing data about the trials associated with this session Parameters ---------- - data: dict - The dict resulting from reading in this session's - stimulus_data pickle file - - licks_df: pd.DataFrame - A dataframe whose only column is the timestamps - of licks. - - rewards_df: pd.DataFrame - A dataframe containing data about rewards given - during this session. Output of - allensdk/brain_observatory/behavior/rewards_processing.get_rewards - - timestamps: np.ndarray[1d] - An ndarray containing the timestamps associated with each - stimulus frame in this session. Should be the sync timestamps - if available. + input_transform: + An instantiation of a class that inherits from either + BehaviorDataTransform or BehaviorOphysDataTransform. + This object will be used to get at the data needed by + this method to create the trials dataframe. Returns ------- pd.DataFrame A dataframe containing data pertaining to the trials that make up this session + + Notes + ----- + The input_transform object must have the following methods: + + input_transform._behavior_stimulus_file + Which returns the dict resulting from reading in this session's + stimulus_data pickle file + + input_transform.get_rewards + Which returns a dataframe containing data about rewards given + during this session, i.e. 
the output of + allensdk/brain_observatory/behavior/rewards_processing.get_rewards + + input_transform.get_licks + Which returns a dataframe containing the columns `time` and `frame` + denoting the time (in seconds) and frame number at which licks + occurred during this session + + input_transform.get_stimulus_timestamps + Which returns a numpy.ndarray of timestamps (in seconds) associated + with the frames presented in this session. + + input_transform.get_monitor_delay + Which returns the monitory delay (in seconds) associated with the + experimental rig """ - assert rewards_df.index.name == 'timestamps' + + missing_data_streams = [] + for method_name in ('get_rewards', 'get_licks', + 'get_stimulus_timestamps', + 'get_monitor_delay', + '_behavior_stimulus_file'): + if not hasattr(input_transform, method_name): + missing_data_streams.append(method_name) + if len(missing_data_streams) > 0: + msg = 'Cannot run trials_processing.get_trials\n' + msg += 'The object you passed as input is missing ' + msg += 'the following required methods:\n' + for method_name in missing_data_streams: + msg += f'{method_name}\n' + raise ValueError(msg) + + rewards_df = input_transform.get_rewards() + licks_df = input_transform.get_licks() + timestamps = input_transform.get_stimulus_timestamps() + monitor_delay = input_transform.get_monitor_delay() + data = input_transform._behavior_stimulus_file() + stimuli = data["items"]["behavior"]["stimuli"] trial_log = data["items"]["behavior"]["trial_log"] @@ -473,13 +526,13 @@ def get_trials(data: Dict, all_trial_data = [None] * len(trial_log) lick_frames = licks_df.frame.values - reward_times = rewards_df.index.values + reward_times = rewards_df['timestamps'].values for idx, trial in enumerate(trial_log): # match each event in the trial log to the sync timestamps - event_dict = {(e[0], e[1]): {'timestamp':timestamps[e[3]], - 'frame':e[3]} - for e in trial['events']} + event_dict = {(e[0], e[1]): {'timestamp': timestamps[e[3]], + 'frame': e[3]} + 
for e in trial['events']} tr_data = {"trial": trial["index"]} @@ -498,21 +551,24 @@ def get_trials(data: Dict, # https://github.com/AllenInstitute/visual_behavior_analysis/issues/482 if trial_end < 0: - if 'fingerprint' in data['items']['behavior']['items'].keys(): - trial_end = data['items']['behavior']['items']['fingerprint']['starting_frame'] + bhv = data['items']['behavior']['items'] + if 'fingerprint' in bhv.keys(): + trial_end = bhv['fingerprint']['starting_frame'] # select licks that fall between trial_start and trial_end; # licks on the boundary get assigned to the trial that is ending, # rather than the trial that is starting if trial_end > 0: - valid_idx = np.where(np.logical_and(lick_frames>trial_start, - lick_frames<=trial_end)) + valid_idx = np.where(np.logical_and(lick_frames > trial_start, + lick_frames <= trial_end)) else: - valid_idx = np.where(lick_frames>trial_start) + valid_idx = np.where(lick_frames > trial_start) valid_licks = lick_frames[valid_idx] - - tr_data["lick_times"] = timestamps[valid_licks] + if len(valid_licks) > 0: + tr_data["lick_times"] = timestamps[valid_licks] + else: + tr_data["lick_times"] = np.array([], dtype=float) tr_data["reward_time"] = get_trial_reward_time( reward_times, @@ -529,15 +585,22 @@ def get_trials(data: Dict, tr_data['hit'], tr_data['false_alarm'], tr_data["aborted"], - timestamps + timestamps, + monitor_delay )) tr_data.update(get_trial_image_names(trial, stimuli)) - # ensure that only one trial condition is True (they are mutually exclusive) + # ensure that only one trial condition is True + # (they are mutually exclusive) condition_dict = {} - for key in ['hit','miss','false_alarm','correct_reject','auto_rewarded','aborted']: + for key in ['hit', + 'miss', + 'false_alarm', + 'correct_reject', + 'auto_rewarded', + 'aborted']: condition_dict[key] = tr_data[key] - validate_trial_condition_exclusivity(idx,**condition_dict) + validate_trial_condition_exclusivity(idx, **condition_dict) all_trial_data[idx] = 
tr_data @@ -551,7 +614,8 @@ def get_trials(data: Dict, def local_time(iso_timestamp, timezone=None): datetime = pd.to_datetime(iso_timestamp) if not datetime.tzinfo: - datetime = datetime.replace(tzinfo=dateutil.tz.gettz('America/Los_Angeles')) + tzinfo = dateutil.tz.gettz('America/Los_Angeles') + datetime = datetime.replace(tzinfo=tzinfo) return datetime.isoformat() @@ -563,7 +627,8 @@ def get_time(exp_data): def data_to_licks(data, time): lick_frames = data['items']['behavior']['lick_sensors'][0]['lick_events'] lick_times = time[lick_frames] - return pd.DataFrame(data={"frame": lick_frames, 'time': lick_times}) + return pd.DataFrame(data={"timestamps": lick_times, + "frame": lick_frames}) def get_mouse_id(exp_data): @@ -576,7 +641,8 @@ def get_params(exp_data): params.update(exp_data["items"]["behavior"].get("cl_params", {})) if "response_window" in params: - params["response_window"] = list(params["response_window"]) # tuple to list + # tuple to list + params["response_window"] = list(params["response_window"]) return params @@ -597,50 +663,60 @@ def get_even_sampling(data): stimuli = data['items']['behavior']['stimuli'] for stimuli_group_name, stim in stimuli.items(): - if stim['obj_type'].lower() == 'docimagestimulus' and stim['sampling'] in ['even', 'file']: + if (stim['obj_type'].lower() == 'docimagestimulus' + and stim['sampling'] in ['even', 'file']): + return True + return False def data_to_metadata(data, time): + config = data['items']['behavior']['config'] + doc = config['DoC'] + stimuli = data['items']['behavior']['stimuli'] + metadata = { - "startdatetime": local_time(data["start_time"], timezone='America/Los_Angeles'), - "rig_id": RIG_NAME.get(data['platform_info']['computer_name'].lower(), 'unknown'), + "startdatetime": local_time(data["start_time"], + timezone='America/Los_Angeles'), + "rig_id": RIG_NAME.get(data['platform_info']['computer_name'].lower(), + 'unknown'), "computer_name": data['platform_info']['computer_name'], - "reward_vol": 
data["items"]["behavior"]["config"]["reward"]["reward_volume"], - "auto_reward_vol": data["items"]["behavior"]["config"]["DoC"]["auto_reward_volume"], + "reward_vol": config["reward"]["reward_volume"], + "auto_reward_vol": doc["auto_reward_volume"], "params": get_params(data), - "mouseid": data["items"]["behavior"]['config']['behavior']['mouse_id'], - "response_window": list(data["items"]["behavior"].get("config", {}).get("DoC", {}).get("response_window")), - "task": data["items"]["behavior"]["config"]["behavior"]['task_id'], + "mouseid": config['behavior']['mouse_id'], + "response_window": list(data["items"]["behavior"].get("config", {}).get("DoC", {}).get("response_window")), # noqa: E501 + "task": config["behavior"]['task_id'], "stage": data["items"]["behavior"]["params"]["stage"], "stoptime": time[-1] - time[0], "userid": data["items"]["behavior"]['cl_params']['user_id'], "lick_detect_training_mode": "single", "blankscreen_on_timeout": False, - "stim_duration": data["items"]["behavior"]["config"]['DoC']['stimulus_window'] * 1000, - "blank_duration_range": list(data["items"]["behavior"]["config"]['DoC']['blank_duration_range']), - "delta_minimum": data["items"]["behavior"]["config"]['DoC']['pre_change_time'], - "stimulus_distribution": data["items"]["behavior"]["config"]["DoC"]["change_time_dist"], - "delta_mean": data["items"]["behavior"]['config']["DoC"]["change_time_scale"], + "stim_duration": doc['stimulus_window'] * 1000, + "blank_duration_range": list(doc['blank_duration_range']), + "delta_minimum": doc['pre_change_time'], + "stimulus_distribution": doc["change_time_dist"], + "delta_mean": doc["change_time_scale"], "trial_duration": None, - "n_stimulus_frames": sum([sum(s.get("draw_log", [])) for s in data["items"]["behavior"]["stimuli"].values()]), - "stimulus": list(data["items"]["behavior"]["stimuli"].keys())[0], - "warm_up_trials": data["items"]["behavior"]["config"]["DoC"]["warm_up_trials"], - "stimulus_window": 
data["items"]["behavior"]["config"]["DoC"]["stimulus_window"], - "volume_limit": data["items"]["behavior"]["config"]["behavior"]["volume_limit"], - "failure_repeats": data["items"]["behavior"]["config"]["DoC"]["failure_repeats"], - "catch_frequency": data["items"]["behavior"]["config"]["DoC"]["catch_freq"], - "auto_reward_delay": data["items"]["behavior"]["config"]["DoC"].get("auto_reward_delay", 0.0), - "free_reward_trials": data["items"]["behavior"]["config"]["DoC"]["free_reward_trials"], - "min_no_lick_time": data["items"]["behavior"]["config"]["DoC"]["min_no_lick_time"], - "max_session_duration": data["items"]["behavior"]["config"]["DoC"]["max_task_duration_min"], - "abort_on_early_response": data["items"]["behavior"]["config"]["DoC"]["abort_on_early_response"], - "initial_blank_duration": data["items"]["behavior"]["config"]["DoC"]["initial_blank"], + "n_stimulus_frames": sum([sum(s.get("draw_log", [])) + for s in stimuli.values()]), + "stimulus": list(stimuli.keys())[0], + "warm_up_trials": doc["warm_up_trials"], + "stimulus_window": doc["stimulus_window"], + "volume_limit": config["behavior"]["volume_limit"], + "failure_repeats": doc["failure_repeats"], + "catch_frequency": doc["catch_freq"], + "auto_reward_delay": doc.get("auto_reward_delay", 0.0), + "free_reward_trials": doc["free_reward_trials"], + "min_no_lick_time": doc["min_no_lick_time"], + "max_session_duration": doc["max_task_duration_min"], + "abort_on_early_response": doc["abort_on_early_response"], + "initial_blank_duration": doc["initial_blank"], "even_sampling_enabled": get_even_sampling(data), "behavior_session_uuid": uuid.UUID(data["session_uuid"]), - "periodic_flash": data['items']['behavior']['config']['DoC']['periodic_flash'], + "periodic_flash": doc['periodic_flash'], "platform_info": data['platform_info'] } @@ -659,10 +735,16 @@ def get_change_time_frame_response_latency(trial): for change_event in trial['events']: if change_event[0] in ['stimulus_changed', 'sham_change']: - return 
change_event[2], change_event[3], get_response_latency(change_event, trial) + return (change_event[2], + change_event[3], + get_response_latency(change_event, trial)) return None, None, None -def get_stimulus_attr_changes(stim_dict, change_frame, first_frame, last_frame): + +def get_stimulus_attr_changes(stim_dict, + change_frame, + first_frame, + last_frame): """ Notes ----- @@ -690,19 +772,31 @@ def get_image_info_from_trial(trial_log, ti): raise RuntimeError('Should not have been possible') if len(trial_log[ti]["stimulus_changes"]) == 1: - (from_group, from_name, ), (to_group, to_name), _, _ = trial_log[ti]["stimulus_changes"][0] + + ((from_group, from_name, ), + (to_group, to_name), + _, _) = trial_log[ti]["stimulus_changes"][0] + return from_group, from_name, to_group, to_name else: - _, _, prev_group, prev_name = get_image_info_from_trial(trial_log, ti - 1) + + (_, _, + prev_group, + prev_name) = get_image_info_from_trial(trial_log, ti - 1) + return prev_group, prev_name, prev_group, prev_name def get_ori_info_from_trial(trial_log, ti, ): if ti == -1: raise IndexError('No change on first trial.') - + if len(trial_log[ti]["stimulus_changes"]) == 1: - (initial_group, initial_orientation), (change_group, change_orientation, ), _, _ = trial_log[ti]["stimulus_changes"][0] + + ((initial_group, initial_orientation), + (change_group, change_orientation, ), + _, _) = trial_log[ti]["stimulus_changes"][0] + return change_orientation, change_orientation, None else: return get_ori_info_from_trial(trial_log, ti - 1) @@ -712,50 +806,70 @@ def get_trials_v0(data, time): stimuli = data["items"]["behavior"]["stimuli"] if len(list(stimuli.keys())) != 1: raise ValueError('Only one stimuli supported.') - + stim_name, stim = next(iter(stimuli.items())) if stim_name not in ['images', 'grating', ]: raise ValueError('Unsupported stimuli name: {}.'.format(stim_name)) + doc = data["items"]["behavior"]["config"]["DoC"] + implied_type = stim["obj_type"] trial_log = 
data["items"]["behavior"]["trial_log"] - pre_change_time = data["items"]["behavior"]["config"]['DoC']['pre_change_time'] - initial_blank_duration = data["items"]["behavior"]["config"]["DoC"]["initial_blank"] + pre_change_time = doc['pre_change_time'] + initial_blank_duration = doc["initial_blank"] - initial_stim = stim['set_log'][0] # we need this for the situations where a change doesn't occur on the first trial + # we need this for the situations where a + # change doesn't occur in the first trial + initial_stim = stim['set_log'][0] trials = collections.defaultdict(list) for ti, trial in enumerate(trial_log): trials['index'].append(trial["index"]) trials['lick_times'].append([lick[0] for lick in trial["licks"]]) - trials['auto_rewarded'].append(trial["trial_params"]["auto_reward"] if trial['trial_params']['catch'] == False else None) + trials['auto_rewarded'].append(trial["trial_params"]["auto_reward"] + if not trial['trial_params']['catch'] + else None) + trials['cumulative_volume'].append(trial["cumulative_volume"]) trials['cumulative_reward_number'].append(trial["cumulative_rewards"]) - trials['reward_volume'].append(sum([r[0] for r in trial.get("rewards", [])])) - trials['reward_times'].append([reward[1] for reward in trial["rewards"]]) - trials['reward_frames'].append([reward[2] for reward in trial["rewards"]]) + + trials['reward_volume'].append(sum([r[0] + for r in trial.get("rewards", [])])) + + trials['reward_times'].append([reward[1] + for reward in trial["rewards"]]) + + trials['reward_frames'].append([reward[2] + for reward in trial["rewards"]]) + trials['rewarded'].append(trial["trial_params"]["catch"] is False) - trials['optogenetics'].append(trial["trial_params"].get("optogenetics", False)) + trials['optogenetics'].append(trial["trial_params"].get("optogenetics", False)) # noqa: E501 trials['response_type'].append([]) trials['response_time'].append([]) - trials['change_time'].append(get_change_time_frame_response_latency(trial)[0]) - 
trials['change_frame'].append(get_change_time_frame_response_latency(trial)[1]) - trials['response_latency'].append(get_change_time_frame_response_latency(trial)[2]) + trials['change_time'].append(get_change_time_frame_response_latency(trial)[0]) # noqa: E501 + trials['change_frame'].append(get_change_time_frame_response_latency(trial)[1]) # noqa: E501 + trials['response_latency'].append(get_change_time_frame_response_latency(trial)[2]) # noqa: E501 trials['starttime'].append(trial["events"][0][2]) trials['startframe'].append(trial["events"][0][3]) - trials['trial_length'].append(trial["events"][-1][2] - trial["events"][0][2]) - trials['scheduled_change_time'].append(pre_change_time + initial_blank_duration + trial["trial_params"]["change_time"]) + trials['trial_length'].append(trial["events"][-1][2] - + trial["events"][0][2]) + trials['scheduled_change_time'].append(pre_change_time + + initial_blank_duration + + trial["trial_params"]["change_time"]) # noqa: E501 trials['endtime'].append(trial["events"][-1][2]) trials['endframe'].append(trial["events"][-1][3]) # Stimulus: if implied_type == 'DoCImageStimulus': - from_group, from_name, to_group, to_name = get_image_info_from_trial(trial_log, ti) + (from_group, + from_name, + to_group, + to_name) = get_image_info_from_trial(trial_log, ti) trials['initial_image_name'].append(from_name) trials['initial_image_category'].append(from_group) trials['change_image_name'].append(to_name) - trials['change_image_category'].append(to_group) + trials['change_image_category'].append(to_group) trials['change_ori'].append(None) trials['change_contrast'].append(None) trials['initial_ori'].append(None) @@ -763,9 +877,13 @@ def get_trials_v0(data, time): trials['delta_ori'].append(None) elif implied_type == 'DoCGratingStimulus': try: - change_orientation, initial_orientation, delta_orientation = get_ori_info_from_trial(trial_log, ti) + (change_orientation, + initial_orientation, + delta_orientation) = get_ori_info_from_trial(trial_log, 
ti) except IndexError: - orientation = initial_stim[1] # shape: group_name, orientation, stimulus time relative to start, frame + # shape: group_name, orientation, + # stimulus time relative to start, frame + orientation = initial_stim[1] change_orientation = orientation initial_orientation = orientation delta_orientation = None @@ -779,7 +897,8 @@ def get_trials_v0(data, time): trials['initial_contrast'].append(None) trials['delta_ori'].append(delta_orientation) else: - raise NotImplementedError('Unsupported stimulus type: {}'.format(implied_type), ) + msg = 'Unsupported stimulus type: {}'.format(implied_type) + raise NotImplementedError(msg) return pd.DataFrame(trials) @@ -807,34 +926,51 @@ def find_licks(reward_times, licks, window=3.5): return [] else: reward_time = one(reward_times) - reward_lick_mask = ((licks['time'] > reward_time) & (licks['time'] < (reward_time + window))) + reward_lick_mask = ((licks['timestamps'] > reward_time) & + (licks['timestamps'] < (reward_time + window))) tr_licks = licks[reward_lick_mask].copy() - tr_licks['time'] -= reward_time - return tr_licks['time'].values + tr_licks['timestamps'] -= reward_time + return tr_licks['timestamps'].values -def calculate_reward_rate(response_latency=None, starttime=None, window=0.75, trial_window=25, initial_trials=10): +def calculate_reward_rate(response_latency=None, + starttime=None, + window=0.75, + trial_window=25, + initial_trials=10): + assert len(response_latency) == len(starttime) - df = pd.DataFrame({'response_latency': response_latency, 'starttime':starttime}) + df = pd.DataFrame({'response_latency': response_latency, + 'starttime': starttime}) # adds a column called reward_rate to the input dataframe # the reward_rate column contains a rolling average of rewards/min - # window sets the window in which a response is considered correct, so a window of 1.0 means licks before 1.0 second are considered correct + # window sets the window in which a response is considered correct, + # so a 
window of 1.0 means licks before 1.0 second are considered correct + # # Reorganized into this unit-testable form by Nick Cain April 25 2019 reward_rate = np.zeros(len(df)) - reward_rate[:initial_trials] = np.inf # make the initial reward rate infinite, so that you include the first trials automatically. + # make the initial reward rate infinite, + # so that you include the first trials automatically. + reward_rate[:initial_trials] = np.inf + for trial_number in range(initial_trials, len(df)): min_index = np.max((0, trial_number - trial_window)) max_index = np.min((trial_number + trial_window, len(df))) df_roll = df.iloc[min_index:max_index] - correct = len(df_roll[df_roll.response_latency < window]) # get a rolling number of correct trials - time_elapsed = df_roll.starttime.iloc[-1] - df_roll.starttime.iloc[0] # get the time elapsed over the trials - reward_rate_on_this_lap = correct / time_elapsed * 60 # calculate the reward rate, rewards/min + # get a rolling number of correct trials + correct = len(df_roll[df_roll.response_latency < window]) + + # get the time elapsed over the trials + time_elapsed = df_roll.starttime.iloc[-1] - df_roll.starttime.iloc[0] + + # calculate the reward rate, rewards/min + reward_rate_on_this_lap = correct / time_elapsed * 60 reward_rate[trial_number] = reward_rate_on_this_lap return reward_rate @@ -846,13 +982,13 @@ def get_response_type(trials): for idx in trials.index: if trials.loc[idx].trial_type.lower() == 'aborted': response_type.append('EARLY_RESPONSE') - elif (trials.loc[idx].rewarded == True) & (trials.loc[idx].response == 1): + elif (trials.loc[idx].rewarded) & (trials.loc[idx].response == 1): response_type.append('HIT') - elif (trials.loc[idx].rewarded == True) & (trials.loc[idx].response != 1): + elif (trials.loc[idx].rewarded) & (trials.loc[idx].response != 1): response_type.append('MISS') - elif (trials.loc[idx].rewarded == False) & (trials.loc[idx].response == 1): + elif (not trials.loc[idx].rewarded) & 
(trials.loc[idx].response == 1): response_type.append('FA') - elif (trials.loc[idx].rewarded == False) & (trials.loc[idx].response != 1): + elif (not trials.loc[idx].rewarded) & (trials.loc[idx].response != 1): response_type.append('CR') else: response_type.append('other') @@ -882,32 +1018,71 @@ def colormap(trial_type, response_type): def create_extended_trials(trials=None, metadata=None, time=None, licks=None): startdatetime = dateutil.parser.parse(metadata['startdatetime']) - edf = trials[~pd.isnull(trials['reward_times'])].reset_index(drop=True).copy() + edf = trials[~pd.isnull(trials['reward_times'])].reset_index(drop=True).copy() # noqa: E501 # Buggy computation of trial_length (for backwards compatibility) edf.drop(['trial_length'], axis=1, inplace=True) - edf['endtime_buggy'] = [edf['starttime'].iloc[ti + 1] if ti < len(edf) - 1 else time[-1] for ti in range(len(edf))] + + edf['endtime_buggy'] = [edf['starttime'].iloc[ti + 1] + if ti < len(edf) - 1 + else time[-1] + for ti in range(len(edf))] + edf['trial_length'] = edf['endtime_buggy'] - edf['starttime'] edf.drop(['endtime_buggy'], axis=1, inplace=True) # Make trials contiguous, and rebase time: - edf.drop(['endframe', 'starttime', 'endtime', 'change_time', 'lick_times', 'reward_times'], axis=1, inplace=True) - edf['endframe'] = [edf['startframe'].iloc[ti + 1] if ti < len(edf) - 1 else len(time) - 1 for ti in range(len(edf))] - edf['lick_frames'] = [licks['frame'][np.logical_and(licks['frame'] > int(row['startframe']), licks['frame'] <= int(row['endframe']))].values for _, row in edf.iterrows()] - edf['starttime'] = [time[edf['startframe'].iloc[ti]] for ti in range(len(edf))] - edf['endtime'] = [time[edf['endframe'].iloc[ti]] for ti in range(len(edf))] - + edf.drop(['endframe', + 'starttime', + 'endtime', + 'change_time', + 'lick_times', + 'reward_times'], axis=1, inplace=True) + + edf['endframe'] = [edf['startframe'].iloc[ti + 1] + if ti < len(edf) - 1 + else len(time) - 1 + for ti in range(len(edf))] + + 
_lks = licks['frame'] + edf['lick_frames'] = [_lks[np.logical_and(_lks > int(row['startframe']), + _lks <= int(row['endframe']))].values + for _, row in edf.iterrows()] + + # this variable was created to bring code into + # line with pep8; deleting to protect against + # changing logic + del _lks + + edf['starttime'] = [time[edf['startframe'].iloc[ti]] + for ti in range(len(edf))] + + edf['endtime'] = [time[edf['endframe'].iloc[ti]] + for ti in range(len(edf))] + # Proper computation of trial_length: # edf['trial_length'] = edf['endtime'] - edf['starttime'] - edf['change_time'] = [time[int(cf)] if not np.isnan(cf) else float('nan') for cf in edf['change_frame']] - edf['lick_times'] = [[time[fi] for fi in frame_arr] for frame_arr in edf['lick_frames']] + edf['change_time'] = [time[int(cf)] + if not np.isnan(cf) + else float('nan') + for cf in edf['change_frame']] + + edf['lick_times'] = [[time[fi] for fi in frame_arr] + for frame_arr in edf['lick_frames']] + edf['trial_type'] = edf.apply(categorize_one_trial, axis=1) - edf['reward_times'] = [[time[fi] for fi in frame_list] for frame_list in edf['reward_frames']] + + edf['reward_times'] = [[time[fi] for fi in frame_list] + for frame_list in edf['reward_frames']] + edf['number_of_rewards'] = edf['reward_times'].map(len) edf['reward_licks'] = edf['reward_times'].apply(find_licks, args=(licks,)) edf['reward_lick_count'] = edf['reward_licks'].map(len) - edf['reward_lick_latency'] = edf['reward_licks'].map(lambda ll: None if len(ll) == 0 else np.min(ll)) + + edf['reward_lick_latency'] = edf['reward_licks'].map(lambda ll: None + if len(ll) == 0 + else np.min(ll)) # Things that dont depend on time/trial: edf['mouse_id'] = metadata['mouseid'] @@ -937,26 +1112,56 @@ def create_extended_trials(trials=None, metadata=None, time=None, licks=None): edf['cumulative_volume'] = edf['reward_volume'].cumsum() # Compute response latency (kinda tricky): - edf['valid_response_licks'] = [[l for l in t.lick_times if l - t.change_time > 
t.response_window[0]] for _, t in edf.iterrows()]
-    edf['response_latency'] = edf['valid_response_licks'].map(lambda x: float('inf') if len(x) == 0 else x[0]) - edf['change_time']
+    edf['valid_response_licks'] = [[lk for lk in tt.lick_times
+                                    if lk - tt.change_time > tt.response_window[0]]  # noqa: E501
+                                   for _, tt in edf.iterrows()]
+
+    edf['response_latency'] = edf['valid_response_licks'].map(lambda x: float('inf')  # noqa: E501
+                                                              if len(x) == 0
+                                                              else x[0])
+    edf['response_latency'] -= edf['change_time']
+
     edf.drop('valid_response_licks', axis=1, inplace=True)

     # Complicated:
     assert len(edf.startdatetime.unique()) == 1
     np.testing.assert_array_equal(list(edf.index.values), np.arange(len(edf)))
-    edf['reward_rate'] = calculate_reward_rate(response_latency=edf['response_latency'].values, starttime=edf['starttime'].values)
+
+    _latency = edf['response_latency'].values
+    _starttime = edf['starttime'].values
+    edf['reward_rate'] = calculate_reward_rate(response_latency=_latency,
+                                               starttime=_starttime)
+
+    # this variable was created to bring code into
+    # line with pep8; deleting to protect against
+    # changing logic
+    del _latency
+    del _starttime

     # Response/trial metadata encoding:
+    _lt = edf['response_latency'] <= metadata['response_window'][1]
+    _gt = edf['response_latency'] >= metadata['response_window'][0]
     edf['response'] = (~pd.isnull(edf['change_time']) &
                        ~pd.isnull(edf['response_latency']) &
-                       (edf['response_latency'] >= metadata['response_window'][0]) &
-                       (edf['response_latency'] <= metadata['response_window'][1])).astype(np.float64)
-    edf['response_type'] = get_response_type(edf[['trial_type', 'response', 'rewarded']])
-    edf['color'] = [colormap(trial.trial_type, trial.response_type) for _, trial in edf.iterrows()]
+                       _gt &
+                       _lt).astype(np.float64)
+
+    # this variable was created to bring code into
+    # line with pep8; deleting to protect against
+    # changing logic
+    del _lt
+    del _gt
+
+    edf['response_type'] = get_response_type(edf[['trial_type',
'response', + 'rewarded']]) + edf['color'] = [colormap(trial.trial_type, trial.response_type) + for _, trial in edf.iterrows()] # Reorder columns for backwards-compatibility: return edf[EDF_COLUMNS] + def get_extended_trials(data, time=None): if time is None: time = get_time(data) @@ -965,5 +1170,3 @@ def get_extended_trials(data, time=None): metadata=data_to_metadata(data, time), time=time, licks=data_to_licks(data, time)) - - diff --git a/allensdk/brain_observatory/behavior/write_nwb/__main__.py b/allensdk/brain_observatory/behavior/write_nwb/__main__.py index af3aa721f..67effe5c6 100644 --- a/allensdk/brain_observatory/behavior/write_nwb/__main__.py +++ b/allensdk/brain_observatory/behavior/write_nwb/__main__.py @@ -15,7 +15,9 @@ from allensdk.brain_observatory.session_api_utils import sessions_are_equal -def write_behavior_ophys_nwb(session_data, nwb_filepath): +def write_behavior_ophys_nwb(session_data: dict, + nwb_filepath: str, + skip_eye_tracking: bool): nwb_filepath_inprogress = nwb_filepath+'.inprogress' nwb_filepath_error = nwb_filepath+'.error' @@ -28,10 +30,12 @@ def write_behavior_ophys_nwb(session_data, nwb_filepath): os.remove(filename) try: - json_session = BehaviorOphysSession( - api=BehaviorOphysJsonApi(session_data)) + json_api = BehaviorOphysJsonApi(data=session_data, + skip_eye_tracking=skip_eye_tracking) + json_session = BehaviorOphysSession(api=json_api) lims_api = BehaviorOphysLimsApi( - ophys_experiment_id=session_data['ophys_experiment_id']) + ophys_experiment_id=session_data['ophys_experiment_id'], + skip_eye_tracking=skip_eye_tracking) lims_session = BehaviorOphysSession(api=lims_api) logging.info("Comparing a BehaviorOphysSession created from JSON " @@ -72,8 +76,10 @@ def main(): raise err try: + skip_eye_tracking = parser.args['skip_eye_tracking'] output = write_behavior_ophys_nwb(parser.args['session_data'], - parser.args['output_path']) + parser.args['output_path'], + skip_eye_tracking) logging.info('File successfully created') 
except Exception as err: logging.error('NWB write failure') diff --git a/allensdk/brain_observatory/behavior/write_nwb/_schemas.py b/allensdk/brain_observatory/behavior/write_nwb/_schemas.py index f288cffb6..aca452345 100644 --- a/allensdk/brain_observatory/behavior/write_nwb/_schemas.py +++ b/allensdk/brain_observatory/behavior/write_nwb/_schemas.py @@ -133,6 +133,10 @@ class Meta: 'used for this experiment') output_path = String(required=True, validate=check_write_access_overwrite, description='write outputs to here') + skip_eye_tracking = Boolean( + required=True, default=False, + description="Whether or not to skip processing eye tracking data. " + "If True, no eye tracking data will be written to NWB") class OutputSchema(RaisingSchema): diff --git a/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/__init__.py b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/extension_builder.py b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/extension_builder.py new file mode 100644 index 000000000..954a2ac25 --- /dev/null +++ b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/extension_builder.py @@ -0,0 +1,55 @@ +import os.path + +from pynwb.spec import NWBNamespaceBuilder, export_spec, NWBGroupSpec, \ + NWBDatasetSpec + +NAMESPACE = 'ndx-aibs-stimulus-template' + + +def main(): + + ns_builder = NWBNamespaceBuilder( + doc="Stimulus images", + name=f"""{NAMESPACE}""", + version="""0.1.0""", + author="""Allen Institute for Brain Science""", + contact="""waynew@alleninstitute.org""" + ) + + ns_builder.include_type('ImageSeries', namespace='core') + ns_builder.include_type('TimeSeries', namespace='core') + ns_builder.include_type('NWBDataInterface', namespace='core') + + stimulus_template_spec = NWBGroupSpec( + 
neurodata_type_def='StimulusTemplate', + neurodata_type_inc='ImageSeries', + doc='Note: image names in control_description are referenced by ' + 'stimulus/presentation table as well as intervals ' + '\n' + 'Each image shown to the animals is warped to account for ' + 'distance and eye position relative to the monitor. This ' + 'extension stores the warped images that were shown to the animal ' + 'as well as an unwarped version of each image in which a mask has ' + 'been applied such that only the pixels visible after warping are ' + 'included', + datasets=[ + NWBDatasetSpec( + name='unwarped', + dtype='float', + doc='Original image with mask applied such that only the ' + 'pixels visible after warping are included', + shape=(None, None, None) + ) + ] + ) + + new_data_types = [stimulus_template_spec] + + # export the spec to yaml files in the spec folder + output_dir = os.path.abspath(os.path.join(os.path.dirname(__file__))) + export_spec(ns_builder, new_data_types, output_dir) + + +if __name__ == "__main__": + # usage: python create_extension_spec.py + main() diff --git a/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx-aibs-stimulus-template.extensions.yaml b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx-aibs-stimulus-template.extensions.yaml new file mode 100644 index 000000000..23d3c63a0 --- /dev/null +++ b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx-aibs-stimulus-template.extensions.yaml @@ -0,0 +1,16 @@ +groups: +- neurodata_type_def: StimulusTemplate + neurodata_type_inc: ImageSeries + doc: Each image shown to the animals is warped to account for distance and eye position + relative to the monitor. 
This extension stores the warped images that were shown + to the animal as well as an unwarped version of each image in which a mask has + been applied such that only the pixels visible after warping are included + datasets: + - name: unwarped + dtype: float + shape: + - null + - null + - null + doc: Original image with mask applied such that only the pixels visible after + warping are included diff --git a/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx-aibs-stimulus-template.namespace.yaml b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx-aibs-stimulus-template.namespace.yaml new file mode 100644 index 000000000..bf40bfa51 --- /dev/null +++ b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx-aibs-stimulus-template.namespace.yaml @@ -0,0 +1,13 @@ +namespaces: +- author: Allen Institute for Brain Science + contact: waynew@alleninstitute.org + doc: Stimulus images + name: ndx-aibs-stimulus-template + schema: + - namespace: core + neurodata_types: + - ImageSeries + - TimeSeries + - NWBDataInterface + - source: ndx-aibs-stimulus-template.extensions.yaml + version: 0.1.0 diff --git a/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx_stimulus_template.py b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx_stimulus_template.py new file mode 100644 index 000000000..1e90c78da --- /dev/null +++ b/allensdk/brain_observatory/behavior/write_nwb/extensions/stimulus_template/ndx_stimulus_template.py @@ -0,0 +1,15 @@ +import os +from pynwb import load_namespaces, get_class + +# Set path of the namespace.yaml file to the expected install location +ndx_stimulus_template_specpath = os.path.join( + os.path.dirname(__file__), + 'ndx-aibs-stimulus-template.namespace.yaml' +) + +# Load the namespace +load_namespaces(ndx_stimulus_template_specpath) + + +StimulusTemplateExtension = get_class('StimulusTemplate', + 'ndx-aibs-stimulus-template') 
diff --git a/allensdk/brain_observatory/nwb/__init__.py b/allensdk/brain_observatory/nwb/__init__.py index 68bbe91d2..94a932df6 100644 --- a/allensdk/brain_observatory/nwb/__init__.py +++ b/allensdk/brain_observatory/nwb/__init__.py @@ -1,7 +1,7 @@ import logging import warnings from pathlib import Path -from typing import Iterable, Union +from typing import Iterable import h5py import marshmallow @@ -13,12 +13,12 @@ import pynwb from pynwb.base import TimeSeries, Images from pynwb import ProcessingModule, NWBFile -from pynwb.image import ImageSeries, GrayscaleImage, IndexSeries +from pynwb.image import GrayscaleImage, IndexSeries from pynwb.ophys import ( DfOverF, ImageSegmentation, OpticalChannel, Fluorescence) -from allensdk.brain_observatory.behavior.stimulus_processing.stimulus_templates import \ - StimulusImage, StimulusTemplate +from allensdk.brain_observatory.behavior.stimulus_processing.stimulus_templates import StimulusTemplate # noqa: E501 +from allensdk.brain_observatory.behavior.write_nwb.extensions.stimulus_template.ndx_stimulus_template import StimulusTemplateExtension # noqa: E501 from allensdk.brain_observatory.nwb.nwb_utils import (get_column_name) from allensdk.brain_observatory import dict_to_indexed_array from allensdk.brain_observatory.behavior.image_api import Image @@ -34,12 +34,12 @@ log = logging.getLogger("allensdk.brain_observatory.nwb") CELL_SPECIMEN_COL_DESCRIPTIONS = { - 'cell_specimen_id': 'Unified id of segmented cell across experiments (after' - ' cell matching)', + 'cell_specimen_id': 'Unified id of segmented cell across experiments ' + '(after cell matching)', 'height': 'Height of ROI in pixels', 'width': 'Width of ROI in pixels', - 'mask_image_plane': 'Which image plane an ROI resides on. Overlapping ROIs ' - 'are stored on different mask image planes.', + 'mask_image_plane': 'Which image plane an ROI resides on. 
Overlapping ' + 'ROIs are stored on different mask image planes.', 'max_correction_down': 'Max motion correction in down direction in pixels', 'max_correction_left': 'Max motion correction in left direction in pixels', 'max_correction_up': 'Max motion correction in up direction in pixels', @@ -51,31 +51,31 @@ 'y': 'y position of ROI in Image Plane in pixels (top left corner)' } + def check_nwbfile_version(nwbfile_path: str, desired_minimum_version: str, warning_msg: str): - - with h5py.File(nwbfile_path, 'r') as f: - # nwb 2.x files store version as an attribute + with h5py.File(nwbfile_path, 'r') as f: + # nwb 2.x files store version as an attribute + try: + nwb_version = str(f.attrs["nwb_version"]).split(".") + except KeyError: + # nwb 1.x files store version as dataset try: - nwb_version = str(f.attrs["nwb_version"]).split(".") - except KeyError: - # nwb 1.x files store version as dataset - try: - nwb_version = str(f["nwb_version"][...].astype(str)) - # Stored in the form: `NWB-x.y.z` - nwb_version = nwb_version.split("-")[1].split(".") - except (KeyError, IndexError): - nwb_version = None - - if nwb_version is None: - warnings.warn(f"'{nwbfile_path}' doesn't appear to be a valid " - f"Neurodata Without Borders (*.nwb) format file as " - f"neither a 'nwb_version' field nor dataset could " - f"be found!") - else: - if tuple(nwb_version) < tuple(desired_minimum_version.split(".")): - warnings.warn(warning_msg) + nwb_version = str(f["nwb_version"][...].astype(str)) + # Stored in the form: `NWB-x.y.z` + nwb_version = nwb_version.split("-")[1].split(".") + except (KeyError, IndexError): + nwb_version = None + + if nwb_version is None: + warnings.warn(f"'{nwbfile_path}' doesn't appear to be a valid " + f"Neurodata Without Borders (*.nwb) format file as " + f"neither a 'nwb_version' field nor dataset could " + f"be found!") + else: + if tuple(nwb_version) < tuple(desired_minimum_version.split(".")): + warnings.warn(warning_msg) def 
read_eye_dlc_tracking_ellipses(input_path: Path) -> dict: @@ -119,44 +119,58 @@ def read_eye_gaze_mappings(input_path: Path) -> dict: *_eye_areas: Area of eye (in pixels^2) over time *_pupil_areas: Area of pupil (in pixels^2) over time *_screen_coordinates: y, x screen coordinates (in cm) over time - *_screen_coordinates_spherical: y, x screen coordinates (in deg) over time - synced_frame_timestamps: synced timestamps for video frames (in sec) + *_screen_coordinates_spherical: y, x screen coordinates (in deg) + over time + synced_frame_timestamps: synced timestamps for video frames + (in sec) """ eye_gaze_data = {} - - eye_gaze_data["raw_eye_areas"] = pd.read_hdf(input_path, key="raw_eye_areas") - eye_gaze_data["raw_pupil_areas"] = pd.read_hdf(input_path, key="raw_pupil_areas") - eye_gaze_data["raw_screen_coordinates"] = pd.read_hdf(input_path, key="raw_screen_coordinates") - eye_gaze_data["raw_screen_coordinates_spherical"] = pd.read_hdf(input_path, key="raw_screen_coordinates_spherical") - - eye_gaze_data["new_eye_areas"] = pd.read_hdf(input_path, key="new_eye_areas") - eye_gaze_data["new_pupil_areas"] = pd.read_hdf(input_path, key="new_pupil_areas") - eye_gaze_data["new_screen_coordinates"] = pd.read_hdf(input_path, key="new_screen_coordinates") - eye_gaze_data["new_screen_coordinates_spherical"] = pd.read_hdf(input_path, key="new_screen_coordinates_spherical") - - eye_gaze_data["synced_frame_timestamps"] = pd.read_hdf(input_path, key="synced_frame_timestamps") + eye_gaze_data["raw_eye_areas"] = \ + pd.read_hdf(input_path, key="raw_eye_areas") + eye_gaze_data["raw_pupil_areas"] = \ + pd.read_hdf(input_path, key="raw_pupil_areas") + eye_gaze_data["raw_screen_coordinates"] = \ + pd.read_hdf(input_path, key="raw_screen_coordinates") + eye_gaze_data["raw_screen_coordinates_spherical"] = \ + pd.read_hdf(input_path, key="raw_screen_coordinates_spherical") + eye_gaze_data["new_eye_areas"] = \ + pd.read_hdf(input_path, key="new_eye_areas") + 
eye_gaze_data["new_pupil_areas"] = \ + pd.read_hdf(input_path, key="new_pupil_areas") + eye_gaze_data["new_screen_coordinates"] = \ + pd.read_hdf(input_path, key="new_screen_coordinates") + eye_gaze_data["new_screen_coordinates_spherical"] = \ + pd.read_hdf(input_path, key="new_screen_coordinates_spherical") + eye_gaze_data["synced_frame_timestamps"] = \ + pd.read_hdf(input_path, key="synced_frame_timestamps") return eye_gaze_data def create_eye_gaze_mapping_dataframe(eye_gaze_data: dict) -> pd.DataFrame: - eye_gaze_mapping_df = pd.DataFrame( - { - "raw_eye_area": eye_gaze_data["raw_eye_areas"].values, - "raw_pupil_area": eye_gaze_data["raw_pupil_areas"].values, - "raw_screen_coordinates_x_cm": eye_gaze_data["raw_screen_coordinates"]["x_pos_cm"].values, - "raw_screen_coordinates_y_cm": eye_gaze_data["raw_screen_coordinates"]["y_pos_cm"].values, - "raw_screen_coordinates_spherical_x_deg": eye_gaze_data["raw_screen_coordinates_spherical"]["x_pos_deg"].values, - "raw_screen_coordinates_spherical_y_deg": eye_gaze_data["raw_screen_coordinates_spherical"]["y_pos_deg"].values, - - "filtered_eye_area": eye_gaze_data["new_eye_areas"].values, - "filtered_pupil_area": eye_gaze_data["new_pupil_areas"].values, - "filtered_screen_coordinates_x_cm": eye_gaze_data["new_screen_coordinates"]["x_pos_cm"].values, - "filtered_screen_coordinates_y_cm": eye_gaze_data["new_screen_coordinates"]["y_pos_cm"].values, - "filtered_screen_coordinates_spherical_x_deg": eye_gaze_data["new_screen_coordinates_spherical"]["x_pos_deg"].values, - "filtered_screen_coordinates_spherical_y_deg": eye_gaze_data["new_screen_coordinates_spherical"]["y_pos_deg"].values + eye_gaze_mapping_df = pd.DataFrame({ + "raw_eye_area": eye_gaze_data["raw_eye_areas"].values, + "raw_pupil_area": eye_gaze_data["raw_pupil_areas"].values, + "raw_screen_coordinates_x_cm": + eye_gaze_data["raw_screen_coordinates"]["x_pos_cm"].values, + "raw_screen_coordinates_y_cm": + eye_gaze_data["raw_screen_coordinates"]["y_pos_cm"].values, 
+ "raw_screen_coordinates_spherical_x_deg": + eye_gaze_data["raw_screen_coordinates_spherical"]["x_pos_deg"].values, + "raw_screen_coordinates_spherical_y_deg": + eye_gaze_data["raw_screen_coordinates_spherical"]["y_pos_deg"].values, + "filtered_eye_area": eye_gaze_data["new_eye_areas"].values, + "filtered_pupil_area": eye_gaze_data["new_pupil_areas"].values, + "filtered_screen_coordinates_x_cm": + eye_gaze_data["new_screen_coordinates"]["x_pos_cm"].values, + "filtered_screen_coordinates_y_cm": + eye_gaze_data["new_screen_coordinates"]["y_pos_cm"].values, + "filtered_screen_coordinates_spherical_x_deg": + eye_gaze_data["new_screen_coordinates_spherical"]["x_pos_deg"].values, + "filtered_screen_coordinates_spherical_y_deg": + eye_gaze_data["new_screen_coordinates_spherical"]["y_pos_deg"].values }, index=eye_gaze_data["synced_frame_timestamps"].values ) @@ -185,31 +199,37 @@ def eye_tracking_data_is_valid(eye_dlc_tracking_data: dict, log.warn("The number of camera sync pulses in the " f"sync file ({len(synced_timestamps)}) do not match " "with the number of eye tracking frames " - f"({pupil_params.shape[0]})! No ellipse fits will be written!") + f"({pupil_params.shape[0]})! 
No ellipse fits will be " + "written!") is_valid = False return is_valid def create_eye_tracking_nwb_processing_module(eye_dlc_tracking_data: dict, - synced_timestamps: pd.Series) -> pynwb.ProcessingModule: + synced_timestamps: pd.Series + ) -> pynwb.ProcessingModule: # Top level container for eye tracking processed data - eye_tracking_mod = pynwb.ProcessingModule(name='eye_tracking', - description='Eye tracking processing module') + eye_tracking_mod = pynwb.ProcessingModule( + name='eye_tracking', + description='Eye tracking processing module') # Data interfaces of dlc_fits_container - pupil_fits = eye_dlc_tracking_data["pupil_params"].assign(timestamps=synced_timestamps) - pupil_params = pynwb.core.DynamicTable.from_dataframe(df=pupil_fits, - name="pupil_ellipse_fits") + pupil_fits = eye_dlc_tracking_data["pupil_params"].assign( + timestamps=synced_timestamps) + pupil_params = pynwb.core.DynamicTable.from_dataframe( + df=pupil_fits, name="pupil_ellipse_fits") - cr_fits = eye_dlc_tracking_data["cr_params"].assign(timestamps=synced_timestamps) + cr_fits = eye_dlc_tracking_data["cr_params"].assign( + timestamps=synced_timestamps) cr_params = pynwb.core.DynamicTable.from_dataframe(df=cr_fits, name="cr_ellipse_fits") - eye_fits = eye_dlc_tracking_data["eye_params"].assign(timestamps=synced_timestamps) - eye_params = pynwb.core.DynamicTable.from_dataframe(df=eye_fits, - name="eye_ellipse_fits") + eye_fits = eye_dlc_tracking_data["eye_params"].assign( + timestamps=synced_timestamps) + eye_params = pynwb.core.DynamicTable.from_dataframe( + df=eye_fits, name="eye_ellipse_fits") eye_tracking_mod.add_data_interface(pupil_params) eye_tracking_mod.add_data_interface(cr_params) @@ -223,7 +243,8 @@ def add_eye_gaze_data_interfaces(pynwb_container: pynwb.NWBContainer, eye_areas: pd.Series, screen_coordinates: pd.DataFrame, screen_coordinates_spherical: pd.DataFrame, - synced_timestamps: pd.Series) -> pynwb.NWBContainer: + synced_timestamps: pd.Series + ) -> pynwb.NWBContainer: 
pupil_area_ts = pynwb.base.TimeSeries( name="pupil_area", @@ -263,35 +284,40 @@ def add_eye_gaze_data_interfaces(pynwb_container: pynwb.NWBContainer, def create_gaze_mapping_nwb_processing_modules(eye_gaze_data: dict): # Container for raw gaze mapped data - raw_gaze_mapping_mod = pynwb.ProcessingModule(name='raw_gaze_mapping', - description='Gaze mapping processing module raw outputs') - - raw_gaze_mapping_mod = add_eye_gaze_data_interfaces(raw_gaze_mapping_mod, - pupil_areas=eye_gaze_data["raw_pupil_areas"], - eye_areas=eye_gaze_data["raw_eye_areas"], - screen_coordinates=eye_gaze_data["raw_screen_coordinates"], - screen_coordinates_spherical=eye_gaze_data["raw_screen_coordinates_spherical"], - synced_timestamps=eye_gaze_data["synced_frame_timestamps"]) + raw_gaze_mapping_mod = pynwb.ProcessingModule( + name='raw_gaze_mapping', + description='Gaze mapping processing module raw outputs') + + raw_gaze_mapping_mod = add_eye_gaze_data_interfaces( + raw_gaze_mapping_mod, + pupil_areas=eye_gaze_data["raw_pupil_areas"], + eye_areas=eye_gaze_data["raw_eye_areas"], + screen_coordinates=eye_gaze_data["raw_screen_coordinates"], + screen_coordinates_spherical=eye_gaze_data["raw_screen_coordinates_spherical"], # noqa: E501 + synced_timestamps=eye_gaze_data["synced_frame_timestamps"]) # Container for filtered gaze mapped data - filt_gaze_mapping_mod = pynwb.ProcessingModule(name='filtered_gaze_mapping', - description='Gaze mapping processing module filtered outputs') - - filt_gaze_mapping_mod = add_eye_gaze_data_interfaces(filt_gaze_mapping_mod, - pupil_areas=eye_gaze_data["new_pupil_areas"], - eye_areas=eye_gaze_data["new_eye_areas"], - screen_coordinates=eye_gaze_data["new_screen_coordinates"], - screen_coordinates_spherical=eye_gaze_data["new_screen_coordinates_spherical"], - synced_timestamps=eye_gaze_data["synced_frame_timestamps"]) + filt_gaze_mapping_mod = pynwb.ProcessingModule( + name='filtered_gaze_mapping', + description='Gaze mapping processing module filtered 
outputs') + + filt_gaze_mapping_mod = add_eye_gaze_data_interfaces( + filt_gaze_mapping_mod, + pupil_areas=eye_gaze_data["new_pupil_areas"], + eye_areas=eye_gaze_data["new_eye_areas"], + screen_coordinates=eye_gaze_data["new_screen_coordinates"], + screen_coordinates_spherical=eye_gaze_data["new_screen_coordinates_spherical"], # noqa: E501 + synced_timestamps=eye_gaze_data["synced_frame_timestamps"]) return (raw_gaze_mapping_mod, filt_gaze_mapping_mod) def add_eye_tracking_ellipse_fit_data_to_nwbfile(nwbfile: pynwb.NWBFile, eye_dlc_tracking_data: dict, - synced_timestamps: pd.Series) -> pynwb.NWBFile: - eye_tracking_mod = create_eye_tracking_nwb_processing_module(eye_dlc_tracking_data, - synced_timestamps) + synced_timestamps: pd.Series + ) -> pynwb.NWBFile: + eye_tracking_mod = create_eye_tracking_nwb_processing_module( + eye_dlc_tracking_data, synced_timestamps) nwbfile.add_processing_module(eye_tracking_mod) return nwbfile @@ -299,7 +325,8 @@ def add_eye_tracking_ellipse_fit_data_to_nwbfile(nwbfile: pynwb.NWBFile, def add_eye_gaze_mapping_data_to_nwbfile(nwbfile: pynwb.NWBFile, eye_gaze_data: dict) -> pynwb.NWBFile: - raw_gaze_mapping_mod, filt_gaze_mapping_mod = create_gaze_mapping_nwb_processing_modules(eye_gaze_data) + raw_gaze_mapping_mod, filt_gaze_mapping_mod = \ + create_gaze_mapping_nwb_processing_modules(eye_gaze_data) nwbfile.add_processing_module(raw_gaze_mapping_mod) nwbfile.add_processing_module(filt_gaze_mapping_mod) @@ -406,17 +433,22 @@ def add_running_speed_to_nwbfile(nwbfile, running_speed, def add_stimulus_template(nwbfile: NWBFile, stimulus_template: StimulusTemplate): - images = [] + unwarped_images = [] + warped_images = [] image_names = [] for image_name, image_data in stimulus_template.items(): image_names.append(image_name) - images.append(image_data) + unwarped_images.append(image_data.unwarped) + warped_images.append(image_data.warped) + + image_index = np.zeros(len(image_names)) + image_index[:] = np.nan - image_index = 
list(range(len(images))) visual_stimulus_image_series = \ - ImageSeries( + StimulusTemplateExtension( name=stimulus_template.image_set_name, - data=images, + data=warped_images, + unwarped=unwarped_images, control=list(range(len(image_names))), control_description=image_names, unit='NA', @@ -427,11 +459,13 @@ def add_stimulus_template(nwbfile: NWBFile, return nwbfile -def create_stimulus_presentation_time_interval(name: str, description: str, - columns_to_add: Iterable) -> pynwb.epoch.TimeIntervals: +def create_stimulus_presentation_time_interval( + name: str, description: str, + columns_to_add: Iterable) -> pynwb.epoch.TimeIntervals: column_descriptions = { "stimulus_name": "Name of stimulus", - "stimulus_block": "Index of contiguous presentations of one stimulus type", + "stimulus_block": ("Index of contiguous presentations of " + "one stimulus type"), "temporal_frequency": "Temporal frequency of stimulus", "x_position": "Horizontal position of stimulus on screen", "y_position": "Vertical position of stimulus on screen", @@ -463,13 +497,15 @@ def create_stimulus_presentation_time_interval(name: str, description: str, for column_name in columns_to_add: if column_name not in columns_to_ignore: - description = column_descriptions.get(column_name, "No description") + description = column_descriptions.get( + column_name, "No description") interval.add_column(name=column_name, description=description) return interval -def add_stimulus_presentations(nwbfile, stimulus_table, tag='stimulus_time_interval'): +def add_stimulus_presentations(nwbfile, stimulus_table, + tag='stimulus_time_interval'): """Adds a stimulus table (defining stimulus characteristics for each time point in a session) to an nwbfile as TimeIntervals. @@ -477,9 +513,10 @@ def add_stimulus_presentations(nwbfile, stimulus_table, tag='stimulus_time_inter ---------- nwbfile : pynwb.NWBFile stimulus_table: pd.DataFrame - Each row corresponds to an interval of time. 
Columns define the interval - (start and stop time) and its characteristics. - Nans in columns with string data will be replaced with the empty strings. + Each row corresponds to an interval of time. Columns define the + interval (start and stop time) and its characteristics. + Nans in columns with string data will be replaced with the empty + strings. Required columns are: start_time :: the time at which this interval started stop_time :: the time at which this interval ended @@ -500,7 +537,7 @@ def add_stimulus_presentations(nwbfile, stimulus_table, tag='stimulus_time_inter stimulus_names = stimulus_table[stimulus_name_column].unique() for stim_name in sorted(stimulus_names): - specific_stimulus_table = stimulus_table[stimulus_table[stimulus_name_column] == stim_name] + specific_stimulus_table = stimulus_table[stimulus_table[stimulus_name_column] == stim_name] # noqa: E501 # Drop columns where all values in column are NaN cleaned_table = specific_stimulus_table.dropna(axis=1, how='all') # For columns with mixed strings and NaNs, fill NaNs with 'N/A' @@ -511,7 +548,10 @@ def add_stimulus_presentations(nwbfile, stimulus_table, tag='stimulus_time_inter cleaned_table[colname] = series.transform(str) interval_description = (f"Presentation times and stimuli details " - f"for '{stim_name}' stimuli") + f"for '{stim_name}' stimuli. 
" + f"\n" + f"Note: image_name references " + f"control_description in stimulus/templates") presentation_interval = create_stimulus_presentation_time_interval( name=f"{stim_name}_presentations", description=interval_description, @@ -569,7 +609,8 @@ def setup_table_for_invalid_times(invalid_epochs): Returns ------- - pd.DataFrame of invalid times if epochs are not empty, otherwise return None + pd.DataFrame of invalid times if epochs are not empty, + otherwise return None """ if invalid_epochs: @@ -594,21 +635,23 @@ def setup_table_for_invalid_times(invalid_epochs): def setup_table_for_epochs(table, timeseries, tag): - table = table.copy() - indices = np.searchsorted(timeseries.timestamps[:], table['start_time'].values) + indices = np.searchsorted(timeseries.timestamps[:], + table['start_time'].values) if len(indices > 0): - diffs = np.concatenate([np.diff(indices), [table.shape[0] - indices[-1]]]) + diffs = np.concatenate([np.diff(indices), + [table.shape[0] - indices[-1]]]) else: diffs = [] table['tags'] = [(tag,)] * table.shape[0] - table['timeseries'] = [[[indices[ii], diffs[ii], timeseries]] for ii in range(table.shape[0])] + table['timeseries'] = [[[indices[ii], diffs[ii], timeseries]] + for ii in range(table.shape[0])] return table -def add_stimulus_timestamps(nwbfile, stimulus_timestamps, module_name='stimulus'): - +def add_stimulus_timestamps(nwbfile, stimulus_timestamps, + module_name='stimulus'): stimulus_ts = TimeSeries( data=stimulus_timestamps, name='timestamps', @@ -625,22 +668,32 @@ def add_stimulus_timestamps(nwbfile, stimulus_timestamps, module_name='stimulus' def add_trials(nwbfile, trials, description_dict={}): - order = list(trials.index) for _, row in trials[['start_time', 'stop_time']].iterrows(): row_dict = row.to_dict() nwbfile.add_trial(**row_dict) - for c in [c for c in trials.columns if c not in ['start_time', 'stop_time']]: + for c in trials.columns: + if c in ['start_time', 'stop_time']: + continue index, data = 
dict_to_indexed_array(trials[c].to_dict(), order) if data.dtype == ' dict: return key_dict -def monitor_delay(sync_dset, stim_times, photodiode_key, - transition_frame_interval=TRANSITION_FRAME_INTERVAL, - max_monitor_delay=MAX_MONITOR_DELAY): +def calculate_monitor_delay(sync_dset, stim_times, photodiode_key, + transition_frame_interval=TRANSITION_FRAME_INTERVAL, # noqa: E501 + max_monitor_delay=MAX_MONITOR_DELAY): """Calculate monitor delay.""" transitions = stim_times[::transition_frame_interval] photodiode_events = get_real_photodiode_events(sync_dset, photodiode_key) @@ -244,6 +244,11 @@ def __init__(self, sync_file, scanner=None, dff_file=None, self._dataset = Dataset(sync_file) self._keys = get_keys(self._dataset) self.long_stim_threshold = long_stim_threshold + + self._monitor_delay = None + self._clipped_stim_ts_delta = None + self._clipped_stim_timestamp_values = None + if dff_file is not None: self.ophys_data_length = get_ophys_data_length(dff_file) else: @@ -265,7 +270,6 @@ def __init__(self, sync_file, scanner=None, dff_file=None, def dataset(self): return self._dataset - @property def ophys_timestamps(self): """Get the timestamps for the ophys data.""" @@ -314,8 +318,7 @@ def stim_timestamps(self): return self.dataset.get_falling_edges(stim_key, units="seconds") - @property - def corrected_stim_timestamps(self): + def _get_clipped_stim_timestamps(self): timestamps = self.stim_timestamps delta = 0 @@ -339,8 +342,72 @@ def corrected_stim_timestamps(self): elif self.stim_data_length is None: logging.info("No data length provided for stim stream") + return timestamps, delta + + @property + def clipped_stim_timestamps(self): + """ + Return the stimulus timestamps with the erroneous initial spike + removed (if relevant) + + Returns + ------- + timestamps: np.ndarray + An array of stimulus timestamps in seconds with th emonitor delay + added + + delta: int + Difference between the length of timestamps + and the number of frames reported in the stimulus + 
pickle file, i.e. + len(timestamps) - len(pkl_file['items']['behavior']['intervalsms'] + """ + if self._clipped_stim_ts_delta is None: + (self._clipped_stim_timestamp_values, + self._clipped_stim_ts_delta) = self._get_clipped_stim_timestamps() + + return (self._clipped_stim_timestamp_values, + self._clipped_stim_ts_delta) + + def _get_monitor_delay(self): + timestamps, delta = self.clipped_stim_timestamps photodiode_key = self._keys["photodiode"] - delay = monitor_delay(self.dataset, timestamps, photodiode_key) + delay = calculate_monitor_delay(self.dataset, + timestamps, + photodiode_key) + return delay + + @property + def monitor_delay(self): + """ + The monitor delay (in seconds) associated with the session + """ + if self._monitor_delay is None: + self._monitor_delay = self._get_monitor_delay() + return self._monitor_delay + + @property + def corrected_stim_timestamps(self): + """ + The stimulus timestamps corrected for monitor delay + + Returns + ------- + timestamps: np.ndarray + An array of stimulus timestamps in seconds with th emonitor delay + added + + delta: int + Difference between the length of timestamps and + the number of frames reported in the stimulus + pickle file, i.e. 
+ len(timestamps) - len(pkl_file['items']['behavior']['intervalsms'] + + delay: float + The monitor delay in seconds + """ + timestamps, delta = self.clipped_stim_timestamps + delay = self.monitor_delay return timestamps + delay, delta, delay diff --git a/allensdk/test/brain_observatory/behavior/conftest.py b/allensdk/test/brain_observatory/behavior/conftest.py index daafd8101..1f102e1bd 100644 --- a/allensdk/test/brain_observatory/behavior/conftest.py +++ b/allensdk/test/brain_observatory/behavior/conftest.py @@ -12,10 +12,15 @@ from allensdk.brain_observatory.behavior.session_apis.data_transforms import \ BehaviorOphysDataTransforms from allensdk.brain_observatory.behavior.stimulus_processing import \ - StimulusTemplate + StimulusTemplateFactory from allensdk.test_utilities.custom_comparators import WhitespaceStrippedString +def get_resources_dir(): + behavior_dir = os.path.dirname(__file__) + return os.path.join(behavior_dir, 'resources') + + def pytest_assertrepr_compare(config, op, left, right): if isinstance(left, WhitespaceStrippedString) and op == "==": if isinstance(right, WhitespaceStrippedString): @@ -27,7 +32,8 @@ def pytest_assertrepr_compare(config, op, left, right): def pytest_ignore_collect(path, config): - ''' The brain_observatory.ecephys submodule uses python 3.6 features that may not be backwards compatible! + ''' The brain_observatory.ecephys submodule uses + python 3.6 features that may not be backwards compatible! 
''' if sys.version_info < (3, 6): @@ -55,7 +61,7 @@ def stimulus_templates(): images = [np.zeros((4, 4)), np.ones((4, 4))] image_attributes = [{'image_name': 'test1'}, {'image_name': 'test2'}] - stimulus_templates = StimulusTemplate( + stimulus_templates = StimulusTemplateFactory.from_unprocessed( image_set_name='test', image_attributes=image_attributes, images=images) return stimulus_templates @@ -74,19 +80,25 @@ def trials(): 'a': [0.5, 0.4, 0.3, 0.2, 0.1], 'b': [[], [1], [2, 2], [3], []], 'c': ['a', 'bb', 'ccc', 'dddd', 'eeeee'], - 'd': [np.array([1]), np.array([1, 2]), np.array([1, 2, 3]), np.array([1, 2, 3, 4]), np.array([1, 2, 3, 4, 5])], + 'd': [np.array([1]), + np.array([1, 2]), + np.array([1, 2, 3]), + np.array([1, 2, 3, 4]), + np.array([1, 2, 3, 4, 5])], }, index=pd.Index(name='trials_id', data=[0, 1, 2, 3, 4])) @pytest.fixture def licks(): - return pd.DataFrame({'time': [1., 2., 3.], 'frame': [4., 5., 6.]}) + return pd.DataFrame({'timestamps': [1., 2., 3.], + 'frame': [4., 5., 6.]}) @pytest.fixture def rewards(): - return pd.DataFrame({'volume': [.01, .01, .01], 'autorewarded': [True, False, False]}, - index=pd.Index(data=[1., 2., 3.], name='timestamps')) + return pd.DataFrame({'volume': [.01, .01, .01], + 'timestamps': [1., 2., 3.], + 'autorewarded': [True, False, False]}) @pytest.fixture @@ -111,14 +123,20 @@ def segmentation_mask_image(max_projection): @pytest.fixture -def stimulus_presentations_behavior(stimulus_templates, stimulus_presentations): +def stimulus_presentations_behavior(stimulus_templates, + stimulus_presentations): - image_sets = ['test1','test1', 'test1', 'test2', 'test2' ] + image_sets = ['test1', 'test1', 'test1', 'test2', 'test2'] + start_time = stimulus_presentations['start_time'] stimulus_index_df = pd.DataFrame({'image_set': image_sets, 'image_index': [0] * len(image_sets)}, - index=pd.Index(stimulus_presentations['start_time'], dtype=np.float64, name='timestamps')) + index=pd.Index(start_time, + dtype=np.float64, + 
name='timestamps')) - df = stimulus_presentations.merge(stimulus_index_df, left_on='start_time', right_index=True) + df = stimulus_presentations.merge(stimulus_index_df, + left_on='start_time', + right_index=True) return df[sorted(df.columns)] @@ -133,7 +151,7 @@ def behavior_only_metadata_fixture(): "driver_line": ["Camk2a-tTA", "Slc17a7-IRES2-Cre"], "LabTracks_ID": 416369, "full_genotype": "Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;" - "Ai93(TITL-GCaMP6f)/wt", + "Ai93(TITL-GCaMP6f)/wt", "behavior_session_uuid": uuid.uuid4(), "stimulus_frame_rate": 60.0, "rig_name": 'my_device', @@ -212,11 +230,29 @@ def task_parameters(): "omitted_flash_fraction": float('nan'), "response_window_sec": [0.15, 0.75], "reward_volume": 0.007, - "stage": "OPHYS_6_images_B", + "session_type": "OPHYS_6_images_B", + "stimulus": "images", + "stimulus_distribution": "geometric", + "task": "DoC_untranslated", + "n_stimulus_frames": 69882, + "auto_reward_volume": 0.005 + } + + +@pytest.fixture +def task_parameters_nan_stimulus_duration(): + + return {"blank_duration_sec": [0.5, 0.5], + "stimulus_duration_sec": np.NaN, + "omitted_flash_fraction": float('nan'), + "response_window_sec": [0.15, 0.75], + "reward_volume": 0.007, + "session_type": "OPHYS_6_images_B", "stimulus": "images", "stimulus_distribution": "geometric", "task": "DoC_untranslated", - "n_stimulus_frames": 69882 + "n_stimulus_frames": 69882, + "auto_reward_volume": 0.005 } @@ -253,13 +289,15 @@ def valid_roi_ids(cell_specimen_table): def dff_traces(ophys_timestamps, cell_specimen_table): return pd.DataFrame({'cell_roi_id': cell_specimen_table['cell_roi_id'], 'dff': [np.ones_like(ophys_timestamps)]}, - index=cell_specimen_table.index) + index=cell_specimen_table.index) + @pytest.fixture def corrected_fluorescence_traces(ophys_timestamps, cell_specimen_table): return pd.DataFrame({'cell_roi_id': cell_specimen_table['cell_roi_id'], - 'corrected_fluorescence': [np.ones_like(ophys_timestamps)]}, - index=cell_specimen_table.index) + 
'corrected_fluorescence': [np.ones_like(ophys_timestamps)]}, # noqa: E501 + index=cell_specimen_table.index) + @pytest.fixture def motion_correction(ophys_timestamps): @@ -276,8 +314,8 @@ def session_data(): 'ophys_experiment_id': 789359614, 'surface_2p_pixel_size_um': 0.78125, 'foraging_id': '69cdbe09-e62b-4b42-aab1-54b5773dfe78', - "max_projection_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/processed/ophys_cell_segmentation_run_789410052/maxInt_a13a.png", - "sync_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/789220000_sync.h5", + "max_projection_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/processed/ophys_cell_segmentation_run_789410052/maxInt_a13a.png", # noqa: E501 + "sync_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/789220000_sync.h5", # noqa: E501 "rig_name": "CAM2P.5", "movie_width": 447, "movie_height": 512, @@ -289,15 +327,15 @@ def session_data(): "reporter_line": ["Ai93(TITL-GCaMP6f)"], "driver_line": ['Camk2a-tTA', 'Slc17a7-IRES2-Cre'], "external_specimen_name": 416369, - "full_genotype": "Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai93(TITL-GCaMP6f)/wt", - "behavior_stimulus_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/behavior_session_789295700/789220000.pkl", - "dff_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/789359614_dff.h5", + "full_genotype": "Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai93(TITL-GCaMP6f)/wt", # noqa: E501 + "behavior_stimulus_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/behavior_session_789295700/789220000.pkl", # noqa: E501 + "dff_file": 
"/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/789359614_dff.h5", # noqa: E501 "ophys_cell_segmentation_run_id": 789410052, - "cell_specimen_table_dict": pd.read_json(os.path.join("/allen", "aibs", "informatics", "nileg", "module_test_data", 'cell_specimen_table_789359614.json'), 'r'), # TODO: I can't write to /allen/aibs/informatics/module_test_data/behavior - "demix_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/demix/789359614_demixed_traces.h5", - "average_intensity_projection_image_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/processed/ophys_cell_segmentation_run_789410052/avgInt_a1X.png", - "rigid_motion_transform_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/processed/789359614_rigid_motion_transform.csv", - "segmentation_mask_image_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/processed/ophys_cell_segmentation_run_789410052/maxInt_masks.tif", + "cell_specimen_table_dict": pd.read_json(os.path.join("/allen", "aibs", "informatics", "nileg", "module_test_data", 'cell_specimen_table_789359614.json'), 'r'), # TODO: I can't write to /allen/aibs/informatics/module_test_data/behavior # noqa: E501 + "demix_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/demix/789359614_demixed_traces.h5", # noqa: E501 + "average_intensity_projection_image_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/processed/ophys_cell_segmentation_run_789410052/avgInt_a1X.png", # noqa: E501 + 
"rigid_motion_transform_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/processed/789359614_rigid_motion_transform.csv", # noqa: E501 + "segmentation_mask_image_file": "/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/processed/ophys_cell_segmentation_run_789410052/maxInt_masks.tif", # noqa: E501 "sex": "F", "age": "P139", "imaging_plane_group": None, @@ -330,29 +368,43 @@ def behavior_stimuli_data_fixture(request): grating_phase = request.param.get("grating_phase", None) grating_spatial_frequency = request.param.get("grating_spatial_frequency", None) - resources_dir = os.path.join(os.path.dirname(__file__), 'resources') + + has_images = request.param.get("has_images", True) + has_grating = request.param.get("has_grating", True) + + resources_dir = get_resources_dir() + + image_data = { + "set_log": images_set_log, + "draw_log": images_draw_log, + "image_path": os.path.join(resources_dir, + 'stimulus_template', + 'input', + 'test_image_set.pkl') + } + + grating_data = { + "set_log": grating_set_log, + "draw_log": grating_draw_log, + "phase": grating_phase, + "sf": grating_spatial_frequency + } data = { "items": { "behavior": { - "stimuli": { - "images": { - "set_log": images_set_log, - "draw_log": images_draw_log, - "image_path": os.path.join(resources_dir, - 'test_image_set.pkl') - }, - "grating": { - "set_log": grating_set_log, - "draw_log": grating_draw_log, - "phase": grating_phase, - "sf": grating_spatial_frequency - } - }, + "stimuli": {}, "omitted_flash_frame_log": omitted_flash_frame_log } } } + + if has_images: + data["items"]["behavior"]["stimuli"]["images"] = image_data + + if has_grating: + data["items"]["behavior"]["stimuli"]["grating"] = grating_data + return data diff --git a/allensdk/test/brain_observatory/behavior/resources/stimulus_template/expected/im065_unwarped.pkl 
b/allensdk/test/brain_observatory/behavior/resources/stimulus_template/expected/im065_unwarped.pkl new file mode 100644 index 000000000..50d9b6054 Binary files /dev/null and b/allensdk/test/brain_observatory/behavior/resources/stimulus_template/expected/im065_unwarped.pkl differ diff --git a/allensdk/test/brain_observatory/behavior/resources/stimulus_template/expected/im065_warped.pkl b/allensdk/test/brain_observatory/behavior/resources/stimulus_template/expected/im065_warped.pkl new file mode 100644 index 000000000..b899ede13 Binary files /dev/null and b/allensdk/test/brain_observatory/behavior/resources/stimulus_template/expected/im065_warped.pkl differ diff --git a/allensdk/test/brain_observatory/behavior/resources/stimulus_template/input/test_image_set.pkl b/allensdk/test/brain_observatory/behavior/resources/stimulus_template/input/test_image_set.pkl new file mode 100644 index 000000000..c529064e7 Binary files /dev/null and b/allensdk/test/brain_observatory/behavior/resources/stimulus_template/input/test_image_set.pkl differ diff --git a/allensdk/test/brain_observatory/behavior/resources/test_image_set.pkl b/allensdk/test/brain_observatory/behavior/resources/test_image_set.pkl deleted file mode 100644 index 9706062cf..000000000 Binary files a/allensdk/test/brain_observatory/behavior/resources/test_image_set.pkl and /dev/null differ diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_data_xforms.py b/allensdk/test/brain_observatory/behavior/test_behavior_data_xforms.py index d506fbed8..650a94127 100644 --- a/allensdk/test/brain_observatory/behavior/test_behavior_data_xforms.py +++ b/allensdk/test/brain_observatory/behavior/test_behavior_data_xforms.py @@ -1,3 +1,5 @@ +import pytest +import logging import numpy as np import pandas as pd from allensdk.brain_observatory.behavior.session_apis.data_transforms import BehaviorDataTransforms # noqa: E501 @@ -88,7 +90,7 @@ def dummy_stimulus_file(self): 'timestamps': [0.04, 0.1], 'autorewarded': [True, 
False]} expected_df = pd.DataFrame(expected_dict) - expected_df = expected_df.set_index('timestamps', drop=True) + expected_df = expected_df assert expected_df.equals(rewards) @@ -142,13 +144,189 @@ def dummy_stimulus_file(self): licks = xforms.get_licks() - expected_dict = {'time': [0.12, 0.15, 0.90, 1.36], + expected_dict = {'timestamps': [0.12, 0.15, 0.90, 1.36], 'frame': [12, 15, 90, 136]} expected_df = pd.DataFrame(expected_dict) assert expected_df.columns.equals(licks.columns) - np.testing.assert_array_almost_equal(expected_df.time.to_numpy(), - licks.time.to_numpy(), + np.testing.assert_array_almost_equal(expected_df.timestamps.to_numpy(), + licks.timestamps.to_numpy(), decimal=10) np.testing.assert_array_almost_equal(expected_df.frame.to_numpy(), licks.frame.to_numpy(), decimal=10) + + +def test_empty_licks(monkeypatch): + """ + Test that BehaviorDataTransforms.get_licks() in the case where + there are no licks + """ + + def dummy_init(self): + self.logger = logging.getLogger('dummy') + pass + + def dummy_stimulus_timestamps(self): + return np.arange(0, 2.0, 0.01) + + def dummy_stimulus_file(self): + + # in this test, the trial log exists to make sure + # that get_licks is *not* reading the licks from + # here + trial_log = [] + trial_log.append({'licks': [(-1.0, 100), (-1.0, 200)]}) + trial_log.append({'licks': [(-1.0, 300), (-1.0, 400)]}) + trial_log.append({'licks': [(-1.0, 500), (-1.0, 600)]}) + + lick_events = [] + lick_events = [{'lick_events': lick_events}] + + data = {} + data['items'] = {} + data['items']['behavior'] = {} + data['items']['behavior']['trial_log'] = trial_log + data['items']['behavior']['lick_sensors'] = lick_events + return data + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorDataTransforms, + '__init__', + dummy_init) + + ctx.setattr(BehaviorDataTransforms, + 'get_stimulus_timestamps', + dummy_stimulus_timestamps) + + ctx.setattr(BehaviorDataTransforms, + '_behavior_stimulus_file', + dummy_stimulus_file) + + xforms = 
BehaviorDataTransforms() + + licks = xforms.get_licks() + + expected_dict = {'timestamps': [], + 'frame': []} + expected_df = pd.DataFrame(expected_dict) + assert expected_df.columns.equals(licks.columns) + np.testing.assert_array_equal(expected_df.timestamps.to_numpy(), + licks.timestamps.to_numpy()) + np.testing.assert_array_equal(expected_df.frame.to_numpy(), + licks.frame.to_numpy()) + + +def test_get_licks_excess(monkeypatch): + """ + Test that BehaviorDataTransforms.get_licks() in the case where + there is an extra frame at the end of the trial log and the mouse + licked on that frame + + https://github.com/AllenInstitute/visual_behavior_analysis/blob/master/visual_behavior/translator/foraging2/extract.py#L640-L647 + """ + + def dummy_init(self): + self.logger = logging.getLogger('dummy') + pass + + def dummy_stimulus_timestamps(self): + return np.arange(0, 2.0, 0.01) + + def dummy_stimulus_file(self): + + # in this test, the trial log exists to make sure + # that get_licks is *not* reading the licks from + # here + trial_log = [] + trial_log.append({'licks': [(-1.0, 100), (-1.0, 200)]}) + trial_log.append({'licks': [(-1.0, 300), (-1.0, 400)]}) + trial_log.append({'licks': [(-1.0, 500), (-1.0, 600)]}) + + lick_events = [12, 15, 90, 136, 200] # len(timestamps) == 200 + lick_events = [{'lick_events': lick_events}] + + data = {} + data['items'] = {} + data['items']['behavior'] = {} + data['items']['behavior']['trial_log'] = trial_log + data['items']['behavior']['lick_sensors'] = lick_events + return data + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorDataTransforms, + '__init__', + dummy_init) + + ctx.setattr(BehaviorDataTransforms, + 'get_stimulus_timestamps', + dummy_stimulus_timestamps) + + ctx.setattr(BehaviorDataTransforms, + '_behavior_stimulus_file', + dummy_stimulus_file) + + xforms = BehaviorDataTransforms() + + licks = xforms.get_licks() + + expected_dict = {'timestamps': [0.12, 0.15, 0.90, 1.36], + 'frame': [12, 15, 90, 136]} + 
expected_df = pd.DataFrame(expected_dict) + assert expected_df.columns.equals(licks.columns) + np.testing.assert_array_almost_equal(expected_df.timestamps.to_numpy(), + licks.timestamps.to_numpy(), + decimal=10) + np.testing.assert_array_almost_equal(expected_df.frame.to_numpy(), + licks.frame.to_numpy(), + decimal=10) + + +def test_get_licks_failure(monkeypatch): + """ + Test that BehaviorDataTransforms.get_licks() fails if the last lick + is more than one frame beyond the end of the timestamps + """ + + def dummy_init(self): + self.logger = logging.getLogger('dummy') + pass + + def dummy_stimulus_timestamps(self): + return np.arange(0, 2.0, 0.01) + + def dummy_stimulus_file(self): + + # in this test, the trial log exists to make sure + # that get_licks is *not* reading the licks from + # here + trial_log = [] + trial_log.append({'licks': [(-1.0, 100), (-1.0, 200)]}) + trial_log.append({'licks': [(-1.0, 300), (-1.0, 400)]}) + trial_log.append({'licks': [(-1.0, 500), (-1.0, 600)]}) + + lick_events = [12, 15, 90, 136, 201] # len(timestamps) == 200 + lick_events = [{'lick_events': lick_events}] + + data = {} + data['items'] = {} + data['items']['behavior'] = {} + data['items']['behavior']['trial_log'] = trial_log + data['items']['behavior']['lick_sensors'] = lick_events + return data + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorDataTransforms, + '__init__', + dummy_init) + + ctx.setattr(BehaviorDataTransforms, + 'get_stimulus_timestamps', + dummy_stimulus_timestamps) + + ctx.setattr(BehaviorDataTransforms, + '_behavior_stimulus_file', + dummy_stimulus_file) + + xforms = BehaviorDataTransforms() + with pytest.raises(IndexError): + xforms.get_licks() diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_lims_api.py b/allensdk/test/brain_observatory/behavior/test_behavior_lims_api.py index a07a08dd8..702d742dd 100644 --- a/allensdk/test/brain_observatory/behavior/test_behavior_lims_api.py +++ 
b/allensdk/test/brain_observatory/behavior/test_behavior_lims_api.py @@ -201,7 +201,7 @@ def test_get_stimulus_timestamps(MockBehaviorLimsApi): def test_get_licks(MockBehaviorLimsApi): api = MockBehaviorLimsApi - expected = pd.DataFrame({"time": [0.016 * i for i in [2., 6., 9.]], + expected = pd.DataFrame({"timestamps": [0.016 * i for i in [2., 6., 9.]], "frame": [2, 6, 9]}) pd.testing.assert_frame_equal(expected, api.get_licks()) @@ -273,9 +273,8 @@ def test_stim_file_regression(self): == self.od.extractor.get_behavior_stimulus_file()) def test_get_rewards_regression(self): - """Index is timestamps here, so remove it before comparing.""" - bd_rewards = self.bd.get_rewards().reset_index(drop=True) - od_rewards = self.od.get_rewards().reset_index(drop=True) + bd_rewards = self.bd.get_rewards().drop(columns=['timestamps']) + od_rewards = self.od.get_rewards().drop(columns=['timestamps']) pd.testing.assert_frame_equal(bd_rewards, od_rewards) def test_ophys_experiment_id_regression(self): @@ -324,7 +323,15 @@ def test_get_stimulus_template_regression(self): od_template = self.od.get_stimulus_templates() assert bd_template.keys() == od_template.keys() for k in bd_template.keys(): - assert np.array_equal(bd_template[k], od_template[k]) + bd_template_img = bd_template[k] + od_template_img = od_template[k] + + assert np.allclose(bd_template_img.unwarped, + od_template_img.unwarped, + equal_nan=True) + assert np.allclose(bd_template_img.warped, + od_template_img.warped, + equal_nan=True) def test_get_task_parameters_regression(self): bd_params = self.bd.get_task_parameters() diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_ophys_data_xforms.py b/allensdk/test/brain_observatory/behavior/test_behavior_ophys_data_xforms.py index 01eda2a28..dfd0288e7 100644 --- a/allensdk/test/brain_observatory/behavior/test_behavior_ophys_data_xforms.py +++ b/allensdk/test/brain_observatory/behavior/test_behavior_ophys_data_xforms.py @@ -1,8 +1,9 @@ import pytest - +import 
logging import numpy as np import pandas as pd from allensdk.brain_observatory.behavior.session_apis.data_transforms import BehaviorOphysDataTransforms # noqa: E501 +from allensdk.internal.brain_observatory.time_sync import OphysTimeAligner @pytest.mark.parametrize("roi_ids,expected", [ @@ -96,13 +97,13 @@ def dummy_stimulus_file(self): 'timestamps': [0.04, 0.1], 'autorewarded': [True, False]} expected_df = pd.DataFrame(expected_dict) - expected_df = expected_df.set_index('timestamps', drop=True) + expected_df = expected_df assert expected_df.equals(rewards) def test_get_licks(monkeypatch): """ - Test that BehaviorDataTransforms.get_licks() a dataframe + Test that BehaviorOphysDataTransforms.get_licks() a dataframe of licks whose timestamps are based on their frame number with respect to the stimulus_timestamps """ @@ -150,13 +151,346 @@ def dummy_stimulus_file(self): licks = xforms.get_licks() - expected_dict = {'time': [0.12, 0.15, 0.90, 1.36], + expected_dict = {'timestamps': [0.12, 0.15, 0.90, 1.36], + 'frame': [12, 15, 90, 136]} + expected_df = pd.DataFrame(expected_dict) + assert expected_df.columns.equals(licks.columns) + np.testing.assert_array_almost_equal(expected_df.timestamps.to_numpy(), + licks.timestamps.to_numpy(), + decimal=10) + np.testing.assert_array_almost_equal(expected_df.frame.to_numpy(), + licks.frame.to_numpy(), + decimal=10) + + +def test_get_licks_excess(monkeypatch): + """ + Test that BehaviorOphysDataTransforms.get_licks() in the case where + there is an extra frame at the end of the trial log and the mouse + licked on that frame + + https://github.com/AllenInstitute/visual_behavior_analysis/blob/master/visual_behavior/translator/foraging2/extract.py#L640-L647 + """ + + def dummy_init(self): + self.logger = logging.getLogger('dummy') + pass + + def dummy_stimulus_timestamps(self): + return np.arange(0, 2.0, 0.01) + + def dummy_stimulus_file(self): + + # in this test, the trial log exists to make sure + # that get_licks is *not* reading 
the licks from + # here + trial_log = [] + trial_log.append({'licks': [(-1.0, 100), (-1.0, 200)]}) + trial_log.append({'licks': [(-1.0, 300), (-1.0, 400)]}) + trial_log.append({'licks': [(-1.0, 500), (-1.0, 600)]}) + + lick_events = [12, 15, 90, 136, 200] # len(timestamps) == 200 + lick_events = [{'lick_events': lick_events}] + + data = {} + data['items'] = {} + data['items']['behavior'] = {} + data['items']['behavior']['trial_log'] = trial_log + data['items']['behavior']['lick_sensors'] = lick_events + return data + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorOphysDataTransforms, + '__init__', + dummy_init) + + ctx.setattr(BehaviorOphysDataTransforms, + 'get_stimulus_timestamps', + dummy_stimulus_timestamps) + + ctx.setattr(BehaviorOphysDataTransforms, + '_behavior_stimulus_file', + dummy_stimulus_file) + + xforms = BehaviorOphysDataTransforms() + + licks = xforms.get_licks() + + expected_dict = {'timestamps': [0.12, 0.15, 0.90, 1.36], 'frame': [12, 15, 90, 136]} expected_df = pd.DataFrame(expected_dict) assert expected_df.columns.equals(licks.columns) - np.testing.assert_array_almost_equal(expected_df.time.to_numpy(), - licks.time.to_numpy(), + np.testing.assert_array_almost_equal(expected_df.timestamps.to_numpy(), + licks.timestamps.to_numpy(), decimal=10) np.testing.assert_array_almost_equal(expected_df.frame.to_numpy(), licks.frame.to_numpy(), decimal=10) + + +def test_empty_licks(monkeypatch): + """ + Test that BehaviorOphysDataTransforms.get_licks() in the case where + there are no licks + """ + + def dummy_init(self): + self.logger = logging.getLogger('dummy') + pass + + def dummy_stimulus_timestamps(self): + return np.arange(0, 2.0, 0.01) + + def dummy_stimulus_file(self): + + # in this test, the trial log exists to make sure + # that get_licks is *not* reading the licks from + # here + trial_log = [] + trial_log.append({'licks': [(-1.0, 100), (-1.0, 200)]}) + trial_log.append({'licks': [(-1.0, 300), (-1.0, 400)]}) + 
trial_log.append({'licks': [(-1.0, 500), (-1.0, 600)]}) + + lick_events = [] + lick_events = [{'lick_events': lick_events}] + + data = {} + data['items'] = {} + data['items']['behavior'] = {} + data['items']['behavior']['trial_log'] = trial_log + data['items']['behavior']['lick_sensors'] = lick_events + return data + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorOphysDataTransforms, + '__init__', + dummy_init) + + ctx.setattr(BehaviorOphysDataTransforms, + 'get_stimulus_timestamps', + dummy_stimulus_timestamps) + + ctx.setattr(BehaviorOphysDataTransforms, + '_behavior_stimulus_file', + dummy_stimulus_file) + + xforms = BehaviorOphysDataTransforms() + + licks = xforms.get_licks() + + expected_dict = {'timestamps': [], + 'frame': []} + expected_df = pd.DataFrame(expected_dict) + assert expected_df.columns.equals(licks.columns) + np.testing.assert_array_equal(expected_df.timestamps.to_numpy(), + licks.timestamps.to_numpy()) + np.testing.assert_array_equal(expected_df.frame.to_numpy(), + licks.frame.to_numpy()) + + +def test_get_licks_failure(monkeypatch): + """ + Test that BehaviorOphysDataTransforms.get_licks() fails if the last lick + is more than one frame beyond the end of the timestamps + """ + + def dummy_init(self): + self.logger = logging.getLogger('dummy') + pass + + def dummy_stimulus_timestamps(self): + return np.arange(0, 2.0, 0.01) + + def dummy_stimulus_file(self): + + # in this test, the trial log exists to make sure + # that get_licks is *not* reading the licks from + # here + trial_log = [] + trial_log.append({'licks': [(-1.0, 100), (-1.0, 200)]}) + trial_log.append({'licks': [(-1.0, 300), (-1.0, 400)]}) + trial_log.append({'licks': [(-1.0, 500), (-1.0, 600)]}) + + lick_events = [12, 15, 90, 136, 201] # len(timestamps) == 200 + lick_events = [{'lick_events': lick_events}] + + data = {} + data['items'] = {} + data['items']['behavior'] = {} + data['items']['behavior']['trial_log'] = trial_log + data['items']['behavior']['lick_sensors'] = 
lick_events + return data + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorOphysDataTransforms, + '__init__', + dummy_init) + + ctx.setattr(BehaviorOphysDataTransforms, + 'get_stimulus_timestamps', + dummy_stimulus_timestamps) + + ctx.setattr(BehaviorOphysDataTransforms, + '_behavior_stimulus_file', + dummy_stimulus_file) + + xforms = BehaviorOphysDataTransforms() + with pytest.raises(IndexError): + xforms.get_licks() + + +def test_timestamps_and_delay(monkeypatch): + """ + Test that BehaviorOphysDataTransforms returns the right values + with get_stimulus_timestamps and get_monitor_delay + """ + def dummy_loader(self): + self._stimulus_timestamps = np.array([2, 3, 7]) + self._monitor_delay = 99.3 + + def dummy_init(self): + pass + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorOphysDataTransforms, + "__init__", + dummy_init) + ctx.setattr(BehaviorOphysDataTransforms, + "_load_stimulus_timestamps_and_delay", + dummy_loader) + + xforms = BehaviorOphysDataTransforms() + np.testing.assert_array_equal(xforms.get_stimulus_timestamps(), + np.array([2, 3, 7])) + assert abs(xforms.get_monitor_delay() - 99.3) < 1.0e-10 + + # need to reverse order to make sure loader works + # correctly + xforms = BehaviorOphysDataTransforms() + assert abs(xforms.get_monitor_delay() - 99.3) < 1.0e-10 + np.testing.assert_array_equal(xforms.get_stimulus_timestamps(), + np.array([2, 3, 7])) + + +def test_monitor_delay(monkeypatch): + """ + Check that BehaviorOphysDataTransforms can handle all + edge cases of monitor delay calculation + """ + + # first test case where monitor delay calculation succeeds + def xform_init(self): + class DummyExtractor(object): + def get_sync_file(self): + return '' + self.extractor = DummyExtractor() + + def aligner_init(self, sync_file=None): + self._monitor_delay = None + self._clipped_stim_ts_delta = None + + def dummy_clipped(self): + return np.array([1, 2, 3, 4, 5], dtype=int), -1 + + def dummy_delay(self): + return 1.12 + + with 
monkeypatch.context() as ctx: + ctx.setattr(BehaviorOphysDataTransforms, + '__init__', + xform_init) + + ctx.setattr(OphysTimeAligner, + '__init__', + aligner_init) + + ctx.setattr(OphysTimeAligner, + '_get_clipped_stim_timestamps', + dummy_clipped) + + ctx.setattr(OphysTimeAligner, + '_get_monitor_delay', + dummy_delay) + + xforms = BehaviorOphysDataTransforms() + assert abs(xforms.get_monitor_delay() - 1.12) < 1.0e-6 + np.testing.assert_array_equal(xforms.get_stimulus_timestamps(), + np.array([1, 2, 3, 4, 5], dtype=int)) + + # now try case where monitor delay fails, but value can + # be looked up + def dummy_delay(self): + raise ValueError("that did not work") + + delay_lookup = {'CAM2P.1': 0.020842, + 'CAM2P.2': 0.037566, + 'CAM2P.3': 0.021390, + 'CAM2P.4': 0.021102, + 'CAM2P.5': 0.021192, + 'MESO.1': 0.03613} + + for rig_name in delay_lookup.keys(): + expected_delay = delay_lookup[rig_name] + + def dummy_get_metadata(self): + return {'rig_name': rig_name} + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorOphysDataTransforms, + '__init__', + xform_init) + + ctx.setattr(BehaviorOphysDataTransforms, + 'get_metadata', + dummy_get_metadata) + + ctx.setattr(OphysTimeAligner, + '__init__', + aligner_init) + + ctx.setattr(OphysTimeAligner, + '_get_clipped_stim_timestamps', + dummy_clipped) + + ctx.setattr(OphysTimeAligner, + '_get_monitor_delay', + dummy_delay) + + xforms = BehaviorOphysDataTransforms() + with pytest.warns(UserWarning): + m = xforms.get_monitor_delay() + assert abs(m - expected_delay) < 1.0e-6 + np.testing.assert_array_equal(xforms.get_stimulus_timestamps(), + np.array([1, 2, 3, 4, 5], dtype=int)) + + # finally, try case with unknown rig name + def dummy_get_metadata(self): + return {'rig_name': 'spam'} + + def dummy_delay(self): + raise ValueError("that did not work") + + with monkeypatch.context() as ctx: + ctx.setattr(BehaviorOphysDataTransforms, + '__init__', + xform_init) + + ctx.setattr(BehaviorOphysDataTransforms, + 'get_metadata', + 
dummy_get_metadata) + + ctx.setattr(OphysTimeAligner, + '__init__', + aligner_init) + + ctx.setattr(OphysTimeAligner, + '_get_clipped_stim_timestamps', + dummy_clipped) + + ctx.setattr(OphysTimeAligner, + '_get_monitor_delay', + dummy_delay) + + xforms = BehaviorOphysDataTransforms() + with pytest.raises(RuntimeError): + xforms.get_monitor_delay() diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_ophys_session.py b/allensdk/test/brain_observatory/behavior/test_behavior_ophys_session.py index d26b400df..0c29fe706 100644 --- a/allensdk/test/brain_observatory/behavior/test_behavior_ophys_session.py +++ b/allensdk/test/brain_observatory/behavior/test_behavior_ophys_session.py @@ -1,7 +1,6 @@ import os import datetime import uuid -import math import pytest import pandas as pd import pytz @@ -17,6 +16,7 @@ BehaviorOphysNwbApi, BehaviorOphysLimsApi) from allensdk.brain_observatory.session_api_utils import ( sessions_are_equal, compare_session_fields) +from allensdk.brain_observatory.stimulus_info import MONITOR_DIMENSIONS @pytest.mark.requires_bamboo @@ -73,25 +73,26 @@ def test_visbeh_ophys_data_set(): # All sorts of assert relationships: assert data_set.api.extractor.get_foraging_id() == \ - str(data_set.api.get_behavior_session_uuid()) + str(data_set.api.get_behavior_session_uuid()) - stimulus_templates = data_set.stimulus_templates + stimulus_templates = data_set._stimulus_templates assert len(stimulus_templates) == 8 - assert stimulus_templates['im000'].shape == (918, 1174) + assert stimulus_templates['im000'].warped.shape == MONITOR_DIMENSIONS + assert stimulus_templates['im000'].unwarped.shape == MONITOR_DIMENSIONS - assert len(data_set.licks) == 2421 and list(data_set.licks.columns) \ - == ['time', 'frame'] - assert len(data_set.rewards) == 85 and list(data_set.rewards.columns) == \ - ['volume', 'autorewarded'] + assert len(data_set.licks) == 2421 and set(data_set.licks.columns) \ + == set(['timestamps', 'frame']) + assert len(data_set.rewards) 
== 85 and set(data_set.rewards.columns) == \ + set(['timestamps', 'volume', 'autorewarded']) assert len(data_set.corrected_fluorescence_traces) == 258 and \ - sorted(data_set.corrected_fluorescence_traces.columns) == \ - ['cell_roi_id', 'corrected_fluorescence'] + set(data_set.corrected_fluorescence_traces.columns) == \ + set(['cell_roi_id', 'corrected_fluorescence']) np.testing.assert_array_almost_equal(data_set.running_speed.timestamps, data_set.stimulus_timestamps) assert len(data_set.cell_specimen_table) == len(data_set.dff_traces) assert data_set.average_projection.data.shape == \ - data_set.max_projection.data.shape - assert list(data_set.motion_correction.columns) == ['x', 'y'] + data_set.max_projection.data.shape + assert set(data_set.motion_correction.columns) == set(['x', 'y']) assert len(data_set.trials) == 602 expected_metadata = { @@ -126,14 +127,15 @@ def test_visbeh_ophys_data_set(): assert data_set.metadata == expected_metadata assert data_set.task_parameters == {'reward_volume': 0.007, 'stimulus_distribution': u'geometric', - 'stimulus_duration_sec': 6.0, + 'stimulus_duration_sec': 0.25, 'stimulus': 'images', 'omitted_flash_fraction': 0.05, 'blank_duration_sec': [0.5, 0.5], 'n_stimulus_frames': 69882, - 'task': 'DoC_untranslated', + 'task': 'change detection', 'response_window_sec': [0.15, 0.75], - 'stage': u'OPHYS_6_images_B'} + 'session_type': u'OPHYS_6_images_B', + 'auto_reward_volume': 0.005} @pytest.mark.requires_bamboo diff --git a/allensdk/test/brain_observatory/behavior/test_eye_tracking_processing.py b/allensdk/test/brain_observatory/behavior/test_eye_tracking_processing.py index 8fa7fcdba..622770cbc 100644 --- a/allensdk/test/brain_observatory/behavior/test_eye_tracking_processing.py +++ b/allensdk/test/brain_observatory/behavior/test_eye_tracking_processing.py @@ -32,8 +32,8 @@ def create_area_df(data: np.ndarray) -> pd.DataFrame: def create_refined_eye_tracking_df(data: np.ndarray) -> pd.DataFrame: - columns = ["time", "cr_area", 
"eye_area", "pupil_area", "likely_blink", - "pupil_area_raw", "cr_area_raw", "eye_area_raw", + columns = ["timestamps", "cr_area", "eye_area", "pupil_area", + "likely_blink", "pupil_area_raw", "cr_area_raw", "eye_area_raw", "cr_center_x", "cr_center_y", "cr_width", "cr_height", "cr_phi", "eye_center_x", "eye_center_y", "eye_width", "eye_height", "eye_phi", "pupil_center_x", "pupil_center_y", "pupil_width", @@ -174,14 +174,37 @@ def test_determine_likely_blinks(eye_areas, pupil_areas, outliers, (create_loaded_eye_tracking_df( np.array([[1, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 2, 1], [2, 2, 1, 1, 2, 2, 1, 2, 2, 1, 2, 1, 2, 1, 2]])), - pd.Series([0.1, 0.2, 0.3])), + pd.Series(np.arange(0, 1.8, 0.1))), ]) def test_process_eye_tracking_data_raises_on_sync_error(eye_tracking_df, frame_times): + """ + Test that an error is raised when the number of sync timestamps exceeds + the number of eye tracking frames by more than 15 + """ with pytest.raises(RuntimeError, match='Error! The number of sync file'): process_eye_tracking_data(eye_tracking_df, frame_times) +@pytest.mark.parametrize("eye_tracking_df, frame_times", [ + (create_loaded_eye_tracking_df( + np.array([[1, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 2, 1], + [2, 2, 1, 1, 2, 2, 1, 2, 2, 1, 2, 1, 2, 1, 2]])), + pd.Series(np.arange(0, 1.7, 0.1))), +]) +def test_process_eye_tracking_data_truncation(eye_tracking_df, + frame_times): + """ + Test that the array of sync times is truncated when the number + of raw sync timestamps exceeds the numer of eye tracking frames + by <= 15 + """ + df = process_eye_tracking_data(eye_tracking_df, frame_times) + np.testing.assert_array_almost_equal(df.timestamps.to_numpy(), + np.array([0.0, 0.1]), + decimal=10) + + @pytest.mark.parametrize("eye_tracking_df, frame_times, expected", [ (create_loaded_eye_tracking_df( np.array([[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., diff --git a/allensdk/test/brain_observatory/behavior/test_get_trial_methods.py 
b/allensdk/test/brain_observatory/behavior/test_get_trial_methods.py index 8ef656530..daf2c4fc3 100644 --- a/allensdk/test/brain_observatory/behavior/test_get_trial_methods.py +++ b/allensdk/test/brain_observatory/behavior/test_get_trial_methods.py @@ -59,14 +59,18 @@ def dummy_init(self): n_t = len(pkl_data['items']['behavior']['intervalsms']) + 1 timestamps = np.linspace(0, 1, n_t) + def dummy_loader(self): + self._stimulus_timestamps = np.copy(timestamps) + self._monitor_delay = 0.021 + with monkeypatch.context() as ctx: ctx.setattr(BehaviorOphysDataTransforms, '__init__', dummy_init) ctx.setattr(BehaviorOphysDataTransforms, - 'get_stimulus_timestamps', - lambda x: timestamps) + '_load_stimulus_timestamps_and_delay', + dummy_loader) ctx.setattr(BehaviorOphysDataTransforms, '_behavior_stimulus_file', diff --git a/allensdk/test/brain_observatory/behavior/test_metadata_processing.py b/allensdk/test/brain_observatory/behavior/test_metadata_processing.py index 871d8ad8f..ef5344d45 100644 --- a/allensdk/test/brain_observatory/behavior/test_metadata_processing.py +++ b/allensdk/test/brain_observatory/behavior/test_metadata_processing.py @@ -5,16 +5,16 @@ description_dict, get_task_parameters, get_expt_description) -def test_get_task_parameters(): - data = { +@pytest.mark.parametrize("data, expected", + [pytest.param({ # noqa: E128 "items": { "behavior": { "config": { "DoC": { "blank_duration_range": (0.5, 0.6), - "stimulus_window": 6.0, "response_window": [0.15, 0.75], "change_time_dist": "geometric", + "auto_reward_volume": 0.002, }, "reward": { "reward_volume": 0.007, @@ -28,24 +28,152 @@ def test_get_task_parameters(): "flash_omit_probability": 0.05 }, "stimuli": { - "images": {"draw_log": [1]*10} + "images": {"draw_log": [1]*10, + "flash_interval_sec": [0.32, -1.0]} }, } } - } + }, + { + "blank_duration_sec": [0.5, 0.6], + "stimulus_duration_sec": 0.32, + "omitted_flash_fraction": 0.05, + "response_window_sec": [0.15, 0.75], + "reward_volume": 0.007, + 
"session_type": "TRAINING_3_images_A", + "stimulus": "images", + "stimulus_distribution": "geometric", + "task": "change detection", + "n_stimulus_frames": 10, + "auto_reward_volume": 0.002 + }, id='basic'), + pytest.param({ + "items": { + "behavior": { + "config": { + "DoC": { + "blank_duration_range": (0.5, 0.5), + "response_window": [0.15, 0.75], + "change_time_dist": "geometric", + "auto_reward_volume": 0.002 + }, + "reward": { + "reward_volume": 0.007, + }, + "behavior": { + "task_id": "DoC_untranslated", + }, + }, + "params": { + "stage": "TRAINING_3_images_A", + "flash_omit_probability": 0.05 + }, + "stimuli": { + "images": {"draw_log": [1]*10, + "flash_interval_sec": [0.32, -1.0]} + }, + } + } + }, + { + "blank_duration_sec": [0.5, 0.5], + "stimulus_duration_sec": 0.32, + "omitted_flash_fraction": 0.05, + "response_window_sec": [0.15, 0.75], + "reward_volume": 0.007, + "session_type": "TRAINING_3_images_A", + "stimulus": "images", + "stimulus_distribution": "geometric", + "task": "change detection", + "n_stimulus_frames": 10, + "auto_reward_volume": 0.002 + }, id='single_value_blank_duration'), + pytest.param({ + "items": { + "behavior": { + "config": { + "DoC": { + "blank_duration_range": (0.5, 0.5), + "response_window": [0.15, 0.75], + "change_time_dist": "geometric", + "auto_reward_volume": 0.002 + }, + "reward": { + "reward_volume": 0.007, + }, + "behavior": { + "task_id": "DoC_untranslated", + }, + }, + "params": { + "stage": "TRAINING_3_images_A", + "flash_omit_probability": 0.05 + }, + "stimuli": { + "grating": {"draw_log": [1]*10, + "flash_interval_sec": [0.34, -1.0]} + }, + } + } + }, + { + "blank_duration_sec": [0.5, 0.5], + "stimulus_duration_sec": 0.34, + "omitted_flash_fraction": 0.05, + "response_window_sec": [0.15, 0.75], + "reward_volume": 0.007, + "session_type": "TRAINING_3_images_A", + "stimulus": "grating", + "stimulus_distribution": "geometric", + "task": "change detection", + "n_stimulus_frames": 10, + "auto_reward_volume": 0.002 + }, 
id='stimulus_duration_from_grating'), + pytest.param({ + "items": { + "behavior": { + "config": { + "DoC": { + "blank_duration_range": (0.5, 0.5), + "response_window": [0.15, 0.75], + "change_time_dist": "geometric", + "auto_reward_volume": 0.002 + }, + "reward": { + "reward_volume": 0.007, + }, + "behavior": { + "task_id": "DoC_untranslated", + }, + }, + "params": { + "stage": "TRAINING_3_images_A", + "flash_omit_probability": 0.05 + }, + "stimuli": { + "grating": {"draw_log": [1]*10, + "flash_interval_sec": None} + }, + } + } + }, + { + "blank_duration_sec": [0.5, 0.5], + "stimulus_duration_sec": np.NaN, + "omitted_flash_fraction": 0.05, + "response_window_sec": [0.15, 0.75], + "reward_volume": 0.007, + "session_type": "TRAINING_3_images_A", + "stimulus": "grating", + "stimulus_distribution": "geometric", + "task": "change detection", + "n_stimulus_frames": 10, + "auto_reward_volume": 0.002 + }, id='stimulus_duration_none') + ] +) +def test_get_task_parameters(data, expected): actual = get_task_parameters(data) - expected = { - "blank_duration_sec": [0.5, 0.6], - "stimulus_duration_sec": 6.0, - "omitted_flash_fraction": 0.05, - "response_window_sec": [0.15, 0.75], - "reward_volume": 0.007, - "stage": "TRAINING_3_images_A", - "stimulus": "images", - "stimulus_distribution": "geometric", - "task": "DoC_untranslated", - "n_stimulus_frames": 10 - } for k, v in actual.items(): # Special nan checking since pytest doesn't do it well try: @@ -55,7 +183,91 @@ def test_get_task_parameters(): assert expected[k] == v except (TypeError, ValueError): assert expected[k] == v - assert list(actual.keys()) == list(expected.keys()) + + actual_keys = list(actual.keys()) + actual_keys.sort() + expected_keys = list(expected.keys()) + expected_keys.sort() + assert actual_keys == expected_keys + + +def test_get_task_parameters_task_id_exception(): + """ + Test that, when task_id has an unexpected value, + get_task_parameters throws the correct exception + """ + input_data = { + "items": 
{ + "behavior": { + "config": { + "DoC": { + "blank_duration_range": (0.5, 0.6), + "response_window": [0.15, 0.75], + "change_time_dist": "geometric", + "auto_reward_volume": 0.002 + }, + "reward": { + "reward_volume": 0.007, + }, + "behavior": { + "task_id": "junk", + }, + }, + "params": { + "stage": "TRAINING_3_images_A", + "flash_omit_probability": 0.05 + }, + "stimuli": { + "images": {"draw_log": [1]*10, + "flash_interval_sec": [0.32, -1.0]} + }, + } + } + } + + with pytest.raises(RuntimeError) as error: + _ = get_task_parameters(input_data) + assert "does not know how to parse 'task_id'" in error.value.args[0] + + +def test_get_task_parameters_flash_duration_exception(): + """ + Test that, when 'images' or 'grating' not present in 'stimuli', + get_task_parameters throws the correct exception + """ + input_data = { + "items": { + "behavior": { + "config": { + "DoC": { + "blank_duration_range": (0.5, 0.6), + "response_window": [0.15, 0.75], + "change_time_dist": "geometric", + "auto_reward_volume": 0.002 + }, + "reward": { + "reward_volume": 0.007, + }, + "behavior": { + "task_id": "DoC", + }, + }, + "params": { + "stage": "TRAINING_3_images_A", + "flash_omit_probability": 0.05 + }, + "stimuli": { + "junk": {"draw_log": [1]*10, + "flash_interval_sec": [0.32, -1.0]} + }, + } + } + } + + with pytest.raises(RuntimeError) as error: + _ = get_task_parameters(input_data) + shld_be = "'images' and/or 'grating' not a valid key" + assert shld_be in error.value.args[0] @pytest.mark.parametrize("session_type, expected_description", [ @@ -88,5 +300,5 @@ def test_get_expt_description_with_valid_session_type(session_type, ("OPHYS_7") ]) def test_get_expt_description_raises_with_invalid_session_type(session_type): - with pytest.raises(RuntimeError, match=f"session type should match.*"): + with pytest.raises(RuntimeError, match="session type should match.*"): get_expt_description(session_type) diff --git a/allensdk/test/brain_observatory/behavior/test_rewards_processing.py 
b/allensdk/test/brain_observatory/behavior/test_rewards_processing.py index efb42fdcf..d344bce6e 100644 --- a/allensdk/test/brain_observatory/behavior/test_rewards_processing.py +++ b/allensdk/test/brain_observatory/behavior/test_rewards_processing.py @@ -30,7 +30,7 @@ def test_get_rewards(): expected = pd.DataFrame( {"volume": [0.007, 0.008], "timestamps": [14.0, 15.0], - "autorewarded": [False, True]}).set_index("timestamps", drop=True) + "autorewarded": [False, True]}) timesteps = -1*np.ones(100, dtype=float) timesteps[55] = 14.0 diff --git a/allensdk/test/brain_observatory/behavior/test_stimulus_processing.py b/allensdk/test/brain_observatory/behavior/test_stimulus_processing.py index 13b667953..e77284424 100644 --- a/allensdk/test/brain_observatory/behavior/test_stimulus_processing.py +++ b/allensdk/test/brain_observatory/behavior/test_stimulus_processing.py @@ -1,3 +1,5 @@ +import os + import pandas as pd import numpy as np import pytest @@ -5,9 +7,10 @@ from allensdk.brain_observatory.behavior.stimulus_processing import ( get_stimulus_presentations, _get_stimulus_epoch, _get_draw_epochs, get_visual_stimuli_df, get_stimulus_metadata, get_gratings_metadata, - get_stimulus_templates, StimulusTemplate) -from allensdk.brain_observatory.behavior.stimulus_processing.stimulus_templates import \ - StimulusImage + get_stimulus_templates) +from allensdk.brain_observatory.behavior.stimulus_processing\ + .stimulus_templates import StimulusImage +from allensdk.test.brain_observatory.behavior.conftest import get_resources_dir @pytest.fixture() @@ -95,22 +98,60 @@ def test_get_draw_epochs(behavior_stimuli_data_fixture, @pytest.mark.parametrize("behavior_stimuli_data_fixture", ({},), indirect=["behavior_stimuli_data_fixture"]) def test_get_stimulus_templates(behavior_stimuli_data_fixture): - templates = get_stimulus_templates(behavior_stimuli_data_fixture) - images = [np.ones((4, 4)) * 127, np.ones((4, 4)) * 127] + templates = 
get_stimulus_templates(behavior_stimuli_data_fixture, + grating_images_dict={}) assert templates.image_set_name == 'test_image_set' - assert len(templates) == 2 - assert list(templates.keys()) == ['im000', 'im106'] + assert len(templates) == 1 + assert list(templates.keys()) == ['im065'] for img in templates.values(): assert isinstance(img, StimulusImage) - for i, img_name in enumerate(templates): + expected_path = os.path.join(get_resources_dir(), 'stimulus_template', + 'expected') + + expected_unwarped_path = os.path.join( + expected_path, 'im065_unwarped.pkl') + expected_unwarped = pd.read_pickle(expected_unwarped_path) + + expected_warped_path = os.path.join( + expected_path, 'im065_warped.pkl') + expected_warped = pd.read_pickle(expected_warped_path) + + for img_name in templates: img = templates[img_name] - assert np.array_equal(a1=images[i], a2=img) + assert np.allclose(a=expected_unwarped, + b=img.unwarped, equal_nan=True) + assert np.allclose(a=expected_warped, + b=img.warped, equal_nan=True) - for i, (img_name, img) in enumerate(templates.items()): - assert np.array_equal(a1=images[i], a2=img) + for img_name, img in templates.items(): + img = templates[img_name] + assert np.allclose(a=expected_unwarped, + b=img.unwarped, equal_nan=True) + assert np.allclose(a=expected_warped, + b=img.warped, equal_nan=True) + + +@pytest.mark.parametrize(("behavior_stimuli_data_fixture, " + "grating_images_dict, expected"), [ + ({"has_images": False}, + {"gratings_90.0": {"warped": np.ones((2, 2)), + "unwarped": np.ones((2, 2)) * 2}}, + {}), +], indirect=["behavior_stimuli_data_fixture"]) +def test_get_stimulus_templates_for_gratings(behavior_stimuli_data_fixture, + grating_images_dict, expected): + templates = get_stimulus_templates(behavior_stimuli_data_fixture, + grating_images_dict=grating_images_dict) + + assert templates.image_set_name == 'grating' + assert list(templates.keys()) == ['gratings_90.0'] + assert np.allclose(templates['gratings_90.0'].warped, + 
np.array([[1, 1], [1, 1]])) + assert np.allclose(templates['gratings_90.0'].unwarped, + np.array([[2, 2], [2, 2]])) # def test_get_images_dict(): diff --git a/allensdk/test/brain_observatory/behavior/test_trials_processing.py b/allensdk/test/brain_observatory/behavior/test_trials_processing.py index 46c73a093..a2c74e741 100644 --- a/allensdk/test/brain_observatory/behavior/test_trials_processing.py +++ b/allensdk/test/brain_observatory/behavior/test_trials_processing.py @@ -1,6 +1,7 @@ import pytest import pandas as pd import numpy as np +from itertools import combinations from allensdk.brain_observatory.behavior.session_apis.data_io import ( BehaviorLimsApi) @@ -14,7 +15,10 @@ (881236761, 0, None, IndexError,) ] ) -def test_get_ori_info_from_trial(behavior_experiment_id, ti, expected, exception, ): +def test_get_ori_info_from_trial(behavior_experiment_id, + ti, + expected, + exception, ): """was feeling worried that the values would be wrong, this helps reaffirm that maybe they are not... 
@@ -32,99 +36,135 @@ def test_get_ori_info_from_trial(behavior_experiment_id, ti, expected, exception with pytest.raises(exception): trials_processing.get_ori_info_from_trial(trial_log, ti, ) else: - assert trials_processing.get_ori_info_from_trial(trial_log, ti, ) == expected + assert trials_processing.get_ori_info_from_trial(trial_log, ti, ) == expected # noqa: E501 _test_response_latency_0 = np.array( - [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0.3669842, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, 0.41701037, np.nan, np.nan, 0.31692564, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - 0.28356898, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - 0.33363652, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, 0.21683128, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0.38365788, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 
np.nan, np.nan, - np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]) + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, 0.3669842, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0.41701037, + np.nan, np.nan, 0.31692564, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, 0.28356898, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, 0.33363652, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, 0.21683128, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, 0.38365788, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]) _test_starttime_0 = np.array( - [19.99986754, 22.9857173, 25.25430697, 27.50627203, 30.50876019, 33.54466563, 37.314543, 38.06517225, 41.0677066, - 42.58570486, 44.83770039, 52.3774269, 57.63187756, 62.91968583, 66.67286547, 68.17414034, 69.67541489, 
74.19590836, - 77.19846323, 78.69971244, 80.9516403, 83.95418202, 86.22276367, 87.72406035, 89.97592663, 92.97849208, 95.23039781, - 97.49898909, 100.50154941, 102.75344423, 104.25473193, 107.25726563, 109.50917136, 111.7610819, 114.01299179, - 116.26490714, 119.26744951, 121.51938378, 124.53862772, 127.55782612, 129.80972512, 132.81226619, 137.31609721, - 138.81734546, 141.08594764, 143.33787839, 146.35708545, 149.35962973, 156.86599991, 159.88526245, 163.65508305, - 173.44672267, 175.69862167, 178.70117974, 180.95312267, 183.22166712, 186.22421269, 189.24343194, 191.49533542, - 195.28190828, 198.28443076, 201.30364616, 203.55556985, 205.82415567, 209.577343, 213.3472146, 216.34976979, - 218.61835433, 222.38820892, 226.14139112, 233.64779273, 234.39842519, 235.16570215, 237.4176191, 239.66953991, - 241.93811899, 244.94066135, 247.94320564, 249.44448821, 252.44703217, 253.96497334, 254.71559618, 256.2168919, - 263.73995532, 266.74247811, 268.99441912, 271.2629764, 275.0161833, 276.51745143, 278.03538907, 280.2873025, - 282.53921593, 284.80780047, 287.81034956, 290.06226106, 291.56355774, 294.56608823, 297.58530492, 299.83719974, - 302.08911637, 304.34102787, 308.1109572, 315.6172597, 317.11854964, 319.37043805, 321.65571199, 323.90761451, - 325.40892787, 327.66080216, 329.91271591, 332.9319358, 336.68513692, 339.70434687, 341.20561115, 341.95625997, - 345.72611233, 347.22738976, 349.4626279, 357.01902553, 359.27093992, 362.29016334, 365.29272109, 368.31197049, - 372.06513441, 373.58308296, 375.83498259, 377.3362504, 379.58815677, 382.60747224, 387.11118876, 390.11375903, - 392.3823596, 394.63425956, 396.88616336, 399.92207297, 402.92464964, 405.94383939, 408.19575538, 410.46437038, - 413.46690377, 416.4694612, 418.72134384, 420.95662335, 423.95913236, 425.46040562, 427.71231745, 429.98091481, - 432.98349662, 434.48471376, 435.98598863, 438.98854735, 442.0077961, 444.25968163, 446.54494947, 448.02954296, - 451.79940525, 454.81863412, 457.07053664, 459.33912952, 
461.59103621, 463.85962107, 465.360903, 467.61282348, - 469.8814, 471.3826691, 474.38523327, 477.42112684, 479.67303705, 481.94162769, 484.94417229, 487.96340758, - 490.21530914, 492.46722064, 494.73581031, 498.48898834, 501.50822074, 503.00949272, 505.2780933, 507.53000255, - 510.54922212, 514.31907961, 516.57098918, 518.83957885, 520.34085308, 523.3600781, 525.61201879, 529.39853328, - 531.6504467, 533.91904696, 536.17094563, 539.94081562, 542.19272616, 545.19526724, 546.69653986, 548.19781249, + [19.99986754, 22.9857173, 25.25430697, 27.50627203, 30.50876019, + 33.54466563, 37.314543, 38.06517225, 41.0677066, 42.58570486, + 44.83770039, 52.3774269, 57.63187756, 62.91968583, 66.67286547, + 68.17414034, 69.67541489, 74.19590836, 77.19846323, 78.69971244, + 80.9516403, 83.95418202, 86.22276367, 87.72406035, 89.97592663, + 92.97849208, 95.23039781, 97.49898909, 100.50154941, 102.75344423, + 104.25473193, 107.25726563, 109.50917136, 111.7610819, 114.01299179, + 116.26490714, 119.26744951, 121.51938378, 124.53862772, 127.55782612, + 129.80972512, 132.81226619, 137.31609721, 138.81734546, 141.08594764, + 143.33787839, 146.35708545, 149.35962973, 156.86599991, 159.88526245, + 163.65508305, 173.44672267, 175.69862167, 178.70117974, 180.95312267, + 183.22166712, 186.22421269, 189.24343194, 191.49533542, 195.28190828, + 198.28443076, 201.30364616, 203.55556985, 205.82415567, 209.577343, + 213.3472146, 216.34976979, 218.61835433, 222.38820892, 226.14139112, + 233.64779273, 234.39842519, 235.16570215, 237.4176191, 239.66953991, + 241.93811899, 244.94066135, 247.94320564, 249.44448821, 252.44703217, + 253.96497334, 254.71559618, 256.2168919, 263.73995532, 266.74247811, + 268.99441912, 271.2629764, 275.0161833, 276.51745143, 278.03538907, + 280.2873025, 282.53921593, 284.80780047, 287.81034956, 290.06226106, + 291.56355774, 294.56608823, 297.58530492, 299.83719974, 302.08911637, + 304.34102787, 308.1109572, 315.6172597, 317.11854964, 319.37043805, + 321.65571199, 323.90761451, 
325.40892787, 327.66080216, 329.91271591, + 332.9319358, 336.68513692, 339.70434687, 341.20561115, 341.95625997, + 345.72611233, 347.22738976, 349.4626279, 357.01902553, 359.27093992, + 362.29016334, 365.29272109, 368.31197049, 372.06513441, 373.58308296, + 375.83498259, 377.3362504, 379.58815677, 382.60747224, 387.11118876, + 390.11375903, 392.3823596, 394.63425956, 396.88616336, 399.92207297, + 402.92464964, 405.94383939, 408.19575538, 410.46437038, 413.46690377, + 416.4694612, 418.72134384, 420.95662335, 423.95913236, 425.46040562, + 427.71231745, 429.98091481, 432.98349662, 434.48471376, 435.98598863, + 438.98854735, 442.0077961, 444.25968163, 446.54494947, 448.02954296, + 451.79940525, 454.81863412, 457.07053664, 459.33912952, 461.59103621, + 463.85962107, 465.360903, 467.61282348, 469.8814, 471.3826691, + 474.38523327, 477.42112684, 479.67303705, 481.94162769, 484.94417229, + 487.96340758, 490.21530914, 492.46722064, 494.73581031, 498.48898834, + 501.50822074, 503.00949272, 505.2780933, 507.53000255, 510.54922212, + 514.31907961, 516.57098918, 518.83957885, 520.34085308, 523.3600781, + 525.61201879, 529.39853328, 531.6504467, 533.91904696, 536.17094563, + 539.94081562, 542.19272616, 545.19526724, 546.69653986, 548.19781249, 550.44972206, 553.46895029, 556.4714981, 559.47403694, 562.47658507]) expected_result_0 = np.array([ - np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 0.85743611, 0.82215855, 0.79754855, - 0.77420232, 0.74532604, 0.72504419, 0.73828876, 0.73168092, - 0.73168145, 0.73844044, 0.745635, 0.75997116, 0.73889553, 0.7457893, 0.73212764, 0.72533739, 0., 0., 0., 0., 0., 0., - 0., 0.83147215, 0.76759434, 0.76013235, 1.50562915, 1.37576878, - 1.36403067, 1.3524898, 1.3640296, 1.36377167, 1.35249025, 1.35223636, 1.35223623, 1.31828762, 1.31828779, - 1.30726822, - 1.30726804, 1.30703059, 1.28600222, 1.27551339, 1.26541718, 1.27551391, 1.26541723, 1.86854445, 1.78508514, - 1.85409645, 1.86822076, 1.86854435, - 1.86854454, 
1.88321881, 1.88321885, 1.31756348, 1.33989546, 1.35147388, 0.74517267, 0.75933052, 1.54807324, - 1.44950587, 1.43676766, 1.44979704, - 1.46306592, 1.43676702, 1.47718591, 1.50468411, 1.51930166, 1.51930185, 1.51930188, 1.53387944, 1.5642303, - 1.59545215, 1.58003398, 1.59580631, - 1.62831513, 0.87666335, 0.85784626, 1.6450693, 1.53453391, 1.54940651, 1.54974049, 1.56423021, 1.57968714, - 1.5796865, 1.59545254, 1.5800338, - 1.53420629, 1.49127149, 0.78984375, 0.80576787, 0.82234767, 0.80576784, 0.83089596, 1.64507108, 1.51930204, - 1.51930202, 1.50468432, 1.49096252, - 1.49065321, 1.46336336, 1.46306626, 1.4765797, 1.50468436, 1.50468414, 1.4903434, 1.44979783, 1.46336463, - 0.78160518, 0.77403664, 0.77403649, - 0.76661287, 0.75932993, 0.74501851, 0.74501813, 0.74486366, 0.74501799, 0.75202743, 0.7593303, 0.75234155, - 0.73168169, 0.7524993, 0.74548119, - 0.74517234, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., + np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, + np.inf, np.inf, 0.85743611, 0.82215855, 0.79754855, 0.77420232, + 0.74532604, 0.72504419, 0.73828876, 0.73168092, 0.73168145, + 0.73844044, 0.745635, 0.75997116, 0.73889553, 0.7457893, 0.73212764, + 0.72533739, 0., 0., 0., 0., 0., 0., 0., 0.83147215, 0.76759434, + 0.76013235, 1.50562915, 1.37576878, 1.36403067, 1.3524898, + 1.3640296, 1.36377167, 1.35249025, 1.35223636, 1.35223623, + 1.31828762, 1.31828779, 1.30726822, 1.30726804, 1.30703059, 1.28600222, + 1.27551339, 1.26541718, 1.27551391, 1.26541723, 1.86854445, 1.78508514, + 1.85409645, 1.86822076, 1.86854435, 1.86854454, 1.88321881, 1.88321885, + 1.31756348, 1.33989546, 1.35147388, 0.74517267, 0.75933052, 1.54807324, + 1.44950587, 1.43676766, 1.44979704, 1.46306592, 1.43676702, 1.47718591, + 1.50468411, 
1.51930166, 1.51930185, 1.51930188, 1.53387944, 1.5642303, + 1.59545215, 1.58003398, 1.59580631, 1.62831513, 0.87666335, 0.85784626, + 1.6450693, 1.53453391, 1.54940651, 1.54974049, 1.56423021, 1.57968714, + 1.5796865, 1.59545254, 1.5800338, 1.53420629, 1.49127149, 0.78984375, + 0.80576787, 0.82234767, 0.80576784, 0.83089596, 1.64507108, 1.51930204, + 1.51930202, 1.50468432, 1.49096252, 1.49065321, 1.46336336, 1.46306626, + 1.4765797, 1.50468436, 1.50468414, 1.4903434, 1.44979783, 1.46336463, + 0.78160518, 0.77403664, 0.77403649, 0.76661287, 0.75932993, 0.74501851, + 0.74501813, 0.74486366, 0.74501799, 0.75202743, 0.7593303, 0.75234155, + 0.73168169, 0.7524993, 0.74548119, 0.74517234, 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., ]) expected_result_1 = np.array( - [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 1.811146, 1.944290, 1.898119, - 1.811146, 1.733465, 1.771897, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.417308, 2.047210, 1.994977, - 3.890695, 3.321282, 3.253684, 3.190197, 3.190196, 3.255157, 3.255157, 1.853143, 1.898129, 1.897124, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.153860, 1.855050, 1.945345, - 2.044882, 2.155151, 2.279434, 2.344817, 2.279435, 2.347877, 2.574765, 0.000000, 0.000000, 0.000000, 3.191613, - 2.492687, 2.418930, 2.494413, 2.572924, 2.346344, 2.492686, 2.492686, 2.346343, 2.279434, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.574758, 2.157737, 
2.217599, 2.157739, - 2.214870, 2.279435, 2.346341, - 2.346345, 2.346345, 2.417310, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.752063, 2.213507, - 2.277990, 2.343290, 2.344815, 2.213503, 1.992768, 2.153859, 2.097345, 2.152573, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, ]) + [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, + np.inf, np.inf, 1.811146, 1.944290, 1.898119, 1.811146, 1.733465, + 1.771897, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 2.417308, 2.047210, 1.994977, 3.890695, 3.321282, + 3.253684, 3.190197, 3.190196, 3.255157, 3.255157, 1.853143, 1.898129, + 1.897124, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 2.153860, 1.855050, 1.945345, 2.044882, + 2.155151, 2.279434, 2.344817, 2.279435, 2.347877, 2.574765, 0.000000, + 0.000000, 0.000000, 3.191613, 2.492687, 2.418930, 2.494413, 2.572924, + 2.346344, 2.492686, 2.492686, 2.346343, 2.279434, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
0.000000, 0.000000, + 2.574758, 2.157737, 2.217599, 2.157739, 2.214870, 2.279435, 2.346341, + 2.346345, 2.346345, 2.417310, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 2.752063, 2.213507, 2.277990, 2.343290, 2.344815, + 2.213503, 1.992768, 2.153859, 2.097345, 2.152573, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, ]) @pytest.mark.parametrize('kwargs, expected', [ @@ -176,7 +216,9 @@ def trial_data_and_expectation_0(): 18706)], 'success': False, 'cumulative_volume': 0.005, - 'trial_params': {'catch': False, 'auto_reward': True, 'change_time': 5}, + 'trial_params': {'catch': False, + 'auto_reward': True, + 'change_time': 5}, 'rewards': [(0.005, 317.92325660388286, 18715)], 'events': [['trial_start', '', 314.0120642698258, 18481], ['initial_blank', 'enter', 314.01216666808074, 18481], @@ -221,7 +263,9 @@ def trial_data_and_expectation_1(): 'stimulus_changes': [], 'success': False, 'cumulative_volume': 0.005, - 'trial_params': {'catch': False, 'auto_reward': True, 'change_time': 6}, + 'trial_params': {'catch': False, + 'auto_reward': True, + 'change_time': 6}, 'rewards': [], 'events': [['trial_start', '', 322.2688823113451, 18976], ['initial_blank', 'enter', 322.2689858798658, 18976], @@ -280,7 +324,9 @@ def 
trial_data_and_expectation_2(): 32161)], 'success': True, 'cumulative_volume': 0.067, - 'trial_params': {'catch': False, 'auto_reward': False, 'change_time': 4}, + 'trial_params': {'catch': False, + 'auto_reward': False, + 'change_time': 4}, 'rewards': [(0.007, 542.620156599114, 32186)], 'events': [['trial_start', '', 539.1971251251088, 31981], ['initial_blank', 'enter', 539.197228401063, 31981], @@ -336,8 +382,9 @@ def test_trial_data_from_log(data_exp_getter): "both `hit` and `false_alarm` cannot be True"), # hit and false alarm (True, True, False, False, False, False, "both `go` and `catch` cannot be True"), # go and catch + # go and auto_rewarded (True, False, True, False, False, False, - "both `go` and `auto_rewarded` cannot be True") # go and auto_rewarded + "both `go` and `auto_rewarded` cannot be True") ] ) def test_get_trial_timing_exclusivity_assertions( @@ -345,7 +392,7 @@ def test_get_trial_timing_exclusivity_assertions( with pytest.raises(AssertionError) as e: trials_processing.get_trial_timing( None, None, go, catch, auto_rewarded, hit, false_alarm, - aborted, np.array([])) + aborted, np.array([]), 0.0) assert errortext in str(e.value) @@ -356,11 +403,14 @@ def test_get_trial_timing(): 'frame': 18075}, ('initial_blank', 'exit'): {'timestamp': 306.4787637603285, 'frame': 18075}, - ('pre_change', 'enter'): {'timestamp': 306.47883573270514, 'frame': 18075}, - ('pre_change', 'exit'): {'timestamp': 306.4789062422286, 'frame': 18075}, + ('pre_change', 'enter'): {'timestamp': 306.47883573270514, + 'frame': 18075}, + ('pre_change', 'exit'): {'timestamp': 306.4789062422286, + 'frame': 18075}, ('stimulus_window', 'enter'): {'timestamp': 306.478977629464, 'frame': 18075}, - ('stimulus_changed', ''): {'timestamp': 310.9827406729944, 'frame': 18345}, + ('stimulus_changed', ''): {'timestamp': 310.9827406729944, + 'frame': 18345}, ('auto_reward', ''): {'timestamp': 310.98279450599154, 'frame': 18345}, ('response_window', 'enter'): {'timestamp': 311.13223900212347, 
'frame': 18354}, @@ -404,7 +454,8 @@ def test_get_trial_timing(): hit=False, false_alarm=False, aborted=False, - timestamps=timestamps + timestamps=timestamps, + monitor_delay=0.0 ) expected_result = { @@ -417,8 +468,10 @@ def test_get_trial_timing(): 'response_latency': 0.4778999999999769 } - # use assert_frame_equal to take advantage of the nice way it deals with NaNs - pd.testing.assert_frame_equal(pd.DataFrame(result, index=[0]), pd.DataFrame(expected_result, index=[0]), + # use assert_frame_equal to take advantage of the + # nice way it deals with NaNs + pd.testing.assert_frame_equal(pd.DataFrame(result, index=[0]), + pd.DataFrame(expected_result, index=[0]), check_names=False) @@ -480,7 +533,9 @@ def test_resolve_initial_image(behavior_stimuli_data_fixture, start_frame, ], 'stimulus_changes': [ - (('horizontal', 90), ('vertical', 180), None, + (('horizontal', 90), + ('vertical', 180), + None, None) ] }, @@ -517,14 +572,22 @@ def test_get_trial_image_names(behavior_stimuli_data_fixture, trial, trial_image_names = trials_processing.get_trial_image_names(trial, stimuli) assert trial_image_names == expected + @pytest.mark.parametrize("trial_log,expected", - [([{'events':[('trial_start', 4), ('trial_end', 5)]}, - {'events':[('trial_start', 6), ('trial_end', 9)]}], + [([{'events': [('trial_start', 4), + ('trial_end', 5)]}, + {'events': [('trial_start', 6), + ('trial_end', 9)]}], [(4, 6), (6, -1)]), - ([{'events':[('trial_start', 2), ('trial_end', 9)]}, - {'events':[('trial_start', 5), ('trial_end', 11)]}, - {'events':[('junk', 4), ('trial_start', 7), ('trial_end', 14)]}, - {'events':[('trial_start', 13), ('trial_end', 22)]}], + ([{'events': [('trial_start', 2), + ('trial_end', 9)]}, + {'events': [('trial_start', 5), + ('trial_end', 11)]}, + {'events': [('junk', 4), + ('trial_start', 7), + ('trial_end', 14)]}, + {'events': [('trial_start', 13), + ('trial_end', 22)]}], [(2, 5), (5, 7), (7, 13), (13, -1)])]) def test_get_trial_bounds(trial_log, expected): bounds = 
trials_processing.get_trial_bounds(trial_log) @@ -540,14 +603,17 @@ def test_get_trial_bounds_exception(): {'events': [('trial_end', 2)]}] with pytest.raises(ValueError): - bounds = trials_processing.get_trial_bounds(trial_log) + _ = trials_processing.get_trial_bounds(trial_log) @pytest.mark.parametrize("trial_log", - [([{'events':[('trial_start', 4), ('trial_end', 5)]}, - {'events':[('trial_start', 2), ('trial_end', 9)]}, - {'events':[('trial_start', 6), ('trial_end', 11)]}])] - ) + [([{'events': [('trial_start', 4), + ('trial_end', 5)]}, + {'events': [('trial_start', 2), + ('trial_end', 9)]}, + {'events': [('trial_start', 6), + ('trial_end', 11)]}])] + ) def test_get_trial_bounds_order_exceptions(trial_log): """ Test that, when trial_start and trial_end are out of order, @@ -556,3 +622,51 @@ def test_get_trial_bounds_order_exceptions(trial_log): with pytest.raises(ValueError) as error: _ = trials_processing.get_trial_bounds(trial_log) assert 'order' in error.value.args[0] + + +def test_input_validation(monkeypatch): + """ + Test that get_trials raises the appropriate errors when input object + is malformed + + Note: this test does not test the case in which get_trials runs through + to completion. 
That is covered by the smoke tests in + allensdk/test/brain_observatory/behavior/test_get_trials_methods + """ + + class DummyObj(object): + def __init__(self): + pass + + def dummy_method(self): + pass + + # loop over all of the incomplete subsets of + # methods that the argument in get_trials_from_data_transform + # must have; make sure that the correct error with + # the correct error message is raised + + method_names_tuple = ('_behavior_stimulus_file', + 'get_rewards', 'get_licks', + 'get_stimulus_timestamps', + 'get_monitor_delay') + + for n_methods in range(1, 5): + method_iterator = combinations(method_names_tuple, + n_methods) + for local_method_name_tuple in method_iterator: + with monkeypatch.context() as ctx: + for method_name in local_method_name_tuple: + ctx.setattr(DummyObj, + method_name, + dummy_method, + raising=False) + + obj = DummyObj() + with pytest.raises(ValueError) as error: + _ = trials_processing.get_trials_from_data_transform(obj) + for method_name in method_names_tuple: + if method_name not in local_method_name_tuple: + assert method_name in error.value.args[0] + else: + assert method_name not in error.value.args[0] diff --git a/allensdk/test/brain_observatory/behavior/test_write_behavior_nwb.py b/allensdk/test/brain_observatory/behavior/test_write_behavior_nwb.py index 25d7ff16c..be84a63bd 100644 --- a/allensdk/test/brain_observatory/behavior/test_write_behavior_nwb.py +++ b/allensdk/test/brain_observatory/behavior/test_write_behavior_nwb.py @@ -9,7 +9,8 @@ from allensdk.brain_observatory.behavior.session_apis.data_io import ( BehaviorNwbApi) from allensdk.brain_observatory.behavior.stimulus_processing import \ - StimulusTemplate + StimulusTemplate, get_stimulus_templates + # pytest fixtures: # nwbfile: test.brain_observatory.conftest @@ -55,9 +56,14 @@ def test_add_running_speed_to_nwbfile(nwbfile, running_speed, obt_running_speed['speed']) -@pytest.mark.parametrize('roundtrip', [True, False]) -def test_add_stimulus_templates(nwbfile, 
stimulus_templates: StimulusTemplate, +@pytest.mark.parametrize('roundtrip,behavior_stimuli_data_fixture', + [(True, {}), (False, {})], + indirect=["behavior_stimuli_data_fixture"]) +def test_add_stimulus_templates(nwbfile, behavior_stimuli_data_fixture, roundtrip, roundtripper): + stimulus_templates = get_stimulus_templates(behavior_stimuli_data_fixture, + grating_images_dict={}) + nwb.add_stimulus_template(nwbfile, stimulus_templates) if roundtrip: @@ -67,10 +73,7 @@ def test_add_stimulus_templates(nwbfile, stimulus_templates: StimulusTemplate, stimulus_templates_obt = obt.get_stimulus_templates() - for img_name in stimulus_templates_obt: - assert np.array_equal( - a1=stimulus_templates[img_name], - a2=stimulus_templates_obt[img_name]) + assert stimulus_templates_obt == stimulus_templates @pytest.mark.parametrize('roundtrip', [True, False]) @@ -201,3 +204,32 @@ def test_add_task_parameters(nwbfile, roundtrip, assert math.isnan(val) else: assert val == task_parameters_obt[key] + + +@pytest.mark.parametrize('roundtrip', [True, False]) +def test_add_task_parameters_stim_nan(nwbfile, roundtrip, + roundtripper, + task_parameters_nan_stimulus_duration): + """ + Same as test_add_task_parameters, but stimulus_duration_sec is NaN + """ + task_params = task_parameters_nan_stimulus_duration + nwb.add_task_parameters(nwbfile, task_params) + + if roundtrip: + obt = roundtripper(nwbfile, BehaviorNwbApi) + else: + obt = BehaviorNwbApi.from_nwbfile(nwbfile) + + task_parameters_obt = obt.get_task_parameters() + + assert len(task_parameters_obt) == len(task_params) + for key, val in task_params.items(): + if key in ('omitted_flash_fraction', + 'stimulus_duration_sec'): + if math.isnan(val): + assert math.isnan(task_parameters_obt[key]) + if math.isnan(task_parameters_obt[key]): + assert math.isnan(val) + else: + assert val == task_parameters_obt[key] diff --git a/allensdk/test/internal/brain_observatory/test_time_sync.py b/allensdk/test/internal/brain_observatory/test_time_sync.py 
index ad991d926..db106e08b 100644 --- a/allensdk/test/internal/brain_observatory/test_time_sync.py +++ b/allensdk/test/internal/brain_observatory/test_time_sync.py @@ -347,7 +347,8 @@ def test_get_corrected_stim_times(stim_data_length, start_delay): aligner = ts.OphysTimeAligner("test") aligner.stim_data_length = stim_data_length - with patch.object(ts, "monitor_delay", return_value=ASSUMED_DELAY): + with patch.object(ts, "calculate_monitor_delay", + return_value=ASSUMED_DELAY): with patch.object(ts.Dataset, "get_falling_edges", return_value=true_falling) as mock_falling: with patch.object(ts.Dataset, "get_rising_edges", @@ -462,15 +463,18 @@ def test_monitor_delay(sync_dset, stim_times, transition_interval, expected, monkeypatch): monkeypatch.setattr(ts, "get_real_photodiode_events", mock_get_real_photodiode_events) - pytest.approx(expected, ts.monitor_delay(sync_dset, stim_times, "key", + pytest.approx(expected, + ts.calculate_monitor_delay(sync_dset, stim_times, "key", transition_interval)) @pytest.mark.parametrize( "sync_dset,stim_times,transition_interval", [ - (np.array([1.0, 2.0, 3.0]), np.array([0.9, 1.9, 2.9]), 1,), # negative - (np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.1, 2.1, 3.1, 4.1]), 1,), # too big + # Negative + (np.array([1.0, 2.0, 3.0]), np.array([0.9, 1.9, 2.9]), 1,), + # Too big + (np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.1, 2.1, 3.1, 4.1]), 1,), ], ) def test_monitor_delay_raises_error( @@ -479,7 +483,8 @@ def test_monitor_delay_raises_error( monkeypatch.setattr(ts, "get_real_photodiode_events", mock_get_real_photodiode_events) with pytest.raises(ValueError): - ts.monitor_delay(sync_dset, stim_times, "key", transition_interval) + ts.calculate_monitor_delay(sync_dset, stim_times, + "key", transition_interval) @pytest.mark.parametrize( diff --git a/doc_template/index.rst b/doc_template/index.rst index bbf9f73a4..79ec086fe 100644 --- a/doc_template/index.rst +++ b/doc_template/index.rst @@ -91,6 +91,13 @@ The Allen SDK provides Python code 
for accessing experimental metadata along with See the `mouse connectivity section `_ for more details. +What's New - 2.8.0 +----------------------------------------------------------------------- +- Created lookup table to get monitor_delay for cases where calculation from data fails +- If sync timestamp file has more timestamps than eye tracking movie has frames, trim excess timestamps (up to 15) +- Session API returns both warped and unwarped stimulus images, and both are written to NWB + + What's New - 2.7.0 ----------------------------------------------------------------------- - Refactored behavior and ophys session and data APIs to remove a circular inheritance issue