diff --git a/allensdk/__init__.py b/allensdk/__init__.py index fd71c64b9..709a77084 100644 --- a/allensdk/__init__.py +++ b/allensdk/__init__.py @@ -35,7 +35,7 @@ # import logging -__version__ = '1.1.1' +__version__ = '1.2.0' try: from logging import NullHandler diff --git a/allensdk/api/cache.py b/allensdk/api/cache.py index 666c4844a..55fd9581c 100755 --- a/allensdk/api/cache.py +++ b/allensdk/api/cache.py @@ -42,25 +42,51 @@ import pandas.io.json as pj import functools -from functools import wraps +from functools import wraps, _make_key import os import logging import csv def memoize(f): - memodict = dict() + """ + Creates an unbound cache of function calls and results. Note that arguments + of different types are not cached separately (so f(3.0) and f(3) are not + treated as distinct calls) + + Arguments to the cached function must be hashable. + + View the cache size with f.cache_size(). + Clear the cache with f.cache_clear(). + Access the underlying function with f.__wrapped__. + """ + cache = {} + sentinel = object() # unique object for cache misses + make_key = _make_key # efficient key building from function args + cache_get = cache.get + cache_len = cache.__len__ + + @wraps(f) + def wrapper(*args, **kwargs): + key = make_key(args, kwargs, typed=False) # Don't consider 3.0 and 3 different + result = cache_get(key, sentinel) + if result is not sentinel: + return result + result = f(*args, **kwargs) + cache[key] = result + return result + + def cache_clear(): + cache.clear() - @wraps(f) - def wrapper(*args, **kwargs): - key = (args, tuple(kwargs.items())) + def cache_size(): + return cache_len() - if key not in memodict: - memodict[key] = f(*args, **kwargs) + wrapper.cache_clear = cache_clear + wrapper.cache_size = cache_size - return memodict[key] + return wrapper - return wrapper class Cache(object): _log = logging.getLogger('allensdk.api.cache') diff --git a/allensdk/api/caching_utilities.py b/allensdk/api/caching_utilities.py index bc1525cbb..ab5de267e 100644 --- a/allensdk/api/caching_utilities.py +++ b/allensdk/api/caching_utilities.py @@ -2,6 +2,7 @@ from pathlib import Path import warnings import os +import logging from typing import overload, Callable, Any, Union, Optional, TypeVar @@ -87,18 +88,23 @@ def call_caching( The result of calling read """ + logger = logging.getLogger("call_caching") try: if not lazy or read is None: + logger.info("Fetching data from remote") data = fetch() if pre_write is not None: data = pre_write(data) + logger.info("Writing data to cache") write(data) - if read is not None: + if read is not None: + logger.info("Reading data from cache") return read() - - except Exception: + except Exception as e: + if isinstance(e, FileNotFoundError): + logger.info("No cache file found.") if cleanup is not None and not lazy: cleanup() @@ -150,7 +156,6 @@ def one_file_call_caching( Path at which the data will be stored """ - def safe_unlink(): try: os.unlink(path) diff --git a/allensdk/brain_observatory/behavior/behavior_data_session.py b/allensdk/brain_observatory/behavior/behavior_data_session.py new file mode 100644 index 000000000..9aca0997c --- /dev/null +++ b/allensdk/brain_observatory/behavior/behavior_data_session.py @@ -0,0 +1,192 @@ +from typing import Any, Optional, List, Dict, Type, Tuple +import logging +import pandas as pd +import numpy as np +import inspect + +from allensdk.internal.api.behavior_data_lims_api import BehaviorDataLimsApi +from allensdk.brain_observatory.behavior.internal import BehaviorBase +from 
allensdk.brain_observatory.running_speed import RunningSpeed + +BehaviorDataApi = Type[BehaviorBase] + + +class BehaviorDataSession(object): + def __init__(self, api: Optional[BehaviorDataApi] = None): + self.api = api + + @classmethod + def from_lims(cls, behavior_session_id: int) -> "BehaviorDataSession": + return cls(api=BehaviorDataLimsApi(behavior_session_id)) + + @classmethod + def from_nwb_path( + cls, nwb_path: str, **api_kwargs: Any) -> "BehaviorDataSession": + return NotImplementedError + + @property + def behavior_session_id(self) -> int: + """Unique identifier for this experimental session. + :rtype: int + """ + return self.api.behavior_session_id + + @property + def ophys_session_id(self) -> Optional[int]: + """The unique identifier for the ophys session associated + with this behavior session (if one exists) + :rtype: int + """ + return self.api.ophys_session_id + + @property + def ophys_experiment_ids(self) -> Optional[List[int]]: + """The unique identifiers for the ophys experiment(s) associated + with this behavior session (if one exists) + :rtype: int + """ + return self.api.ophys_experiment_ids + + @property + def licks(self) -> pd.DataFrame: + """Get lick data from pkl file. + + Returns + ------- + np.ndarray + A dataframe containing lick timestamps. + """ + return self.api.get_licks() + + @property + def rewards(self) -> pd.DataFrame: + """Get reward data from pkl file. + + Returns + ------- + pd.DataFrame + A dataframe containing timestamps of delivered rewards. + """ + return self.api.get_rewards() + + @property + def running_data_df(self) -> pd.DataFrame: + """Get running speed data. + + Returns + ------- + pd.DataFrame + Dataframe containing various signals used to compute running speed. + """ + return self.api.get_running_data_df() + + @property + def running_speed(self) -> RunningSpeed: + """Get running speed using timestamps from + self.get_stimulus_timestamps. + + NOTE: Do not correct for monitor delay. + + Returns + ------- + RunningSpeed (NamedTuple with two fields) + timestamps : np.ndarray + Timestamps of running speed data samples + values : np.ndarray + Running speed of the experimental subject (in cm / s). + """ + return self.api.get_running_speed() + + @property + def stimulus_presentations(self) -> pd.DataFrame: + """Get stimulus presentation data. + + NOTE: Uses timestamps that do not account for monitor delay. + + Returns + ------- + pd.DataFrame + Table whose rows are stimulus presentations + (i.e. a given image, for a given duration, typically 250 ms) + and whose columns are presentation characteristics. + """ + return self.api.get_stimulus_presentations() + + @property + def stimulus_templates(self) -> Dict[str, np.ndarray]: + """Get stimulus templates (movies, scenes) for behavior session. + + Returns + ------- + Dict[str, np.ndarray] + A dictionary containing the stimulus images presented during the + session. Keys are data set names, and values are 3D numpy arrays. + """ + return self.api.get_stimulus_templates() + + @property + def stimulus_timestamps(self) -> np.ndarray: + """Get stimulus timestamps from pkl file. + + NOTE: Located with behavior_session_id + + Returns + ------- + np.ndarray + Timestamps associated with stimulus presentations on the monitor + that do no account for monitor delay. + """ + return self.api.get_stimulus_timestamps() + + @property + def task_parameters(self) -> dict: + """Get task parameters from pkl file. + + Returns + ------- + dict + A dictionary containing parameters used to define the task runtime + behavior. 
+ """ + return self.api.get_task_parameters() + + @property + def trials(self) -> pd.DataFrame: + """Get trials from pkl file + + Returns + ------- + pd.DataFrame + A dataframe containing behavioral trial start/stop times, + and trial data + """ + return self.api.get_trials() + + @property + def metadata(self) -> Dict[str, Any]: + """Return metadata about the session. + :rtype: dict + """ + return self.api.get_metadata() + + def cache_clear(self) -> None: + """Convenience method to clear the api cache, if applicable.""" + try: + self.api.cache_clear() + except AttributeError: + logging.getLogger("BehaviorOphysSession").warning( + "Attempted to clear API cache, but method `cache_clear`" + f" does not exist on {self.api.__class__.__name__}") + + def list_api_methods(self) -> List[Tuple[str, str]]: + """Convenience method to expose list of API `get` methods. These methods + can be accessed by referencing the API used to initialize this + BehaviorDataSession via its `api` instance attribute. + :rtype: list of tuples, where the first value in the tuple is the + method name, and the second value is the method docstring. + """ + methods = [m for m in inspect.getmembers(self.api, inspect.ismethod) + if m[0].startswith("get_")] + docs = [inspect.getdoc(m[1]) or "" for m in methods] + method_names = [m[0] for m in methods] + return list(zip(method_names, docs)) diff --git a/allensdk/brain_observatory/behavior/behavior_ophys_session.py b/allensdk/brain_observatory/behavior/behavior_ophys_session.py index 3229fac76..6b561b226 100644 --- a/allensdk/brain_observatory/behavior/behavior_ophys_session.py +++ b/allensdk/brain_observatory/behavior/behavior_ophys_session.py @@ -1,98 +1,203 @@ import numpy as np import pandas as pd import xarray as xr -import math -from typing import NamedTuple -import os +from typing import Any +import logging -from allensdk.core.lazy_property import LazyProperty, LazyPropertyMixin from allensdk.internal.api.behavior_ophys_api import BehaviorOphysLimsApi -from allensdk.brain_observatory.behavior.behavior_ophys_api.behavior_ophys_nwb_api import equals, BehaviorOphysNwbApi +from allensdk.brain_observatory.behavior.behavior_ophys_api\ + .behavior_ophys_nwb_api import BehaviorOphysNwbApi from allensdk.deprecated import legacy -from allensdk.brain_observatory.behavior.trials_processing import calculate_reward_rate -from allensdk.brain_observatory.behavior.dprime import get_rolling_dprime, get_trial_count_corrected_false_alarm_rate, get_trial_count_corrected_hit_rate -from allensdk.brain_observatory.behavior.dprime import get_hit_rate, get_false_alarm_rate +from allensdk.brain_observatory.behavior.trials_processing import ( + calculate_reward_rate) +from allensdk.brain_observatory.behavior.dprime import ( + get_rolling_dprime, get_trial_count_corrected_false_alarm_rate, + get_trial_count_corrected_hit_rate) +from allensdk.brain_observatory.behavior.dprime import ( + get_hit_rate, get_false_alarm_rate) from allensdk.brain_observatory.behavior.image_api import Image, ImageApi +from allensdk.brain_observatory.running_speed import RunningSpeed -class BehaviorOphysSession(LazyPropertyMixin): - """Represents data from a single Visual Behavior Ophys imaging session. LazyProperty attributes access the data only on the first demand, and then memoize the result for reuse. 
- - Attributes: - ophys_experiment_id : int (LazyProperty) - Unique identifier for this experimental session - max_projection : allensdk.brain_observatory.behavior.image_api.Image (LazyProperty) - 2D max projection image - stimulus_timestamps : numpy.ndarray (LazyProperty) - Timestamps associated the stimulus presentations on the monitor - ophys_timestamps : numpy.ndarray (LazyProperty) - Timestamps associated with frames captured by the microscope - metadata : dict (LazyProperty) - A dictionary of session-specific metadata - dff_traces : pandas.DataFrame (LazyProperty) - The traces of dff organized into a dataframe; index is the cell roi ids - cell_specimen_table : pandas.DataFrame (LazyProperty) - Cell roi information organized into a dataframe; index is the cell roi ids - running_speed : allensdk.brain_observatory.running_speed.RunningSpeed (LazyProperty) - NamedTuple with two fields - timestamps : numpy.ndarray - Timestamps of running speed data samples - values : np.ndarray - Running speed of the experimental subject (in cm / s). - running_data_df : pandas.DataFrame (LazyProperty) - Dataframe containing various signals used to compute running speed - stimulus_presentations : pandas.DataFrame (LazyProperty) - Table whose rows are stimulus presentations (i.e. a given image, for a given duration, typically 250 ms) and whose columns are presentation characteristics. - stimulus_templates : dict (LazyProperty) - A dictionary containing the stimulus images presented during the session keys are data set names, and values are 3D numpy arrays. - licks : pandas.DataFrame (LazyProperty) - A dataframe containing lick timestamps - rewards : pandas.DataFrame (LazyProperty) - A dataframe containing timestamps of delivered rewards - task_parameters : dict (LazyProperty) - A dictionary containing parameters used to define the task runtime behavior - trials : pandas.DataFrame (LazyProperty) - A dataframe containing behavioral trial start/stop times, and trial data - corrected_fluorescence_traces : pandas.DataFrame (LazyProperty) - The motion-corrected fluorescence traces organized into a dataframe; index is the cell roi ids - average_projection : allensdk.brain_observatory.behavior.image_api.Image (LazyProperty) - 2D image of the microscope field of view, averaged across the experiment - motion_correction : pandas.DataFrame LazyProperty - A dataframe containing trace data used during motion correction computation +class BehaviorOphysSession(object): + """Represents data from a single Visual Behavior Ophys imaging session. + Can be initialized with an api that fetches data, or by using class methods + `from_lims` and `from_nwb_path`. 
""" @classmethod - def from_lims(cls, ophys_experiment_id): + def from_lims(cls, ophys_experiment_id: int) -> "BehaviorOphysSession": return cls(api=BehaviorOphysLimsApi(ophys_experiment_id)) @classmethod - def from_nwb_path(cls, nwb_path, **api_kwargs): - api_kwargs["filter_invalid_rois"] = api_kwargs.get("filter_invalid_rois", True) - return cls(api=BehaviorOphysNwbApi.from_path(path=nwb_path, **api_kwargs)) + def from_nwb_path( + cls, nwb_path: str, **api_kwargs: Any) -> "BehaviorOphysSession": + api_kwargs["filter_invalid_rois"] = api_kwargs.get( + "filter_invalid_rois", True) + return cls(api=BehaviorOphysNwbApi.from_path( + path=nwb_path, **api_kwargs)) def __init__(self, api=None): - self.api = api - self.ophys_experiment_id = LazyProperty(self.api.get_ophys_experiment_id) - self.max_projection = LazyProperty(self.get_max_projection) - self.stimulus_timestamps = LazyProperty(self.api.get_stimulus_timestamps) - self.ophys_timestamps = LazyProperty(self.api.get_ophys_timestamps) - self.metadata = LazyProperty(self.api.get_metadata) - self.dff_traces = LazyProperty(self.api.get_dff_traces) - self.cell_specimen_table = LazyProperty(self.api.get_cell_specimen_table) - self.running_speed = LazyProperty(self.api.get_running_speed) - self.running_data_df = LazyProperty(self.api.get_running_data_df) - self.stimulus_presentations = LazyProperty(self.api.get_stimulus_presentations) - self.stimulus_templates = LazyProperty(self.api.get_stimulus_templates) - self.licks = LazyProperty(self.api.get_licks) - self.rewards = LazyProperty(self.api.get_rewards) - self.task_parameters = LazyProperty(self.api.get_task_parameters) - self.trials = LazyProperty(self.api.get_trials) - self.corrected_fluorescence_traces = LazyProperty(self.api.get_corrected_fluorescence_traces) - self.average_projection = LazyProperty(self.get_average_projection) - self.motion_correction = LazyProperty(self.api.get_motion_correction) - self.segmentation_mask_image = LazyProperty(self.get_segmentation_mask_image) + # Using properties rather than initializing attributes to take advantage + # of API-level cache and not introduce a lot of overhead when the + # class is initialized (sometimes these calls can take a while) + @property + def ophys_experiment_id(self) -> int: + """Unique identifier for this experimental session. + :rtype: int + """ + return self.api.get_ophys_experiment_id() + + @property + def max_projection(self) -> Image: + """2D max projection image. + :rtype: allensdk.brain_observatory.behavior.image_api.Image + """ + return self.get_max_projection() + + @property + def stimulus_timestamps(self) -> np.ndarray: + """Timestamps associated with stimulus presentations on the + monitor (corrected for monitor delay). + :rtype: numpy.ndarray + """ + return self.api.get_stimulus_timestamps() + + @property + def ophys_timestamps(self) -> np.ndarray: + """Timestamps associated with frames captured by the microscope + :rtype: numpy.ndarray + """ + return self.api.get_ophys_timestamps() + + @property + def metadata(self) -> dict: + """Dictioanry of session-specific metadata. + :rtype: dict + """ + return self.api.get_metadata() + + @property + def dff_traces(self) -> pd.DataFrame: + """Traces of dff organized into a dataframe; index is the cell roi ids. + :rtype: pandas.DataFrame + """ + return self.api.get_dff_traces() + + @property + def cell_specimen_table(self) -> pd.DataFrame: + """Cell roi information organized into a dataframe; index is the cell + roi ids. 
+ :rtype: pandas.DataFrame + """ + return self.api.get_cell_specimen_table() + + @property + def running_speed(self) -> RunningSpeed: + """Running speed of mouse. NamedTuple with two fields + timestamps : numpy.ndarray + Timestamps of running speed data samples + values : np.ndarray + Running speed of the experimental subject (in cm / s). + :rtype: allensdk.brain_observatory.running_speed.RunningSpeed + """ + return self.api.get_running_speed() + + @property + def running_data_df(self) -> pd.DataFrame: + """Dataframe containing various signals used to compute running speed + :rtype: pandas.DataFrame + """ + return self.api.get_running_data_df() + + @property + def stimulus_presentations(self) -> pd.DataFrame: + """Table whose rows are stimulus presentations (i.e. a given image, + for a given duration, typically 250 ms) and whose columns are + presentation characteristics. + :rtype: pandas.DataFrame + """ + return self.api.get_stimulus_presentations() + + @property + def stimulus_templates(self) -> dict: + """A dictionary containing the stimulus images presented during the + session keys are data set names, and values are 3D numpy arrays. + :rtype: dict + """ + return self.api.get_stimulus_templates() + + @property + def licks(self) -> pd.DataFrame: + """A dataframe containing lick timestamps. + :rtype: pandas.DataFrame + """ + return self.api.get_licks() + + @property + def rewards(self) -> pd.DataFrame: + """A dataframe containing timestamps of delivered rewards. + :rtype: pandas.DataFrame + """ + return self.api.get_rewards() + + @property + def task_parameters(self) -> dict: + """A dictionary containing parameters used to define the task runtime + behavior. + :rtype: dict + """ + return self.api.get_task_parameters() + + @property + def trials(self) -> pd.DataFrame: + """A dataframe containing behavioral trial start/stop times, and trial + data + :rtype: pandas.DataFrame""" + return self.api.get_trials() + + @property + def corrected_fluorescence_traces(self) -> pd.DataFrame: + """The motion-corrected fluorescence traces organized into a dataframe; + index is the cell roi ids. + :rtype: pandas.DataFrame + """ + return self.api.get_corrected_fluorescence_traces() + + @property + def average_projection(self) -> pd.DataFrame: + """2D image of the microscope field of view, averaged across the + experiment + :rtype: pandas.DataFrame + """ + return self.get_average_projection() + + @property + def motion_correction(self) -> pd.DataFrame: + """A dataframe containing trace data used during motion correction + computation + :rtype: pandas.DataFrame + """ + return self.api.get_motion_correction() + + @property + def segmentation_mask_image(self) -> Image: + """An image with pixel value 1 if that pixel was included in an ROI, + and 0 otherwise + :rtype: allensdk.brain_observatory.behavior.image_api.Image + """ + return self.get_segmentation_mask_image() + + def cache_clear(self) -> None: + """Convenience method to clear the api cache, if applicable.""" + try: + self.api.cache_clear() + except AttributeError: + logging.getLogger("BehaviorOphysSession").warning( + "Attempted to clear API cache, but method `cache_clear`" + f" does not exist on {self.api.__class__.__name__}") def get_roi_masks(self, cell_specimen_ids=None): """ Obtains boolean masks indicating the location of one or more cell's ROIs in this session. @@ -102,7 +207,7 @@ def get_roi_masks(self, cell_specimen_ids=None): cell_specimen_ids : array-like of int, optional ROI masks for these cell specimens will be returned. 
The default behavior is to return masks for all cell specimens. - + Returns ------- result : xr.DataArray diff --git a/allensdk/brain_observatory/behavior/behavior_project_cache.py b/allensdk/brain_observatory/behavior/behavior_project_cache.py new file mode 100644 index 000000000..676edc267 --- /dev/null +++ b/allensdk/brain_observatory/behavior/behavior_project_cache.py @@ -0,0 +1,361 @@ +import numpy as np +import os.path +import csv +from functools import partial +from typing import Type, Callable, Optional, List, Any +import pandas as pd +import time +import logging + +from allensdk.api.cache import Cache + +from allensdk.brain_observatory.behavior.behavior_project_lims_api import ( + BehaviorProjectLimsApi) +from allensdk.brain_observatory.behavior.internal.behavior_project_base\ + import BehaviorProjectBase +from allensdk.api.caching_utilities import one_file_call_caching, call_caching +from allensdk.core.exceptions import MissingDataError + +BehaviorProjectApi = Type[BehaviorProjectBase] + + +class BehaviorProjectCache(Cache): + + MANIFEST_VERSION = "0.0.1-alpha" + OPHYS_SESSIONS_KEY = "ophys_sessions" + BEHAVIOR_SESSIONS_KEY = "behavior_sessions" + OPHYS_EXPERIMENTS_KEY = "ophys_experiments" + + # Temporary way for scientists to keep track of analyses + OPHYS_ANALYSIS_LOG_KEY = "ophys_analysis_log" + BEHAVIOR_ANALYSIS_LOG_KEY = "behavior_analysis_log" + + MANIFEST_CONFIG = { + OPHYS_SESSIONS_KEY: { + "spec": f"{OPHYS_SESSIONS_KEY}.csv", + "parent_key": "BASEDIR", + "typename": "file" + }, + BEHAVIOR_SESSIONS_KEY: { + "spec": f"{BEHAVIOR_SESSIONS_KEY}.csv", + "parent_key": "BASEDIR", + "typename": "file" + }, + OPHYS_EXPERIMENTS_KEY: { + "spec": f"{OPHYS_EXPERIMENTS_KEY}.csv", + "parent_key": "BASEDIR", + "typename": "file" + }, + OPHYS_ANALYSIS_LOG_KEY: { + "spec": f"{OPHYS_ANALYSIS_LOG_KEY}.csv", + "parent_key": "BASEDIR", + "typename": "file" + }, + BEHAVIOR_ANALYSIS_LOG_KEY: { + "spec": f"{BEHAVIOR_ANALYSIS_LOG_KEY}.csv", + "parent_key": "BASEDIR", + "typename": "file" + }, + } + + def __init__( + self, + fetch_api: BehaviorProjectApi = BehaviorProjectLimsApi.default(), + fetch_tries: int = 2, + **kwargs): + """ Entrypoint for accessing visual behavior data. Supports + access to summaries of session data and provides tools for + downloading detailed session data (such as dff traces). + + --- NOTE --- + Because NWB files are not currently supported for this project (as of + 11/2019), this cache will not actually save any files of session data + to the local machine. Only summary tables will be saved to the local + cache. File retrievals for specific sessions will be handled by + the fetch api used for the Session object, and cached in-memory + only to enable fast retrieval for subsequent calls. + + If you are looping over session objects, be sure to clean up + your memory when it is not needed by calling `cache_clear` from + your session object. + + Parameters + ========== + fetch_api : + Used to pull data from remote sources, after which it is locally + cached. Any object inheriting from BehaviorProjectBase is + suitable. Current options are: + EcephysProjectLimsApi :: Fetches bleeding-edge data from the + Allen Institute"s internal database. Only works if you are + on our internal network. + fetch_tries : + Maximum number of times to attempt a download before giving up and + raising an exception. Note that this is total tries, not retries + **kwargs : + manifest : str or Path + full path at which manifest json will be stored + version : str + version of manifest file. 
If this mismatches the version + recorded in the file at manifest, an error will be raised. + other kwargs are passed to allensdk.api.cache.Cache + """ + kwargs["manifest"] = kwargs.get("manifest", + "behavior_project_manifest.json") + kwargs["version"] = kwargs.get("version", self.MANIFEST_VERSION) + + super().__init__(**kwargs) + self.fetch_api = fetch_api + self.fetch_tries = fetch_tries + self.logger = logging.getLogger(self.__class__.__name__) + + @classmethod + def from_lims(cls, lims_kwargs=None, **kwargs): + lims_kwargs_ = lims_kwargs or {} + return cls(fetch_api=BehaviorProjectLimsApi.default(**lims_kwargs_), + **kwargs) + + def get_session_table( + self, + suppress: Optional[List[str]] = None, + by: str = "ophys_session_id") -> pd.DataFrame: + """ + Return summary table of all ophys_session_ids in the database. + :param suppress: optional list of columns to drop from the resulting + dataframe. + :type suppress: list of str + :param by: (default="ophys_session_id"). Column to index on, either + "ophys_session_id" or "ophys_experiment_id". + If by="ophys_experiment_id", then each row will only have one + experiment id, of type int (vs. an array of 1>more). + :type by: str + :rtype: pd.DataFrame + """ + write_csv = partial( + _write_csv, + array_fields=["reporter_line", "driver_line", + "ophys_experiment_id"]) + read_csv = partial( + _read_csv, index_col="ophys_session_id", + array_fields=["reporter_line", "driver_line", + "ophys_experiment_id"], + array_types=[str, str, int]) + path = self.get_cache_path(None, self.OPHYS_SESSIONS_KEY) + sessions = one_file_call_caching( + path, + self.fetch_api.get_session_table, + write_csv, read_csv) + if suppress: + sessions.drop(columns=suppress, inplace=True, errors="ignore") + + # Possibly explode and reindex + if by == "ophys_session_id": + pass + elif by == "ophys_experiment_id": + sessions = (sessions.reset_index() + .explode("ophys_experiment_id") + .set_index("ophys_experiment_id")) + else: + self.logger.warning( + f"Invalid value for `by`, '{by}', passed to get_session_table." + " Valid choices for `by` are 'ophys_experiment_id' and " + "'ophys_session_id'.") + return sessions + + def add_manifest_paths(self, manifest_builder): + manifest_builder = super().add_manifest_paths(manifest_builder) + for key, config in self.MANIFEST_CONFIG.items(): + manifest_builder.add_path(key, **config) + return manifest_builder + + def get_experiment_table( + self, + suppress: Optional[List[str]] = None) -> pd.DataFrame: + """ + Return summary table of all ophys_experiment_ids in the database. + :param suppress: optional list of columns to drop from the resulting + dataframe. + :type suppress: list of str + :rtype: pd.DataFrame + """ + write_csv = partial( + _write_csv, + array_fields=["reporter_line", "driver_line"]) + read_csv = partial( + _read_csv, index_col="ophys_experiment_id", + array_fields=["reporter_line", "driver_line"], + array_types=[str, str]) + path = self.get_cache_path(None, self.OPHYS_EXPERIMENTS_KEY) + experiments = one_file_call_caching( + path, + self.fetch_api.get_experiment_table, + write_csv, read_csv) + if suppress: + experiments.drop(columns=suppress, inplace=True, errors="ignore") + return experiments + + def get_behavior_session_table( + self, + suppress: Optional[List[str]] = None) -> pd.DataFrame: + """ + Return summary table of all behavior_session_ids in the database. + :param suppress: optional list of columns to drop from the resulting + dataframe. 
+ :type suppress: list of str + :rtype: pd.DataFrame + """ + read_csv = partial( + _read_csv, index_col="behavior_session_id", + array_fields=["reporter_line", "driver_line"], + array_types=[str, str]) + write_csv = partial( + _write_csv, array_fields=["reporter_line", "driver_line"]) + path = self.get_cache_path(None, self.BEHAVIOR_SESSIONS_KEY) + sessions = one_file_call_caching( + path, + self.fetch_api.get_behavior_only_session_table, + write_csv, read_csv) + sessions = sessions.rename(columns={"genotype": "full_genotype"}) + if suppress: + sessions.drop(columns=suppress, inplace=True, errors="ignore") + return sessions + + def get_session_data(self, ophys_experiment_id: int, fixed: bool = False): + """ + Note -- This method mocks the behavior of a cache. No files are + actually downloaded for local access. Instead, it adds the + session id to a csv log. If the "fixed" parameter is true, + then the API will first check to ensure that the log is present + in the record before pulling the data. + """ + # TODO: Future development will include an NWB reader to read from + # a true local cache (once nwb files are created) + # For now just check the log if pass `fixed` + path = self.get_cache_path(None, self.OPHYS_ANALYSIS_LOG_KEY) + if fixed: + self.logger.warning( + "Warning! Passing `fixed=True` does not ensure that the " + "underlying data has not changed, as no data are actually " + "cached locally. The log will be updated each time the data " + "are pulled from the database for tracking purposes.") + try: + record = pd.read_csv(path) + except FileNotFoundError: + raise MissingDataError( + "No analysis log found! Add to the log by getting " + "session data with fixed=False.") + if ophys_experiment_id not in record["ophys_experiment_id"].values: + raise MissingDataError( + f"Data for ophys experiment {ophys_experiment_id} not " + "found!") + + fetch_session = partial(self.fetch_api.get_session_data, + ophys_experiment_id) + write_log = partial(_write_log, path=path, + key_name="ophys_experiment_id", + key_value=ophys_experiment_id) + return call_caching( + fetch_session, + write_log, + lazy=False, + read=fetch_session + ) + + def get_behavior_session_data(self, behavior_session_id: int, + fixed: bool = False): + """ + Note -- This method mocks the behavior of a cache. No files are + actually downloaded for local access. Instead, it adds the + session id to a csv log. If the "fixed" parameter is true, + then the API will first check to ensure that the log is present + in the record before pulling the data. + """ + # TODO: Future development will include an NWB reader to read from + # a true local cache (once nwb files are created) + # For now just check the log if pass `fixed` + path = self.get_cache_path(None, self.BEHAVIOR_ANALYSIS_LOG_KEY) + if fixed: + self.logger.warning( + "Warning! Passing `fixed=True` does not ensure that the " + "underlying data has not changed, as no data are actually " + "cached locally. The log will be updated each time the data " + "are pulled from the database for tracking purposes.") + try: + record = pd.read_csv(path) + except FileNotFoundError: + raise MissingDataError( + "No analysis log found! 
Add to the log by getting " + "session data with fixed=False.") + if behavior_session_id not in record["behavior_session_id"].values: + raise MissingDataError( + f"Data for ophys experiment {behavior_session_id} not " + "found!") + + fetch_session = partial(self.fetch_api.get_behavior_only_session_data, + behavior_session_id) + write_log = partial(_write_log, path=path, + key_name="behavior_session_id", + key_value=behavior_session_id) + return call_caching( + fetch_session, + write_log, + lazy=False, # can't actually read from file cache + read=fetch_session + ) + + +def _write_log(data: Any, path: str, key_name: str, key_value: Any): + """ + Helper method to create and add to a log. Invoked any time a session + object is created via BehaviorProjectCache. + :param data: Unused, required because call_caching method assumes + all writer functions have data as the first positional argument + :param path: Path to save the log file + :type path: str path + :param key_name: Name of the id used to track the session object. + Typically "behavior_session_id" or "ophys_session_id". + :type key_name: str + :param key_value: Value of the id used to track the session object. + Usually an int. + """ + now = round(time.time()) + keys = [key_name, "created_at", "updated_at"] + values = [key_value, now, now] + if os.path.exists(path): + record = (pd.read_csv(path, index_col=key_name) + .to_dict(orient="index")) + experiment = record.get(key_value) + if experiment: + experiment.update({"updated_at": now}) + else: + record.update({key_value: dict(zip(keys[1:], values[1:]))}) + (pd.DataFrame.from_dict(record, orient="index") + .rename_axis(index=key_name) + .to_csv(path)) + else: + with open(path, "w") as f: + w = csv.DictWriter(f, fieldnames=keys) + w.writeheader() + w.writerow(dict(zip(keys, values))) + + +def _write_csv(path, df, array_fields=None): + """Private writer that encodes array fields into pipe-delimited strings + for saving a csv. 
+ """ + df_ = df.copy() + for field in array_fields: + df_[field] = df_[field].apply(lambda x: "|".join(map(str, x))) + df_.to_csv(path) + + +def _read_csv(path, index_col, array_fields=None, array_types=None): + """Private reader that can open a csv with pipe-delimited array + fields and convert them to array.""" + df = pd.read_csv(path, index_col=index_col) + for field, type_ in zip(array_fields, array_types): + if type_ == str: + df[field] = df[field].apply(lambda x: x.split("|")) + else: + df[field] = df[field].apply( + lambda x: np.fromstring(x, sep="|", dtype=type_)) + return df diff --git a/allensdk/brain_observatory/behavior/behavior_project_lims_api.py b/allensdk/brain_observatory/behavior/behavior_project_lims_api.py new file mode 100644 index 000000000..0e50c0d19 --- /dev/null +++ b/allensdk/brain_observatory/behavior/behavior_project_lims_api.py @@ -0,0 +1,455 @@ +import pandas as pd +from typing import Optional, List, Dict, Any, Iterable +import logging + +from allensdk.brain_observatory.behavior.internal.behavior_project_base\ + import BehaviorProjectBase +from allensdk.brain_observatory.behavior.behavior_data_session import ( + BehaviorDataSession) +from allensdk.brain_observatory.behavior.behavior_ophys_session import ( + BehaviorOphysSession) +from allensdk.internal.api.behavior_data_lims_api import BehaviorDataLimsApi +from allensdk.internal.api.behavior_ophys_api import BehaviorOphysLimsApi +from allensdk.internal.api import PostgresQueryMixin +from allensdk.brain_observatory.ecephys.ecephys_project_api.http_engine import ( + HttpEngine) +from allensdk.core.typing import SupportsStr + + +class BehaviorProjectLimsApi(BehaviorProjectBase): + def __init__(self, postgres_engine, app_engine): + """ Downloads visual behavior data from the Allen Institute's + internal Laboratory Information Management System (LIMS). Only + functional if connected to the Allen Institute Network. Used to load + data into BehaviorProjectCache. + + Typically want to construct an instance of this class by calling + `BehaviorProjectLimsApi.default()`. + + Note -- Currently the app engine is unused because we aren't yet + supporting the download of stimulus templates for visual behavior + data. This feature will be added at a later date. + + Parameters + ---------- + postgres_engine : + used for making queries against the LIMS postgres database. Must + implement: + select : takes a postgres query as a string. Returns a pandas + dataframe of results + fetchall : takes a postgres query as a string. If there is + exactly one column in the response, return the values as a + list. + app_engine : + used for making queries agains the lims web application. Must + implement: + stream : takes a url as a string. Returns an iterable yielding + the response body as bytes. + """ + self.postgres_engine = postgres_engine + self.app_engine = app_engine + self.logger = logging.getLogger("BehaviorProjectLimsApi") + + @classmethod + def default( + cls, + pg_kwargs: Optional[Dict[str, Any]] = None, + app_kwargs: Optional[Dict[str, Any]] = None) -> \ + "BehaviorProjectLimsApi": + """Construct a BehaviorProjectLimsApi instance with default + postgres and app engines. + + :param pg_kwargs: dict of keyword arguments to pass to the + PostgresQueryMixin class instance. Valid arguments include: + "dbname", "user", "host", "password", "port". Will use + defaults in PostGresQueryMixin.__init__ if unspecified. + :type pg_kwargs: dict + :param app_kwargs: dict of keyword arguments to pass to the + HTTPEngine class instance. 
Valid arguments include: + "scheme", "host". Will default to scheme=http, host=lims2 + if left unspecified. + :type app_kwargs: dict + :rtype: BehaviorProjectLimsApi + """ + _pg_kwargs = pg_kwargs or dict() + + _app_kwargs = {"scheme": "http", "host": "lims2"} + if app_kwargs: + _app_kwargs.update(app_kwargs) + + pg_engine = PostgresQueryMixin(**_pg_kwargs) + app_engine = HttpEngine(**_app_kwargs) + return cls(pg_engine, app_engine) + + @staticmethod + def _build_in_list_selector_query( + col, + valid_list: Optional[SupportsStr] = None, + operator: str = "WHERE") -> str: + """ + Filter for rows where the value of a column is contained in a list. + If no list is specified in `valid_list`, return an empty string. + + NOTE: if string ids are used, then the strings in `valid_list` must + be enclosed in single quotes, or else the query will throw a column + does not exist error. E.g. ["'mystringid1'", "'mystringid2'"...] + + :param col: name of column to compare if in a list + :type col: str + :param valid_list: iterable of values that can be mapped to str + (e.g. string, int, float). + :type valid_list: list + :param operator: SQL operator to start the clause. Default="WHERE". + Valid inputs: "AND", "OR", "WHERE" (not case-sensitive). + :type operator: str + """ + if not valid_list: + return "" + session_query = ( + f"""{operator} {col} IN ({",".join( + sorted(set(map(str, valid_list))))})""") + return session_query + + @staticmethod + def _build_experiment_from_session_query() -> str: + """Aggregate sql sub-query to get all ophys_experiment_ids associated + with a single ophys_session_id.""" + query = f""" + -- -- begin getting all ophys_experiment_ids -- -- + SELECT + (ARRAY_AGG(DISTINCT(oe.id))) AS experiment_ids, os.id + FROM ophys_sessions os + RIGHT JOIN ophys_experiments oe ON oe.ophys_session_id = os.id + GROUP BY os.id + -- -- end getting all ophys_experiment_ids -- -- + """ + return query + + @staticmethod + def _build_line_from_donor_query(line="driver") -> str: + """Sub-query to get a line from a donor. + :param line: 'driver' or 'reporter' + """ + query = f""" + -- -- begin getting {line} line from donors -- -- + SELECT ARRAY_AGG (g.name) AS {line}_line, d.id AS donor_id + FROM donors d + LEFT JOIN donors_genotypes dg ON dg.donor_id=d.id + LEFT JOIN genotypes g ON g.id=dg.genotype_id + LEFT JOIN genotype_types gt ON gt.id=g.genotype_type_id + WHERE gt.name='{line}' + GROUP BY d.id + -- -- end getting {line} line from donors -- -- + """ + return query + + def _get_behavior_summary_table(self, + session_sub_query: str) -> pd.DataFrame: + """Build and execute query to retrieve summary data for all data, + or a subset of session_ids (via the session_sub_query). + Should pass an empty string to `session_sub_query` if want to get + all data in the database. + :param session_sub_query: additional filtering logic to get a + subset of sessions. 
+ :type session_sub_query: str + :rtype: pd.DataFrame + """ + query = f""" + SELECT + bs.id AS behavior_session_id, + bs.ophys_session_id, + bs.behavior_training_id, + equipment.name as equipment_name, + d.id as donor_id, + d.full_genotype, + reporter.reporter_line, + driver.driver_line, + g.name AS sex, + bs.foraging_id + FROM behavior_sessions bs + JOIN donors d on bs.donor_id = d.id + JOIN genders g on g.id = d.gender_id + JOIN ( + {self._build_line_from_donor_query("reporter")} + ) reporter on reporter.donor_id = d.id + JOIN ( + {self._build_line_from_donor_query("driver")} + ) driver on driver.donor_id = d.id + JOIN equipment ON equipment.id = bs.equipment_id + {session_sub_query} + """ + return self.postgres_engine.select(query) + + def _get_foraging_ids_from_behavior_session( + self, behavior_session_ids: List[int]) -> List[str]: + behav_ids = self._build_in_list_selector_query("id", + behavior_session_ids, + operator="AND") + forag_ids_query = f""" + SELECT foraging_id + FROM behavior_sessions + WHERE foraging_id IS NOT NULL + {behav_ids}; + """ + self.logger.debug("get_foraging_ids_from_behavior_session query: \n" + f"{forag_ids_query}") + foraging_ids = self.postgres_engine.fetchall(forag_ids_query) + + self.logger.debug(f"Retrieved {len(foraging_ids)} foraging ids for" + f" behavior stage query. Ids = {foraging_ids}") + return foraging_ids + + def _get_behavior_stage_table( + self, + behavior_session_ids: Optional[List[int]] = None, + mtrain_db: Optional[PostgresQueryMixin] = None): + # Select fewer rows if possible via behavior_session_id + if behavior_session_ids: + foraging_ids = self._get_foraging_ids_from_behavior_session( + behavior_session_ids) + # Otherwise just get the full table from mtrain + else: + foraging_ids = None + + foraging_ids_query = self._build_in_list_selector_query( + "bs.id", foraging_ids) + + # TODO: this password has already been exposed in code but we really + # need to move towards using a secrets database + if not mtrain_db: + mtrain_db = PostgresQueryMixin( + dbname="mtrain", user="mtrainreader", + host="prodmtrain1", port=5432, password="mtrainro") + query = f""" + SELECT + stages.name as session_type, + bs.id AS foraging_id + FROM behavior_sessions bs + JOIN stages ON stages.id = bs.state_id + {foraging_ids_query}; + """ + return mtrain_db.select(query) + + def get_session_data(self, ophys_session_id: int) -> BehaviorOphysSession: + """Returns a BehaviorOphysSession object that contains methods + to analyze a single behavior+ophys session. + :param ophys_session_id: id that corresponds to a behavior session + :type ophys_session_id: int + :rtype: BehaviorOphysSession + """ + return BehaviorOphysSession(BehaviorOphysLimsApi(ophys_session_id)) + + def _get_experiment_table( + self, + ophys_experiment_ids: Optional[List[int]] = None) -> pd.DataFrame: + """ + Helper function for easier testing. + Return a pd.Dataframe table with all ophys_experiment_ids and relevant + metadata. + Return columns: ophys_session_id, behavior_session_id, + ophys_experiment_id, project_code, session_name, + session_type, equipment_name, date_of_acquisition, + specimen_id, full_genotype, sex, age_in_days, + reporter_line, driver_line + + :param ophys_experiment_ids: optional list of ophys_experiment_ids + to include + :rtype: pd.DataFrame + """ + if not ophys_experiment_ids: + self.logger.warning("Getting all ophys sessions." 
+ " This might take a while.") + experiment_query = self._build_in_list_selector_query( + "oe.id", ophys_experiment_ids) + query = f""" + SELECT + oe.id as ophys_experiment_id, + os.id as ophys_session_id, + bs.id as behavior_session_id, + oec.visual_behavior_experiment_container_id as container_id, + pr.code as project_code, + vbc.workflow_state as container_workflow_state, + oe.workflow_state as experiment_workflow_state, + os.name as session_name, + os.stimulus_name as session_type, + equipment.name as equipment_name, + os.date_of_acquisition, + os.isi_experiment_id, + os.specimen_id, + g.name as sex, + DATE_PART('day', os.date_of_acquisition - d.date_of_birth) + AS age_in_days, + d.full_genotype, + reporter.reporter_line, + driver.driver_line, + id.depth as imaging_depth, + st.acronym as targeted_structure, + vbc.published_at + FROM ophys_experiments_visual_behavior_experiment_containers oec + JOIN visual_behavior_experiment_containers vbc + ON oec.visual_behavior_experiment_container_id = vbc.id + JOIN ophys_experiments oe ON oe.id = oec.ophys_experiment_id + JOIN ophys_sessions os ON os.id = oe.ophys_session_id + JOIN behavior_sessions bs ON os.id = bs.ophys_session_id + JOIN projects pr ON pr.id = os.project_id + JOIN donors d ON d.id = bs.donor_id + JOIN genders g ON g.id = d.gender_id + JOIN ( + {self._build_line_from_donor_query(line="reporter")} + ) reporter on reporter.donor_id = d.id + JOIN ( + {self._build_line_from_donor_query(line="driver")} + ) driver on driver.donor_id = d.id + LEFT JOIN imaging_depths id ON id.id = os.imaging_depth_id + JOIN structures st ON st.id = oe.targeted_structure_id + JOIN equipment ON equipment.id = os.equipment_id + {experiment_query}; + """ + self.logger.debug(f"get_experiment_table query: \n{query}") + return self.postgres_engine.select(query) + + def _get_session_table( + self, + ophys_session_ids: Optional[List[int]] = None) -> pd.DataFrame: + """Helper function for easier testing. + Return a pd.Dataframe table with all ophys_session_ids and relevant + metadata. + Return columns: ophys_session_id, behavior_session_id, + ophys_experiment_id, project_code, session_name, + session_type, equipment_name, date_of_acquisition, + specimen_id, full_genotype, sex, age_in_days, + reporter_line, driver_line + + :param ophys_session_ids: optional list of ophys_session_ids to include + :rtype: pd.DataFrame + """ + if not ophys_session_ids: + self.logger.warning("Getting all ophys sessions." 
+ " This might take a while.") + session_query = self._build_in_list_selector_query("os.id", + ophys_session_ids) + query = f""" + SELECT + os.id as ophys_session_id, + bs.id as behavior_session_id, + experiment_ids as ophys_experiment_id, + pr.code as project_code, + os.name as session_name, + os.stimulus_name as session_type, + equipment.name as equipment_name, + os.date_of_acquisition, + os.specimen_id, + g.name as sex, + DATE_PART('day', os.date_of_acquisition - d.date_of_birth) + AS age_in_days, + d.full_genotype, + reporter.reporter_line, + driver.driver_line + FROM ophys_sessions os + JOIN behavior_sessions bs ON os.id = bs.ophys_session_id + JOIN projects pr ON pr.id = os.project_id + JOIN donors d ON d.id = bs.donor_id + JOIN genders g ON g.id = d.gender_id + JOIN ( + {self._build_experiment_from_session_query()} + ) exp_ids ON os.id = exp_ids.id + JOIN ( + {self._build_line_from_donor_query(line="reporter")} + ) reporter on reporter.donor_id = d.id + JOIN ( + {self._build_line_from_donor_query(line="driver")} + ) driver on driver.donor_id = d.id + JOIN equipment ON equipment.id = os.equipment_id + {session_query}; + """ + self.logger.debug(f"get_session_table query: \n{query}") + return self.postgres_engine.select(query) + + def get_session_table( + self, + ophys_session_ids: Optional[List[int]] = None) -> pd.DataFrame: + """Return a pd.Dataframe table with all ophys_session_ids and relevant + metadata. + Return columns: ophys_session_id, behavior_session_id, + ophys_experiment_id, project_code, session_name, + session_type, equipment_name, date_of_acquisition, + specimen_id, full_genotype, sex, age_in_days, + reporter_line, driver_line + + :param ophys_session_ids: optional list of ophys_session_ids to include + :rtype: pd.DataFrame + """ + # There is one ophys_session_id from 2018 that has multiple behavior + # ids, causing duplicates -- drop all dupes for now; # TODO + table = (self._get_session_table(ophys_session_ids) + .drop_duplicates(subset=["ophys_session_id"], keep=False) + .set_index("ophys_session_id")) + return table + + def get_behavior_only_session_data( + self, behavior_session_id: int) -> BehaviorDataSession: + """Returns a BehaviorDataSession object that contains methods to + analyze a single behavior session. + :param behavior_session_id: id that corresponds to a behavior session + :type behavior_session_id: int + :rtype: BehaviorDataSession + """ + return BehaviorDataSession(BehaviorDataLimsApi(behavior_session_id)) + + def get_experiment_table( + self, + ophys_experiment_ids: Optional[List[int]] = None) -> pd.DataFrame: + """Return a pd.Dataframe table with all ophys_experiment_ids and + relevant metadata. This is the most specific and most informative + level to examine the data. + Return columns: + ophys_experiment_id, ophys_session_id, behavior_session_id, + container_id, project_code, container_workflow_state, + experiment_workflow_state, session_name, session_type, + equipment_name, date_of_acquisition, isi_experiment_id, + specimen_id, sex, age_in_days, full_genotype, reporter_line, + driver_line, imaging_depth, targeted_structure, published_at + :param ophys_experiment_ids: optional list of ophys_experiment_ids + to include + :rtype: pd.DataFrame + """ + return self._get_experiment_table().set_index("ophys_experiment_id") + + def get_behavior_only_session_table( + self, + behavior_session_ids: Optional[List[int]] = None) -> pd.DataFrame: + """Returns a pd.DataFrame table with all behavior session_ids to the + user with additional metadata. 
+ + Can't return age at time of session because there is no field for + acquisition date for behavior sessions (only in the stimulus pkl file) + :rtype: pd.DataFrame + """ + self.logger.warning("Getting behavior-only session data. " + "This might take a while...") + session_query = self._build_in_list_selector_query( + "bs.id", behavior_session_ids) + summary_tbl = self._get_behavior_summary_table(session_query) + stimulus_names = self._get_behavior_stage_table(behavior_session_ids) + return (summary_tbl.merge(stimulus_names, + on=["foraging_id"], how="left") + .set_index("behavior_session_id")) + + def get_natural_movie_template(self, number: int) -> Iterable[bytes]: + """Download a template for the natural scene stimulus. This is the + actual image that was shown during the recording session. + :param number: idenfifier for this movie (note that this is an int, + so to get the template for natural_movie_three should pass 3) + :type number: int + :returns: iterable yielding a tiff file as bytes + """ + raise NotImplementedError() + + def get_natural_scene_template(self, number: int) -> Iterable[bytes]: + """ Download a template for the natural movie stimulus. This is the + actual movie that was shown during the recording session. + :param number: identifier for this scene + :type number: int + :returns: An iterable yielding an npy file as bytes + """ + raise NotImplementedError() diff --git a/allensdk/brain_observatory/behavior/internal/__init__.py b/allensdk/brain_observatory/behavior/internal/__init__.py new file mode 100644 index 000000000..3364c5707 --- /dev/null +++ b/allensdk/brain_observatory/behavior/internal/__init__.py @@ -0,0 +1,2 @@ +from allensdk.brain_observatory.behavior.internal.behavior_base import BehaviorBase # noqa: F401 +from allensdk.brain_observatory.behavior.internal.behavior_ophys_base import BehaviorOphysBase # noqa: F401 diff --git a/allensdk/brain_observatory/behavior/internal/behavior_base.py b/allensdk/brain_observatory/behavior/internal/behavior_base.py new file mode 100644 index 000000000..cd198780a --- /dev/null +++ b/allensdk/brain_observatory/behavior/internal/behavior_base.py @@ -0,0 +1,130 @@ +import abc + +from typing import Dict, NamedTuple + +import numpy as np +import pandas as pd +from allensdk.brain_observatory.running_speed import RunningSpeed + + +class BehaviorBase(abc.ABC): + """Abstract base class implementing required methods for interacting with + behavior session data. + + Child classes should be instantiated with a fetch API that implements these + methods. + """ + @abc.abstractmethod + def get_licks(self) -> pd.DataFrame: + """Get lick data from pkl file. + + Returns + ------- + np.ndarray + A dataframe containing lick timestamps. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_rewards(self) -> pd.DataFrame: + """Get reward data from pkl file. + + Returns + ------- + pd.DataFrame + A dataframe containing timestamps of delivered rewards. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_running_data_df(self) -> pd.DataFrame: + """Get running speed data. + + Returns + ------- + pd.DataFrame + Dataframe containing various signals used to compute running speed. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_running_speed(self) -> RunningSpeed: + """Get running speed using timestamps from + self.get_stimulus_timestamps. + + NOTE: Do not correct for monitor delay. 
+ + Returns + ------- + RunningSpeed (NamedTuple with two fields) + timestamps : np.ndarray + Timestamps of running speed data samples + values : np.ndarray + Running speed of the experimental subject (in cm / s). + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_stimulus_presentations(self) -> pd.DataFrame: + """Get stimulus presentation data. + + NOTE: Uses timestamps that do not account for monitor delay. + + Returns + ------- + pd.DataFrame + Table whose rows are stimulus presentations + (i.e. a given image, for a given duration, typically 250 ms) + and whose columns are presentation characteristics. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_stimulus_templates(self) -> Dict[str, np.ndarray]: + """Get stimulus templates (movies, scenes) for behavior session. + + Returns + ------- + Dict[str, np.ndarray] + A dictionary containing the stimulus images presented during the + session. Keys are data set names, and values are 3D numpy arrays. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_stimulus_timestamps(self) -> np.ndarray: + """Get stimulus timestamps from pkl file. + + NOTE: Located with behavior_session_id + + Returns + ------- + np.ndarray + Timestamps associated with stimulus presentations on the monitor + that do no account for monitor delay. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_task_parameters(self) -> dict: + """Get task parameters from pkl file. + + Returns + ------- + dict + A dictionary containing parameters used to define the task runtime + behavior. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_trials(self) -> pd.DataFrame: + """Get trials from pkl file + + Returns + ------- + pd.DataFrame + A dataframe containing behavioral trial start/stop times, + and trial data + """ + raise NotImplementedError() diff --git a/allensdk/brain_observatory/behavior/internal/behavior_ophys_base.py b/allensdk/brain_observatory/behavior/internal/behavior_ophys_base.py new file mode 100644 index 000000000..8ae582882 --- /dev/null +++ b/allensdk/brain_observatory/behavior/internal/behavior_ophys_base.py @@ -0,0 +1,149 @@ +import abc + +import numpy as np +import pandas as pd + +from allensdk.brain_observatory.behavior.internal.behavior_base import BehaviorBase +from allensdk.brain_observatory.behavior.image_api import Image + + +class BehaviorOphysBase(BehaviorBase): + """Abstract base class implementing required methods for interacting with + behavior+ophys session data. + + Child classes should be instantiated with a fetch API that implements these + methods. Both fetch API and session object should inherit from this base. + """ + @abc.abstractmethod + def get_average_projection(self) -> Image: + """Get an image whose values are the average obtained values at + each pixel of the ophys movie over time. + + Returns + ---------- + allensdk.brain_observatory.behavior.image_api.Image: + Array-like interface to avg projection image data and metadata. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_max_projection(self) -> Image: + """Get an image whose values are the maximum obtained values at + each pixel of the ophys movie over time. + + Returns + ---------- + allensdk.brain_observatory.behavior.image_api.Image: + Array-like interface to max projection image data and metadata. 
+ """ + raise NotImplementedError() + + @abc.abstractmethod + def get_cell_specimen_table(self) -> pd.DataFrame: + """Get a cell specimen dataframe containing ROI information about + cells identified in an ophys experiment. + + Returns + ------- + pd.DataFrame + Cell ROI information organized into a dataframe. + Index is the cell ROI IDs. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_corrected_fluorescence_traces(self) -> pd.DataFrame: + """Get motion-corrected fluorescence traces. + + Returns + ------- + pd.DataFrame + Motion-corrected fluorescence traces organized into a dataframe. + Index is the cell ROI IDs. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_dff_traces(self) -> pd.DataFrame: + """Get a table of delta fluorescence over fluorescence traces. + + Returns + ------- + pd.DataFrame + The traces of dff (normalized fluorescence) organized into a + dataframe. Index is the cell ROI IDs. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_metadata(self) -> dict: + """Get behavior+ophys session metadata. + + Returns + ------- + dict + A dictionary of session-specific metadata. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_motion_correction(self) -> pd.DataFrame: + """Get motion correction trace data. + + Returns + ------- + pd.DataFrame + A dataframe containing trace data used during motion + correction computation. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_ophys_timestamps(self) -> np.ndarray: + """Get optical physiology frame timestamps. + + Returns + ------- + np.ndarray + Timestamps associated with frames captured by the microscope. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_raw_stimulus_timestamps(self) -> np.ndarray: + """Get raw stimulus timestamps. + + Returns + ------- + np.ndarray + Timestamps associated with stimulus presentations on the monitor + without accounting for monitor delay. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_stimulus_timestamps(self) -> np.ndarray: + """Get stimulus timestamps. + + Returns + ------- + np.ndarray + Timestamps associated with stimulus presentations on the monitor + after accounting for monitor delay. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_stimulus_presentations(self) -> pd.DataFrame: + """Get stimulus presentation data. + + NOTE: Uses monitor delay corrected stimulus timestamps. + + Returns + ------- + pd.DataFrame + Table whose rows are stimulus presentations + (i.e. a given image, for a given duration, typically 250 ms) + and whose columns are presentation characteristics. + """ + raise NotImplementedError() diff --git a/allensdk/brain_observatory/behavior/internal/behavior_project_base.py b/allensdk/brain_observatory/behavior/internal/behavior_project_base.py new file mode 100644 index 000000000..ead386708 --- /dev/null +++ b/allensdk/brain_observatory/behavior/internal/behavior_project_base.py @@ -0,0 +1,66 @@ +from abc import ABC, abstractmethod +from typing import Iterable + +from allensdk.brain_observatory.behavior.behavior_ophys_session import ( + BehaviorOphysSession) +from allensdk.brain_observatory.behavior.behavior_data_session import ( + BehaviorDataSession) +import pandas as pd + + +class BehaviorProjectBase(ABC): + @abstractmethod + def get_session_data(self, ophys_session_id: int) -> BehaviorOphysSession: + """Returns a BehaviorOphysSession object that contains methods + to analyze a single behavior+ophys session. 
+        :param ophys_session_id: id that corresponds to a behavior session
+        :type ophys_session_id: int
+        :rtype: BehaviorOphysSession
+        """
+        pass
+
+    @abstractmethod
+    def get_session_table(self) -> pd.DataFrame:
+        """Return a pd.DataFrame table with all ophys_session_ids and relevant
+        metadata."""
+        pass
+
+    @abstractmethod
+    def get_behavior_only_session_data(
+            self, behavior_session_id: int) -> BehaviorDataSession:
+        """Returns a BehaviorDataSession object that contains methods to
+        analyze a single behavior session.
+        :param behavior_session_id: id that corresponds to a behavior session
+        :type behavior_session_id: int
+        :rtype: BehaviorDataSession
+        """
+        pass
+
+    @abstractmethod
+    def get_behavior_only_session_table(self) -> pd.DataFrame:
+        """Returns a pd.DataFrame table of all behavior session_ids with
+        additional metadata.
+        :rtype: pd.DataFrame
+        """
+        pass
+
+    @abstractmethod
+    def get_natural_movie_template(self, number: int) -> Iterable[bytes]:
+        """Download a template for the natural movie stimulus. This is the
+        actual movie that was shown during the recording session.
+        :param number: identifier for this movie (note that this is an int,
+        so to get the template for natural_movie_three you should pass 3)
+        :type number: int
+        :returns: iterable yielding an npy file as bytes
+        """
+        pass
+
+    @abstractmethod
+    def get_natural_scene_template(self, number: int) -> Iterable[bytes]:
+        """ Download a template for the natural scene stimulus. This is the
+        actual image that was shown during the recording session.
+        :param number: identifier for this scene
+        :type number: int
+        :returns: An iterable yielding a tiff file as bytes
+        """
+        pass
diff --git a/allensdk/brain_observatory/behavior/swdb/behavior_project_cache.py b/allensdk/brain_observatory/behavior/swdb/behavior_project_cache.py
index c30f336b5..37fdda4ab 100644
--- a/allensdk/brain_observatory/behavior/swdb/behavior_project_cache.py
+++ b/allensdk/brain_observatory/behavior/swdb/behavior_project_cache.py
@@ -10,6 +10,7 @@
 from allensdk.core.lazy_property import LazyProperty
 from allensdk.brain_observatory.behavior.trials_processing import calculate_reward_rate
 from allensdk.brain_observatory.behavior.image_api import ImageApi
+from allensdk.deprecated import deprecated
 
 csv_io = {
     'reader': lambda path: pd.read_csv(path, index_col='Unnamed: 0'),
@@ -19,6 +20,9 @@
 cache_path_example = '/allen/programs/braintv/workgroups/nc-ophys/visual_behavior/SWDB_2019/cache_20190813'
 
 
+@deprecated("swdb.behavior_project_cache.BehaviorProjectCache is deprecated "
+            "and will be removed in version 1.3. Please use brain_observatory."
+ "behavior.behavior_project_cache.BehaviorProjectCache.") class BehaviorProjectCache(object): def __init__(self, cache_base): ''' diff --git a/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_api.py b/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_api.py index edf43a490..6db0e473d 100644 --- a/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_api.py +++ b/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_api.py @@ -1,33 +1,71 @@ -class EcephysProjectApi: - def get_sessions(self, *args, **kwargs): - raise NotImplementedError() +from typing import Optional, TypeVar, Iterable + +import numpy as np +import pandas as pd + - def get_session_data(self, session_id, *args, **kwargs): +# TODO: This should be a generic over the type of the values, but there is not +# good support currently for numpy and pandas type annotations +# we should investigate numpy and pandas typing support and migrate +# https://github.com/numpy/numpy-stubs +# https://github.com/pandas-dev/pandas/blob/master/pandas/_typing.py +ArrayLike = TypeVar("ArrayLike", list, np.ndarray, pd.Series, tuple) + + +class EcephysProjectApi: + def get_sessions( + self, + session_ids: Optional[ArrayLike] = None, + published_at: Optional[str] = None + ): raise NotImplementedError() - def get_targeted_regions(self, *args, **kwargs): + def get_session_data(self, session_id: int) -> Iterable: raise NotImplementedError() def get_isi_experiments(self, *args, **kwargs): raise NotImplementedError() - def get_units(self, *args, **kwargs): + def get_units( + self, + unit_ids: Optional[ArrayLike] = None, + channel_ids: Optional[ArrayLike] = None, + probe_ids: Optional[ArrayLike] = None, + session_ids: Optional[ArrayLike] = None, + published_at: Optional[str] = None + ): raise NotImplementedError() - def get_channels(self, *args, **kwargs): + def get_channels( + self, + channel_ids: Optional[ArrayLike] = None, + probe_ids: Optional[ArrayLike] = None, + session_ids: Optional[ArrayLike] = None, + published_at: Optional[str] = None + ): raise NotImplementedError() - def get_probes(self, *args, **kwargs): + def get_probes( + self, + probe_ids: Optional[ArrayLike] = None, + session_ids: Optional[ArrayLike] = None, + published_at: Optional[str] = None + ): raise NotImplementedError() - def get_probe_lfp_data(self, probe_id, *args, **kwargs): + def get_probe_lfp_data(self, probe_id: int) -> Iterable: raise NotImplementedError() - def get_natural_movie_template(self, number, *args, **kwargs): + def get_natural_movie_template(self, number) -> Iterable: raise NotImplementedError() - def get_natural_scene_template(self, number, *args, **kwargs): + def get_natural_scene_template(self, number) -> Iterable: raise NotImplementedError() - def get_unit_analysis_metrics(self, unit_ids=None, ecephys_session_ids=None, session_types=None, *args, **kwargs): + def get_unit_analysis_metrics( + self, + unit_ids: Optional[ArrayLike] = None, + ecephys_session_ids: Optional[ArrayLike] = None, + session_types: Optional[ArrayLike] = None + ) -> pd.DataFrame: raise NotImplementedError() \ No newline at end of file diff --git a/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_lims_api.py b/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_lims_api.py index 3b6cc6a91..d90e301c9 100644 --- a/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_lims_api.py +++ 
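# Illustrative sketch (not part of the diff): how the constrained ArrayLike
# TypeVar above is meant to be read. filter_clause() is a hypothetical helper,
# shown only to demonstrate that list, tuple, np.ndarray and pd.Series are all
# acceptable id collections.
from typing import Optional, TypeVar
import numpy as np
import pandas as pd

ArrayLike = TypeVar("ArrayLike", list, np.ndarray, pd.Series, tuple)

def filter_clause(column: str, ids: Optional[ArrayLike] = None) -> str:
    # Render an optional id collection as a SQL "in" fragment (or nothing).
    if ids is None:
        return ""
    return f"and {column} in ({','.join(str(i) for i in ids)})"

filter_clause("eu.id", [1, 2, 3])         # -> "and eu.id in (1,2,3)"
filter_clause("eu.id", np.array([4, 5]))  # -> "and eu.id in (4,5)"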
b/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_lims_api.py @@ -1,23 +1,66 @@ -from pathlib import Path -import shutil -import warnings +from typing import Optional, Iterable, NamedTuple import pandas as pd -from .ecephys_project_api import EcephysProjectApi +from .ecephys_project_api import EcephysProjectApi, ArrayLike from .http_engine import HttpEngine from .utilities import postgres_macros, build_and_execute from allensdk.internal.api import PostgresQueryMixin -from allensdk.brain_observatory.ecephys import get_unit_filter_value class EcephysProjectLimsApi(EcephysProjectApi): + + STIMULUS_TEMPLATE_NAMESPACE = "brain_observatory_1.1" + def __init__(self, postgres_engine, app_engine): + """ Downloads extracellular ephys data from the Allen Institute's + internal Laboratory Information Management System (LIMS). If you are + on our network you can use this class to get bleeding-edge data into + an EcephysProjectCache. If not, it won't work at all + + Parameters + ---------- + postgres_engine : + used for making queries against the LIMS postgres database. Must + implement: + select : takes a postgres query as a string. Returns a pandas + dataframe of results + select_one : takes a postgres query as a string. If there is + exactly one record in the response, returns that record as + a dict. Otherwise returns an empty dict. + app_engine : + used for making queries agains the lims web application. Must + implement: + stream : takes a url as a string. Returns an iterable yielding + the response body as bytes. + + Notes + ----- + You almost certainly want to construct this class by calling + EcephysProjectLimsApi.default() rather than this constructor directly. + + """ + + self.postgres_engine = postgres_engine self.app_engine = app_engine - def get_session_data(self, session_id, **kwargs): + def get_session_data(self, session_id: int) -> Iterable[bytes]: + """ Download an NWB file containing detailed data for an ecephys + session. + + Parameters + ---------- + session_id : + Download an NWB file for this session + + Returns + ------- + An iterable yielding an NWB file as bytes. + + """ + nwb_response = build_and_execute( """ select wkf.id, wkf.filename, wkf.storage_directory, wkf.attachable_id from well_known_files wkf @@ -45,7 +88,21 @@ def get_session_data(self, session_id, **kwargs): f"well_known_files/download/{nwb_id}?wkf_id={nwb_id}" ) - def get_probe_lfp_data(self, probe_id): + def get_probe_lfp_data(self, probe_id: int) -> Iterable[bytes]: + """ Download an NWB file containing detailed data for the local field + potential recorded from an ecephys probe. + + Parameters + ---------- + probe_id : + Download an NWB file for this probe's LFP + + Returns + ------- + An iterable yielding an NWB file as bytes. + + """ + nwb_response = build_and_execute( """ select wkf.id from well_known_files wkf @@ -75,33 +132,90 @@ def get_probe_lfp_data(self, probe_id): ) def get_units( - self, unit_ids=None, - channel_ids=None, - probe_ids=None, - session_ids=None, - quality="good", - **kwargs - ): + self, + unit_ids: Optional[ArrayLike] = None, + channel_ids: Optional[ArrayLike] = None, + probe_ids: Optional[ArrayLike] = None, + session_ids: Optional[ArrayLike] = None, + published_at: Optional[str] = None + ) -> pd.DataFrame: + """ Download a table of records describing sorted ecephys units. + + Parameters + ---------- + unit_ids : + A collection of integer identifiers for sorted ecephys units. If + provided, only return records describing these units. 
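# Hedged sketch (not part of the diff) of the engine protocol the constructor
# docstring above describes. The Fake* classes are hypothetical stand-ins; in
# practice EcephysProjectLimsApi.default() wires up the real PostgresQueryMixin
# and HttpEngine.
import pandas as pd
from allensdk.brain_observatory.ecephys.ecephys_project_api.ecephys_project_lims_api import (
    EcephysProjectLimsApi)

class FakePostgresEngine:
    def select(self, query: str) -> pd.DataFrame:
        return pd.DataFrame()      # would run the query against LIMS postgres
    def select_one(self, query: str) -> dict:
        return {}                  # exactly one record as a dict, else empty dict

class FakeAppEngine:
    def stream(self, url: str):
        yield b""                  # would yield the response body as bytes

api = EcephysProjectLimsApi(FakePostgresEngine(), FakeAppEngine())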
+ channel_ids : + A collection of integer identifiers for ecephys channels. If + provided, results will be filtered to units recorded from these + channels. + probe_ids : + A collection of integer identifiers for ecephys probes. If + provided, results will be filtered to units recorded from these + probes. + session_ids : + A collection of integer identifiers for ecephys sessions. If + provided, results will be filtered to units recorded during + these sessions. + published_at : + A date (rendered as "YYYY-MM-DD"). If provided, only units + recorded during sessions published before this date will be + returned. + + Returns + ------- + a pd.DataFrame whose rows are ecephys channels. + + """ + response = build_and_execute( """ {%- import 'postgres_macros' as pm -%} {%- import 'macros' as m -%} - select eu.* + select + eu.id, + eu.ecephys_channel_id, + eu.quality, + eu.snr, + eu.firing_rate, + eu.isi_violations, + eu.presence_ratio, + eu.amplitude_cutoff, + eu.isolation_distance, + eu.l_ratio, + eu.d_prime, + eu.nn_hit_rate, + eu.nn_miss_rate, + eu.silhouette_score, + eu.max_drift, + eu.cumulative_drift, + eu.epoch_name_quality_metrics, + eu.epoch_name_waveform_metrics, + eu.duration, + eu.halfwidth, + eu.\"PT_ratio\", + eu.repolarization_slope, + eu.recovery_slope, + eu.amplitude, + eu.spread, + eu.velocity_above, + eu.velocity_below from ecephys_units eu join ecephys_channels ec on ec.id = eu.ecephys_channel_id join ecephys_probes ep on ep.id = ec.ecephys_probe_id - join ecephys_sessions es on es.id = ep.ecephys_session_id - where ec.valid_data - and ep.workflow_state != 'failed' - and es.workflow_state != 'failed' - {{pm.optional_equals('eu.quality', quality) -}} - {{pm.optional_contains('eu.id', unit_ids) -}} - {{pm.optional_contains('ec.id', channel_ids) -}} - {{pm.optional_contains('ep.id', probe_ids) -}} - {{pm.optional_contains('es.id', session_ids) -}} - {{pm.optional_le('eu.amplitude_cutoff', amplitude_cutoff_maximum) -}} - {{pm.optional_ge('eu.presence_ratio', presence_ratio_minimum) -}} - {{pm.optional_le('eu.isi_violations', isi_violations_maximum) -}} + join ecephys_sessions es on es.id = ep.ecephys_session_id + where + not es.habituation + and ec.valid_data + and ep.workflow_state != 'failed' + and es.workflow_state != 'failed' + {{pm.optional_not_null('es.published_at', published_at_not_null)}} + {{pm.optional_le('es.published_at', published_at)}} + {{pm.optional_contains('eu.id', unit_ids) -}} + {{pm.optional_contains('ec.id', channel_ids) -}} + {{pm.optional_contains('ep.id', probe_ids) -}} + {{pm.optional_contains('es.id', session_ids) -}} """, base=postgres_macros(), engine=self.postgres_engine.select, @@ -109,212 +223,187 @@ def get_units( channel_ids=channel_ids, probe_ids=probe_ids, session_ids=session_ids, - quality=f"'{quality}'" if quality is not None else quality, - amplitude_cutoff_maximum=get_unit_filter_value("amplitude_cutoff_maximum", replace_none=False, **kwargs), - presence_ratio_minimum=get_unit_filter_value("presence_ratio_minimum", replace_none=False, **kwargs), - isi_violations_maximum=get_unit_filter_value("isi_violations_maximum", replace_none=False, **kwargs) + **_split_published_at(published_at)._asdict() ) + return response.set_index("id", inplace=False) - response.set_index("id", inplace=True) + def get_channels( + self, + channel_ids: Optional[ArrayLike] = None, + probe_ids: Optional[ArrayLike] = None, + session_ids: Optional[ArrayLike] = None, + published_at: Optional[str] = None + ) -> pd.DataFrame: + """ Download a table of ecephys channel 
records. - return response + Parameters + ---------- + channel_ids : + A collection of integer identifiers for ecephys channels. If + provided, results will be filtered to these channels. + probe_ids : + A collection of integer identifiers for ecephys probes. If + provided, results will be filtered to channels on these probes. + session_ids : + A collection of integer identifiers for ecephys sessions. If + provided, results will be filtered to channels recorded from during + these sessions. + published_at : + A date (rendered as "YYYY-MM-DD"). If provided, only channels + recorded from during sessions published before this date will be + returned. + + Returns + ------- + a pd.DataFrame whose rows are ecephys channels. + + """ - def get_channels(self, channel_ids=None, probe_ids=None, session_ids=None, **kwargs): response = build_and_execute( """ {%- import 'postgres_macros' as pm -%} select - ec.id as id, - ec.ecephys_probe_id, + ec.id, + ec.ecephys_probe_id, ec.local_index, ec.probe_vertical_position, ec.probe_horizontal_position, ec.manual_structure_id as ecephys_structure_id, st.acronym as ecephys_structure_acronym, - pc.unit_count - from ecephys_channels ec + ec.anterior_posterior_ccf_coordinate, + ec.dorsal_ventral_ccf_coordinate, + ec.left_right_ccf_coordinate + from ecephys_channels ec join ecephys_probes ep on ep.id = ec.ecephys_probe_id - join ecephys_sessions es on es.id = ep.ecephys_session_id - left join structures st on st.id = ec.manual_structure_id - join ( - select ech.id as ecephys_channel_id, - count (distinct eun.id) as unit_count - from ecephys_channels ech - join ecephys_units eun on ( - eun.ecephys_channel_id = ech.id - and eun.quality = 'good' - {{pm.optional_le('eun.amplitude_cutoff', amplitude_cutoff_maximum) -}} - {{pm.optional_ge('eun.presence_ratio', presence_ratio_minimum) -}} - {{pm.optional_le('eun.isi_violations', isi_violations_maximum) -}} - ) - group by ech.id - ) pc on ec.id = pc.ecephys_channel_id - where valid_data - and ep.workflow_state != 'failed' - and es.workflow_state != 'failed' - {{pm.optional_contains('ec.id', channel_ids) -}} - {{pm.optional_contains('ep.id', probe_ids) -}} - {{pm.optional_contains('es.id', session_ids) -}} + join ecephys_sessions es on es.id = ep.ecephys_session_id + left join structures st on ec.manual_structure_id = st.id + where + not es.habituation + and valid_data + and ep.workflow_state != 'failed' + and es.workflow_state != 'failed' + {{pm.optional_not_null('es.published_at', published_at_not_null)}} + {{pm.optional_le('es.published_at', published_at)}} + {{pm.optional_contains('ec.id', channel_ids) -}} + {{pm.optional_contains('ep.id', probe_ids) -}} + {{pm.optional_contains('es.id', session_ids) -}} """, base=postgres_macros(), engine=self.postgres_engine.select, channel_ids=channel_ids, probe_ids=probe_ids, session_ids=session_ids, - amplitude_cutoff_maximum=get_unit_filter_value("amplitude_cutoff_maximum", replace_none=False, **kwargs), - presence_ratio_minimum=get_unit_filter_value("presence_ratio_minimum", replace_none=False, **kwargs), - isi_violations_maximum=get_unit_filter_value("isi_violations_maximum", replace_none=False, **kwargs) + **_split_published_at(published_at)._asdict() ) return response.set_index("id") - def get_probes(self, probe_ids=None, session_ids=None, **kwargs): + def get_probes( + self, + probe_ids: Optional[ArrayLike] = None, + session_ids: Optional[ArrayLike] = None, + published_at: Optional[str] = None + ) -> pd.DataFrame: + """ Download a table of ecephys probe records. 
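# Illustrative sketch (not part of the diff): the optional-filter pattern the
# queries above rely on. The real postgres_macros()/build_and_execute machinery
# is not reproduced here; a bare jinja2 template just shows the idea that a
# clause is only emitted when its argument is not None.
import jinja2

template = jinja2.Template(
    "select id from ecephys_units where true "
    "{% if unit_ids is not none %}and id in ({{ unit_ids | join(',') }}){% endif %}"
)
template.render(unit_ids=None)       # -> "select id from ecephys_units where true "
template.render(unit_ids=[1, 2, 3])  # -> "... and id in (1,2,3)"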
+ + Parameters + ---------- + probe_ids : + A collection of integer identifiers for ecephys probes. If + provided, results will be filtered to these probes. + session_ids : + A collection of integer identifiers for ecephys sessions. If + provided, results will be filtered to probes recorded from during + these sessions. + published_at : + A date (rendered as "YYYY-MM-DD"). If provided, only probes + recorded from during sessions published before this date will be + returned. + + Returns + ------- + a pd.DataFrame whose rows are ecephys probes. + + """ + response = build_and_execute( """ {%- import 'postgres_macros' as pm -%} select - ep.id as id, - ep.ecephys_session_id, - ep.global_probe_sampling_rate, - ep.global_probe_lfp_sampling_rate, - total_time_shift, - channel_count, - unit_count, - case - when nwb_id is not null then true - else false - end as has_lfp_nwb, - str.structure_acronyms as structure_acronyms - from ecephys_probes ep - join ecephys_sessions es on es.id = ep.ecephys_session_id - join ( - select epr.id as ecephys_probe_id, - count (distinct ech.id) as channel_count, - count (distinct eun.id) as unit_count - from ecephys_probes epr - join ecephys_channels ech on ( - ech.ecephys_probe_id = epr.id - and ech.valid_data - ) - join ecephys_units eun on ( - eun.ecephys_channel_id = ech.id - and eun.quality = 'good' - {{pm.optional_le('eun.amplitude_cutoff', amplitude_cutoff_maximum) -}} - {{pm.optional_ge('eun.presence_ratio', presence_ratio_minimum) -}} - {{pm.optional_le('eun.isi_violations', isi_violations_maximum) -}} - ) - group by epr.id - ) chc on ep.id = chc.ecephys_probe_id - left join ( - select - epr.id as ecephys_probe_id, - wkf.id as nwb_id - from ecephys_probes epr - join ecephys_analysis_runs ear on ( - ear.ecephys_session_id = epr.ecephys_session_id - and ear.current - ) - right join ecephys_analysis_run_probes earp on ( - earp.ecephys_probe_id = epr.id - and earp.ecephys_analysis_run_id = ear.id - ) - right join well_known_files wkf on ( - wkf.attachable_id = earp.id - and wkf.attachable_type = 'EcephysAnalysisRunProbe' - ) - join well_known_file_types wkft on wkft.id = wkf.well_known_file_type_id - where wkft.name = 'EcephysLfpNwb' - ) nwb on ep.id = nwb.ecephys_probe_id - left join ( - select epr.id as ecephys_probe_id, - array_agg (st.id) as structure_ids, - array_agg (distinct st.acronym) as structure_acronyms - from ecephys_probes epr - join ecephys_channels ech on ( - ech.ecephys_probe_id = epr.id - and ech.valid_data - ) - left join structures st on st.id = ech.manual_structure_id - group by epr.id - ) str on ep.id = str.ecephys_probe_id - where true - and ep.workflow_state != 'failed' - and es.workflow_state != 'failed' - {{pm.optional_contains('ep.id', probe_ids) -}} - {{pm.optional_contains('es.id', session_ids) -}} + ep.id, + ep.ecephys_session_id, + ep.name, + ep.global_probe_sampling_rate as sampling_rate, + ep.global_probe_lfp_sampling_rate as lfp_sampling_rate, + ep.phase, + ep.air_channel_index, + ep.surface_channel_index, + ep.use_lfp_data as has_lfp_data, + ep.temporal_subsampling_factor as lfp_temporal_subsampling_factor + from ecephys_probes ep + join ecephys_sessions es on es.id = ep.ecephys_session_id + where + not es.habituation + and ep.workflow_state != 'failed' + and es.workflow_state != 'failed' + {{pm.optional_not_null('es.published_at', published_at_not_null)}} + {{pm.optional_le('es.published_at', published_at)}} + {{pm.optional_contains('ep.id', probe_ids) -}} + {{pm.optional_contains('es.id', session_ids) -}} """, 
base=postgres_macros(), engine=self.postgres_engine.select, probe_ids=probe_ids, session_ids=session_ids, - amplitude_cutoff_maximum=get_unit_filter_value("amplitude_cutoff_maximum", replace_none=False, **kwargs), - presence_ratio_minimum=get_unit_filter_value("presence_ratio_minimum", replace_none=False, **kwargs), - isi_violations_maximum=get_unit_filter_value("isi_violations_maximum", replace_none=False, **kwargs) + **_split_published_at(published_at)._asdict() ) - response = response.set_index("id") - # Clarify name for external users - response.rename(columns={"use_lfp_data": "has_lfp_data"}, inplace=True) + return response.set_index("id") - return response def get_sessions( self, - session_ids=None, - workflow_states=("uploaded",), - published=None, - habituation=False, - project_names=( - "BrainTV Neuropixels Visual Behavior", - "BrainTV Neuropixels Visual Coding", - ), - **kwargs - ): + session_ids: Optional[ArrayLike] = None, + published_at: Optional[str] = None + ) -> pd.DataFrame: + """ Download a table of ecephys session records. + + Parameters + ---------- + session_ids : + A collection of integer identifiers for ecephys sessions. If + provided, results will be filtered to these sessions. + published_at : + A date (rendered as "YYYY-MM-DD"). If provided, only sessions + published before this date will be returned. + + Returns + ------- + a pd.DataFrame whose rows are ecephys sessions. + + """ response = build_and_execute( """ {%- import 'postgres_macros' as pm -%} {%- import 'macros' as m -%} select - stimulus_name as session_type, - sp.id as specimen_id, - es.id as id, + es.id, + es.specimen_id, + es.stimulus_name as session_type, + es.isi_experiment_id, + es.date_of_acquisition, + es.published_at, dn.full_genotype as genotype, - gd.name as gender, + gd.name as sex, ages.days as age_in_days, - pr.code as project_code, - probe_count, - channel_count, - unit_count, case when nwb_id is not null then true else false - end as has_nwb, - str.structure_acronyms as structure_acronyms + end as has_nwb from ecephys_sessions es join specimens sp on sp.id = es.specimen_id join donors dn on dn.id = sp.donor_id join genders gd on gd.id = dn.gender_id join ages on ages.id = dn.age_id - join projects pr on pr.id = es.project_id - join ( - select es.id as ecephys_session_id, - count (distinct epr.id) as probe_count, - count (distinct ech.id) as channel_count, - count (distinct eun.id) as unit_count - from ecephys_sessions es - join ecephys_probes epr on epr.ecephys_session_id = es.id - join ecephys_channels ech on ( - ech.ecephys_probe_id = epr.id - and ech.valid_data - ) - join ecephys_units eun on ( - eun.ecephys_channel_id = ech.id - and eun.quality = 'good' - {{pm.optional_le('eun.amplitude_cutoff', amplitude_cutoff_maximum) -}} - {{pm.optional_ge('eun.presence_ratio', presence_ratio_minimum) -}} - {{pm.optional_le('eun.isi_violations', isi_violations_maximum) -}} - ) - group by es.id - ) pc on es.id = pc.ecephys_session_id left join ( select ecephys_sessions.id as ecephys_session_id, wkf.id as nwb_id @@ -330,36 +419,17 @@ def get_sessions( join well_known_file_types wkft on wkft.id = wkf.well_known_file_type_id where wkft.name = 'EcephysNwb' ) nwb on es.id = nwb.ecephys_session_id - left join ( - select es.id as ecephys_session_id, - array_agg (st.id) as structure_ids, - array_agg (distinct st.acronym) as structure_acronyms - from ecephys_sessions es - join ecephys_probes epr on epr.ecephys_session_id = es.id - join ecephys_channels ech on ( - ech.ecephys_probe_id = epr.id - and 
ech.valid_data - ) - left join structures st on st.id = ech.manual_structure_id - group by es.id - ) str on es.id = str.ecephys_session_id - where true - {{pm.optional_contains('es.id', session_ids) -}} - {{pm.optional_contains('es.workflow_state', workflow_states, True) -}} - {{pm.optional_equals('es.habituation', habituation) -}} - {{pm.optional_not_null('es.published_at', published) -}} - {{pm.optional_contains('pr.name', project_names, True) -}} + where + not es.habituation + and es.workflow_state != 'failed' + {{pm.optional_contains('es.id', session_ids) -}} + {{pm.optional_not_null('es.published_at', published_at_not_null)}} + {{pm.optional_le('es.published_at', published_at)}} """, base=postgres_macros(), engine=self.postgres_engine.select, session_ids=session_ids, - workflow_states=workflow_states, - published=published, - habituation=f"{habituation}".lower() if habituation is not None else habituation, - project_names=project_names, - amplitude_cutoff_maximum=get_unit_filter_value("amplitude_cutoff_maximum", replace_none=False, **kwargs), - presence_ratio_minimum=get_unit_filter_value("presence_ratio_minimum", replace_none=False, **kwargs), - isi_violations_maximum=get_unit_filter_value("isi_violations_maximum", replace_none=False, **kwargs) + **_split_published_at(published_at)._asdict() ) response.set_index("id", inplace=True) @@ -367,7 +437,37 @@ def get_sessions( return response - def get_unit_analysis_metrics(self, unit_ids=None, ecephys_session_ids=None, session_types=None): + def get_unit_analysis_metrics( + self, + unit_ids: Optional[ArrayLike] = None, + ecephys_session_ids: Optional[ArrayLike] = None, + session_types: Optional[ArrayLike] = None + ) -> pd.DataFrame: + """ Fetch analysis metrics (stimulus set-specific characterizations of + unit response patterns) for ecephys units. Note that the metrics + returned depend on the stimuli that were presented during recording ( + and thus on the session_type) + + Parameters + --------- + unit_ids : + integer identifiers for a set of ecephys units. If provided, the + response will only include metrics calculated for these units + ecephys_session_ids : + integer identifiers for a set of ecephys sessions. If provided, the + response will only include metrics calculated for units identified + during these sessions + session_types : + string names identifying ecephys session types (e.g. + "brain_observatory_1.1" or "functional_connectivity") + + Returns + ------- + a pandas dataframe indexed by ecephys unit id whose columns are + metrics. + + """ + response = build_and_execute( """ {%- import 'postgres_macros' as pm -%} @@ -397,6 +497,73 @@ def get_unit_analysis_metrics(self, unit_ids=None, ecephys_session_ids=None, ses return response + def _get_template(self, name, namespace): + """ Identify the WellKnownFile record associated with a stimulus + template and stream its data if present. 
+ """ + + try: + well_known_file = build_and_execute( + f""" + select + st.well_known_file_id + from stimuli st + join stimulus_namespaces sn on sn.id = st.stimulus_namespace_id + where + st.name = '{name}' + and sn.name = '{namespace}' + """, + base=postgres_macros(), + engine=self.postgres_engine.select_one + ) + wkf_id = well_known_file["well_known_file_id"] + except (KeyError, IndexError): + raise ValueError(f"expected exactly 1 template for {name}") + + download_link = f"well_known_files/download/{wkf_id}?wkf_id={wkf_id}" + return self.app_engine.stream(download_link) + + + def get_natural_movie_template(self, number: int) -> Iterable[bytes]: + """ Download a template for the natural movie stimulus. This is the + actual movie that was shown during the recording session. + + Parameters + ---------- + number : + idenfifier for this movie (note that this is an integer, so to get + the template for natural_movie_three you should pass in 3) + + Returns + ------- + An iterable yielding an npy file as bytes + + """ + + return self._get_template( + f"natural_movie_{number}", self.STIMULUS_TEMPLATE_NAMESPACE + ) + + + def get_natural_scene_template(self, number: int) -> Iterable[bytes]: + """ Download a template for the natural scene stimulus. This is the + actual image that was shown during the recording session. + + Parameters + ---------- + number : + idenfifier for this scene + + Returns + ------- + An iterable yielding a tiff file as bytes. + + """ + return self._get_template( + f"natural_scene_{int(number)}", self.STIMULUS_TEMPLATE_NAMESPACE + ) + + @classmethod def default(cls, pg_kwargs=None, app_kwargs=None): @@ -411,3 +578,19 @@ def default(cls, pg_kwargs=None, app_kwargs=None): pg_engine = PostgresQueryMixin(**_pg_kwargs) app_engine = HttpEngine(**_app_kwargs) return cls(pg_engine, app_engine) + + +class SplitPublishedAt(NamedTuple): + published_at: Optional[str] + published_at_not_null: Optional[bool] + + +def _split_published_at(published_at: Optional[str]) -> SplitPublishedAt: + """ LIMS queries that filter on published_at need a couple of + reformattings of the argued date string. 
+ """ + + return SplitPublishedAt( + published_at=f"'{published_at}'" if published_at is not None else None, + published_at_not_null=None if published_at is None else True + ) diff --git a/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_warehouse_api.py b/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_warehouse_api.py index 5b15e961b..7a9faa70a 100644 --- a/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_warehouse_api.py +++ b/allensdk/brain_observatory/ecephys/ecephys_project_api/ecephys_project_warehouse_api.py @@ -70,7 +70,8 @@ def _list_stimulus_templates(self, ecephys_product_id=714914585): "[attachable_type$eq'Product']" r"[attachable_id$eq{{ecephys_product_id}}]" ), - engine=self.rma_engine.get_rma_tabular, ecephys_product_id=ecephys_product_id + engine=self.rma_engine.get_rma_tabular, + ecephys_product_id=ecephys_product_id ) scene_number = [] diff --git a/allensdk/brain_observatory/ecephys/ecephys_project_cache.py b/allensdk/brain_observatory/ecephys/ecephys_project_cache.py index 44f239764..116b2f63e 100644 --- a/allensdk/brain_observatory/ecephys/ecephys_project_cache.py +++ b/allensdk/brain_observatory/ecephys/ecephys_project_cache.py @@ -132,14 +132,8 @@ def _get_channels(self): def _get_units(self, filter_by_validity: bool = True, **unit_filter_kwargs) -> pd.DataFrame: path = self.get_cache_path(None, self.UNITS_KEY) - get_units = partial( - self.fetch_api.get_units, - amplitude_cutoff_maximum=None, # pull down all the units to csv and filter on the way out - presence_ratio_minimum=None, - isi_violations_maximum=None, - filter_by_validity=filter_by_validity - ) - units: pd.DataFrame = one_file_call_caching(path, get_units, write_csv, read_csv, num_tries=self.fetch_tries) + + units = one_file_call_caching(path, self.fetch_api.get_units, write_csv, read_csv, num_tries=self.fetch_tries) units = units.rename(columns={ 'PT_ratio': 'waveform_PT_ratio', 'amplitude': 'waveform_amplitude', diff --git a/allensdk/brain_observatory/ecephys/write_nwb/__main__.py b/allensdk/brain_observatory/ecephys/write_nwb/__main__.py index bb5be585d..d67fab6ab 100644 --- a/allensdk/brain_observatory/ecephys/write_nwb/__main__.py +++ b/allensdk/brain_observatory/ecephys/write_nwb/__main__.py @@ -32,7 +32,8 @@ from allensdk.brain_observatory import dict_to_indexed_array from allensdk.brain_observatory.ecephys.file_io.continuous_file import ContinuousFile from allensdk.brain_observatory.ecephys.nwb import EcephysProbe, EcephysLabMetaData -from allensdk.brain_observatory.gaze_mapping._sync_frames import get_synchronized_camera_frame_times +from allensdk.brain_observatory.sync_dataset import Dataset +import allensdk.brain_observatory.sync_utilities as su STIM_TABLE_RENAMES_MAP = {"Start": "start_time", "End": "stop_time"} @@ -753,24 +754,25 @@ def write_ecephys_nwb( add_raw_running_data_to_nwbfile(nwbfile, raw_running_data) # --- Add eye tracking ellipse fits to nwb file --- - eye_tracking_frame_times = get_synchronized_camera_frame_times(session_sync_path) + eye_tracking_frame_times = su.get_synchronized_frame_times(session_sync_file=session_sync_path, + sync_line_label_keys=Dataset.EYE_TRACKING_KEYS) eye_dlc_tracking_data = read_eye_dlc_tracking_ellipses(Path(eye_dlc_ellipses_path)) if eye_tracking_data_is_valid(eye_dlc_tracking_data=eye_dlc_tracking_data, synced_timestamps=eye_tracking_frame_times): add_eye_tracking_ellipse_fit_data_to_nwbfile(nwbfile, - eye_dlc_tracking_data=eye_dlc_tracking_data, - 
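# Worked example (not part of the diff) of the published_at reformatting
# implemented by _split_published_at in the ecephys LIMS API above: the date is
# wrapped in SQL quotes and a not-null flag is derived for the jinja macros.
_split_published_at("2019-10-03")
# -> SplitPublishedAt(published_at="'2019-10-03'", published_at_not_null=True)
_split_published_at(None)
# -> SplitPublishedAt(published_at=None, published_at_not_null=None)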
synced_timestamps=eye_tracking_frame_times) + eye_dlc_tracking_data=eye_dlc_tracking_data, + synced_timestamps=eye_tracking_frame_times) # --- Append eye tracking rig geometry info to nwb file (with eye tracking) --- append_eye_tracking_rig_geometry_data_to_nwbfile(nwbfile, - eye_tracking_rig_geometry=eye_tracking_rig_geometry) + eye_tracking_rig_geometry=eye_tracking_rig_geometry) # --- Add gaze mapped positions to nwb file --- if eye_gaze_mapping_path: eye_gaze_data = read_eye_gaze_mappings(Path(eye_gaze_mapping_path)) add_eye_gaze_mapping_data_to_nwbfile(nwbfile, - eye_gaze_data=eye_gaze_data) + eye_gaze_data=eye_gaze_data) Manifest.safe_make_parent_dirs(output_path) io = pynwb.NWBHDF5IO(output_path, mode='w') diff --git a/allensdk/brain_observatory/gaze_mapping/__main__.py b/allensdk/brain_observatory/gaze_mapping/__main__.py index 4d04c6f0e..eb0eca1e1 100644 --- a/allensdk/brain_observatory/gaze_mapping/__main__.py +++ b/allensdk/brain_observatory/gaze_mapping/__main__.py @@ -1,6 +1,7 @@ import logging import sys from pathlib import Path +from typing import Dict import numpy as np import pandas as pd @@ -17,34 +18,80 @@ ) from allensdk.brain_observatory.gaze_mapping._gaze_mapper import ( compute_circular_areas, + compute_elliptical_areas, GazeMapper ) from allensdk.brain_observatory.gaze_mapping._filter_utils import ( post_process_areas, post_process_cr, ) -from allensdk.brain_observatory.gaze_mapping._sync_frames import ( - get_synchronized_camera_frame_times -) +from allensdk.brain_observatory.sync_dataset import Dataset +import allensdk.brain_observatory.sync_utilities as su + + +def load_ellipse_fit_params(input_file: Path) -> Dict[str, pd.DataFrame]: + """Load Deep Lab Cut (DLC) ellipse fit h5 data as a dictionary of pandas + DataFrames. + + Parameters + ---------- + input_file : Path + Path to DLC .h5 file containing ellipse fits for pupil, + cr (corneal reflection), and eye. + + Returns + ------- + Dict[str, pd.DataFrame] + Dictionary where keys specify name of ellipse fit param type and values + are pandas DataFrames containing ellipse fit params. + + Raises + ------ + RuntimeError + If pupil, cr, and eye ellipse fits don't have the same number of rows. + """ + # TODO: Some ellipses.h5 files have the 'cr' key as complex type instead of + # float. For now, when loading ellipses.h5 files, always coerce to float + # but this should eventually be resolved upstream... + pupil_params = pd.read_hdf(input_file, key="pupil").astype(float) + cr_params = pd.read_hdf(input_file, key="cr").astype(float) + eye_params = pd.read_hdf(input_file, key="eye").astype(float) + + num_frames_match = ((pupil_params.shape[0] == cr_params.shape[0]) + and (cr_params.shape[0] == eye_params.shape[0])) + if not num_frames_match: + raise RuntimeError("The number of frames for ellipse fits don't " + "match when they should: " + f"pupil_params ({pupil_params.shape[0]}), " + f"cr_params ({cr_params.shape[0]}), " + f"eye_params ({eye_params.shape[0]}).") + + return {"pupil_params": pupil_params, + "cr_params": cr_params, + "eye_params": eye_params} -def repackage_input_args(parser_args: dict) -> dict: - """Repackage arguments obtained by argschema. + +def preprocess_input_args(parser_args: dict) -> dict: + """Preprocess arguments obtained by argschema. 1) Converts individual coordinate/rotation fields to numpy position/rotation arrays. 2) Convert all arguments in millimeters to centimeters - Args: - parser_args (dict): Parsed args obtained from argschema. 
+ Parameters + ---------- + parser_args (dict): Parsed args obtained from argschema. - Returns: - dict: Repackaged args. + Returns + ------- + dict: Repackaged args. """ new_args: dict = {} - new_args["input_file"] = parser_args["input_file"] + new_args.update(load_ellipse_fit_params(parser_args["input_file"])) + new_args["session_sync_file"] = parser_args["session_sync_file"] new_args["output_file"] = parser_args["output_file"] @@ -100,32 +147,34 @@ def run_gaze_mapping(pupil_parameters: pd.DataFrame, Example: Z-axis for monitor and camera are aligned with X-axis for eye coordinate system - Args: - pupil_parameters (pd.DataFrame): A table of pupil parameters with + Parameters + ---------- + pupil_parameters (pd.DataFrame): A table of pupil parameters with 5 columns ("center_x", "center_y", "height", "phi", "width") - and n-row timepoints. Coordinate - cr_parameters (pd.DataFrame): A table of corneal reflection params with + and n-row timepoints. + cr_parameters (pd.DataFrame): A table of corneal reflection params with 5 columns ("center_x", "center_y", "height", "phi", "width") and n-row timepoints. - eye_parameters (pd.DataFrame): A table of eye parameters with + eye_parameters (pd.DataFrame): A table of eye parameters with 5 columns ("center_x", "center_y", "height", "phi", "width") and n-row timepoints. - monitor_position (np.ndarray): An array describing monitor position - [x, y, z] - monitor_rotations (np.ndarray): An array describing monitor orientation - about [x, y, z] axes. - camera_position (np.ndarray): An array describing camera position - [x, y, z] - camera_rotations (np.ndarray): An array describing camera orientation - about [x, y, z] axes. - led_position (np.ndarray): An array describing LED position [x, y, z] - eye_radius_cm (float): Radius of eye being tracked in cm. - cm_per_pixel (float): Ratio of centimeters per pixel - - Returns: + monitor_position (np.ndarray): An array describing monitor position + [x, y, z] + monitor_rotations (np.ndarray): An array describing monitor orientation + about [x, y, z] axes. + camera_position (np.ndarray): An array describing camera position + [x, y, z] + camera_rotations (np.ndarray): An array describing camera orientation + about [x, y, z] axes. + led_position (np.ndarray): An array describing LED position [x, y, z] + eye_radius_cm (float): Radius of eye being tracked in cm. + cm_per_pixel (float): Ratio of centimeters per pixel + + Returns + ------- dict: A dictionary of gaze mapping outputs with - fields for: `pupil_areas`, `eye_areas`, `pupil_on_monitor_cm`, and - `pupil_on_monitor_deg`. + fields for: `pupil_areas` (in cm^2), `eye_areas` (in cm^2), + `pupil_on_monitor_cm`, and `pupil_on_monitor_deg`. 
""" output = {} @@ -137,8 +186,11 @@ def run_gaze_mapping(pupil_parameters: pd.DataFrame, eye_radius=eye_radius_cm, cm_per_pixel=cm_per_pixel) - raw_pupil_areas = compute_circular_areas(pupil_parameters) - raw_eye_areas = compute_circular_areas(eye_parameters) + pupil_params_in_cm = pupil_parameters * cm_per_pixel + raw_pupil_areas = compute_circular_areas(pupil_params_in_cm) + + eye_params_in_cm = eye_parameters * cm_per_pixel + raw_eye_areas = compute_elliptical_areas(eye_params_in_cm) raw_pupil_on_monitor_cm = gaze_mapper.pupil_position_on_monitor_in_cm( cam_pupil_params=pupil_parameters[["center_x", "center_y"]].values, @@ -221,6 +273,40 @@ def write_gaze_mapping_output_to_h5(output_savepath: Path, version.to_hdf(output_savepath, key="version", mode="a") +def load_sync_file_timings(sync_file: Path, + pupil_params_rows: int) -> pd.Series: + """Load sync file timings from .h5 file. + + Parameters + ---------- + sync_file : Path + Path to .h5 sync file. + pupil_params_rows : int + Number of rows in pupil params. + + Returns + ------- + pd.Series + A series of frame times. (New frame times according to synchronized + timings from DAQ) + + Raises + ------ + RuntimeError + If the number of eye tracking frames (pupil_params_rows) does not match + up with number of new frame times from the sync file. + """ + # Add synchronized frame times + frame_times = su.get_synchronized_frame_times(session_sync_file=sync_file, + sync_line_label_keys=Dataset.EYE_TRACKING_KEYS) + if (pupil_params_rows != len(frame_times)): + raise RuntimeError("The number of camera sync pulses in the " + f"sync file ({len(frame_times)}) do not match " + "with the number of eye tracking frames " + f"({pupil_params_rows})!!!") + return frame_times + + def main(): logging.basicConfig(format=('%(asctime)s:%(funcName)s' @@ -230,27 +316,11 @@ def main(): schema_type=InputSchema, output_schema_type=OutputSchema) - args = repackage_input_args(parser.args) - - # TODO: Some ellipses.h5 files have the 'cr' key as complex type instead of - # float. For now, when loading ellipses.h5 files, always coerce to float - # but this should eventually be resolved upstream... 
- pupil_params = pd.read_hdf(args['input_file'], key="pupil").astype(float) - cr_params = pd.read_hdf(args['input_file'], key="cr").astype(float) - eye_params = pd.read_hdf(args['input_file'], key="eye").astype(float) - - num_frames_match = ((pupil_params.shape[0] == cr_params.shape[0]) - and (cr_params.shape[0] == eye_params.shape[0])) - if not num_frames_match: - raise RuntimeError("The number of frames for ellipse fits don't " - "match when they should: " - f"pupil_params ({pupil_params.shape[0]}), " - f"cr_params ({cr_params.shape[0]}), " - f"eye_params ({eye_params.shape[0]}).") + args = preprocess_input_args(parser.args) - output = run_gaze_mapping(pupil_parameters=pupil_params, - cr_parameters=cr_params, - eye_parameters=eye_params, + output = run_gaze_mapping(pupil_parameters=args["pupil_params"], + cr_parameters=args["cr_params"], + eye_parameters=args["eye_params"], monitor_position=args["monitor_position"], monitor_rotations=args["monitor_rotations"], camera_position=args["camera_position"], @@ -259,14 +329,8 @@ def main(): eye_radius_cm=args["eye_radius_cm"], cm_per_pixel=args["cm_per_pixel"]) - # Add synchronized frame times - frame_times = get_synchronized_camera_frame_times(args["session_sync_file"]) - if (pupil_params.shape[0] != len(frame_times)): - raise RuntimeError("The number of camera sync pulses in the " - f"sync file ({len(frame_times)}) do not match " - "with the number of eye tracking frames " - f"({pupil_params.shape[0]})!!!") - output["synced_frame_timestamps_sec"] = frame_times + output["synced_frame_timestamps_sec"] = load_sync_file_timings(args["session_sync_file"], + args["pupil_params"].shape[0]) write_gaze_mapping_output_to_h5(args["output_file"], output) module_output = {"screen_mapping_file": str(args["output_file"])} diff --git a/allensdk/brain_observatory/gaze_mapping/_gaze_mapper.py b/allensdk/brain_observatory/gaze_mapping/_gaze_mapper.py index 0b2a008bf..39c0b8f1a 100644 --- a/allensdk/brain_observatory/gaze_mapping/_gaze_mapper.py +++ b/allensdk/brain_observatory/gaze_mapping/_gaze_mapper.py @@ -297,31 +297,53 @@ def pupil_position_on_monitor_in_degrees(self, def compute_circular_areas(ellipse_params: pd.DataFrame) -> pd.Series: - """Compute circular area of a pupil or eye ellipse using half-major axis. + """Compute circular area of a pupil using half-major axis. - Assume the pupil/eye is a circle, and that as it moves off-axis + Assume the pupil is a circle, and that as it moves off-axis with the camera, the observed ellipse semi-major axis remains the radius of the circle. - Args: - ellipse_params (pandas.DataFrame): A table of pupil/eye parameters - consisting of 5 columns: - ("center_x", "center_y", "height", "phi", "width") - and n-row timepoints. + Parameters + ---------- + ellipse_params (pandas.DataFrame): A table of pupil parameters consisting + of 5 columns: ("center_x", "center_y", "height", "phi", "width") + and n-row timepoints. - NOTE: For ellipse_params produced by the Deep Lab Cut pipeline, - "width" and "height" columns, in fact, refer to the - "half-width" and "half-height". + NOTE: For ellipse_params produced by the Deep Lab Cut pipeline, + "width" and "height" columns, in fact, refer to the + "half-width" and "half-height". - Returns: - pandas.Series: A series of pupil/eye areas for n-timepoints. + Returns + ------- + pandas.Series: A series of pupil areas for n-timepoints. """ # Take the biggest value between height and width columns and - # assume that it is the pupil/eye circle radius. 
+ # assume that it is the pupil circle radius. radii = ellipse_params[["height", "width"]].max(axis=1) return np.pi * radii * radii +def compute_elliptical_areas(ellipse_params: pd.DataFrame) -> pd.Series: + """Compute the elliptical area using elliptical fit parameters. + + Parameters + ---------- + ellipse_params (pandas.DataFrame): A table of pupil parameters consisting + of 5 columns: ("center_x", "center_y", "height", "phi", "width") + and n-row timepoints. + + NOTE: For ellipse_params produced by the Deep Lab Cut pipeline, + "width" and "height" columns, in fact, refer to the + "half-width" and "half-height". + + Returns + ------- + pd.Series + pandas.Series: A series of areas for n-timepoints. + """ + return np.pi * ellipse_params["height"] * ellipse_params["width"] + + def project_to_plane(plane_normal: np.ndarray, plane_point: np.ndarray, line_vectors: np.ndarray, diff --git a/allensdk/brain_observatory/gaze_mapping/_sync_frames.py b/allensdk/brain_observatory/gaze_mapping/_sync_frames.py deleted file mode 100644 index 8a249c245..000000000 --- a/allensdk/brain_observatory/gaze_mapping/_sync_frames.py +++ /dev/null @@ -1,31 +0,0 @@ -import pandas as pd -from pathlib import Path - -from allensdk.brain_observatory.sync_dataset import Dataset -from allensdk.brain_observatory import sync_utilities - - -def get_synchronized_camera_frame_times(session_sync_file: Path) -> pd.Series: - """Get eye tracking camera frame times from an experiment session sync file. - - Args: - session_sync_file (Path): Path to an ephys session sync file. - The sync file contains rising/falling edges from a daq system which - indicates when certain events occur (so they can be related to - each other). - - Returns: - pandas.Series: An array of times when frames for the eye tracking - camera were acquired. - """ - sync_dataset = Dataset(str(session_sync_file)) - - frame_times = sync_dataset.get_edges( - "rising", Dataset.EYE_TRACKING_KEYS, units="seconds" - ) - - # Occasionally an extra set of frame times are acquired after the rest of - # the signals. We detect and remove these. - frame_times = sync_utilities.trim_discontiguous_times(frame_times) - - return pd.Series(frame_times) diff --git a/allensdk/brain_observatory/stimulus_analysis.py b/allensdk/brain_observatory/stimulus_analysis.py index 69c83d2bf..9a4838fb8 100644 --- a/allensdk/brain_observatory/stimulus_analysis.py +++ b/allensdk/brain_observatory/stimulus_analysis.py @@ -33,7 +33,9 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # +import warnings import scipy.stats as st +import scipy import numpy as np import pandas as pd import logging @@ -86,6 +88,10 @@ def __init__(self, data_set): self._pval = StimulusAnalysis._PRELOAD self._peak = StimulusAnalysis._PRELOAD + # get_speed_tuning emits a warning describing a scipy ks_2samp update. + # we only want to see this warning once + self.__warned_speed_tuning = False + @property def stim_table(self): if self._stim_table is StimulusAnalysis._PRELOAD: @@ -285,6 +291,13 @@ def get_speed_tuning(self, binsize): tuple: binned_dx_sp, binned_cells_sp, binned_dx_vis, binned_cells_vis, peak_run """ + if not self.__warned_speed_tuning: + self.__warned_speed_tuning = True + warnings.warn( + f"scipy 1.3 (your version: {scipy.__version__}) improved two-sample Kolmogorov-Smirnoff test p values for small and medium-sized samples. " + "Precalculated speed tuning p values may not agree with outputs obtained under recent scipy versions!" 
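# Worked example (not part of the diff) contrasting the two area helpers above.
# Per the DLC note, "width"/"height" are half-axes; values here are arbitrary
# and assumed already scaled to cm.
import numpy as np
import pandas as pd

params = pd.DataFrame({"center_x": [0.0], "center_y": [0.0],
                       "width": [0.2], "height": [0.1], "phi": [0.0]})
np.pi * params[["height", "width"]].max(axis=1) ** 2   # circular:   ~0.1257 cm^2
np.pi * params["height"] * params["width"]             # elliptical: ~0.0628 cm^2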
+ ) + StimulusAnalysis._log.info( 'Calculating speed tuning, spontaneous vs visually driven') diff --git a/allensdk/brain_observatory/sync_dataset.py b/allensdk/brain_observatory/sync_dataset.py index d50f101bf..8dc0a8af6 100644 --- a/allensdk/brain_observatory/sync_dataset.py +++ b/allensdk/brain_observatory/sync_dataset.py @@ -90,6 +90,7 @@ class Dataset(object): OPTOGENETIC_STIMULATION_KEYS = ("LED_sync", "opto_trial") EYE_TRACKING_KEYS = ("cam2_exposure", # clocks eye tracking frame pulses (port 0, line 9) "eyetracking") # previous line label for eye tracking (prior to ~ Oct. 2018) + BEHAVIOR_TRACKING_KEYS = ("cam1_exposure",) # clocks behavior tracking frame pulses (port 0, line 8) def __init__(self, path): self.dfile = self.load(path) @@ -302,7 +303,7 @@ def get_edges(self, kind, keys, units='seconds'): fn = self.get_rising_edges elif kind == 'all': return np.sort(np.concatenate([ - self.get_edges('rising', keys, units), + self.get_edges('rising', keys, units), self.get_edges('falling', keys, units) ])) diff --git a/allensdk/brain_observatory/sync_utilities/__init__.py b/allensdk/brain_observatory/sync_utilities/__init__.py index 659b59d40..08829daef 100644 --- a/allensdk/brain_observatory/sync_utilities/__init__.py +++ b/allensdk/brain_observatory/sync_utilities/__init__.py @@ -1,4 +1,10 @@ +from pathlib import Path +from typing import Tuple + import numpy as np +import pandas as pd + +from allensdk.brain_observatory.sync_dataset import Dataset def trim_discontiguous_times(times, threshold=100): @@ -13,4 +19,38 @@ def trim_discontiguous_times(times, threshold=100): if len(gap_indices) == 0: return times - return times[:gap_indices[0]+1] \ No newline at end of file + return times[:gap_indices[0] + 1] + + +def get_synchronized_frame_times(session_sync_file: Path, + sync_line_label_keys: Tuple[str, ...]) -> pd.Series: + """Get experimental frame times from an experiment session sync file. + + Parameters + ---------- + session_sync_file : Path + Path to an ephys session sync file. + The sync file contains rising/falling edges from a daq system which + indicates when certain events occur (so they can be related to + each other). + sync_line_label_keys : Tuple[str, ...] + Line label keys to get times for. See class attributes of + allensdk.brain_observatory.sync_dataset.Dataset for a listing of + possible keys. + + Returns + ------- + pd.Series + An array of times when frames for the eye tracking camera were acquired. + """ + sync_dataset = Dataset(str(session_sync_file)) + + frame_times = sync_dataset.get_edges( + "rising", sync_line_label_keys, units="seconds" + ) + + # Occasionally an extra set of frame times are acquired after the rest of + # the signals. We detect and remove these. + frame_times = trim_discontiguous_times(frame_times) + + return pd.Series(frame_times) diff --git a/allensdk/core/cache_method_utilities.py b/allensdk/core/cache_method_utilities.py new file mode 100644 index 000000000..a487852fd --- /dev/null +++ b/allensdk/core/cache_method_utilities.py @@ -0,0 +1,18 @@ +import inspect + + +class CachedInstanceMethodMixin(object): + def cache_clear(self): + """ + Calls `cache_clear` method on all bound methods in this instance + (where valid). + Intended to clear calls cached with the `memoize` decorator. + Note that this will also clear functions decorated with `lru_cache` and + `lfu_cache` in this class (or any other function with `cache_clear` + attribute). 
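# Hedged usage sketch (not part of the diff) of the generalized sync utility
# added above; the sync file path is hypothetical.
from pathlib import Path

from allensdk.brain_observatory.sync_dataset import Dataset
import allensdk.brain_observatory.sync_utilities as su

sync_path = Path("/path/to/session.sync")          # hypothetical path
eye_times = su.get_synchronized_frame_times(
    session_sync_file=sync_path,
    sync_line_label_keys=Dataset.EYE_TRACKING_KEYS)
behavior_times = su.get_synchronized_frame_times(
    session_sync_file=sync_path,
    sync_line_label_keys=Dataset.BEHAVIOR_TRACKING_KEYS)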
+ """ + for _, method in inspect.getmembers(self, inspect.ismethod): + try: + method.cache_clear() + except (AttributeError, TypeError): + pass diff --git a/allensdk/core/exceptions.py b/allensdk/core/exceptions.py index 61bcf4d5a..94dc8f028 100644 --- a/allensdk/core/exceptions.py +++ b/allensdk/core/exceptions.py @@ -9,6 +9,7 @@ def __init__(self, msg, caught_exception=None): error_string = msg super().__init__(error_string) + class DataFrameIndexError(LookupError): """More verbose method for accessing invalid rows or columns in a dataframe. Should be used when an index error is thrown on a dataframe. @@ -20,3 +21,6 @@ def __init__(self, msg, caught_exception=None): error_string = msg super().__init__(error_string) + +class MissingDataError(ValueError): + pass diff --git a/allensdk/core/typing.py b/allensdk/core/typing.py new file mode 100644 index 000000000..df00f7be0 --- /dev/null +++ b/allensdk/core/typing.py @@ -0,0 +1,9 @@ +from typing import _Protocol +from abc import abstractmethod + + +class SupportsStr(_Protocol): + """Classes that support the __str__ method""" + @abstractmethod + def __str__(self) -> str: + pass diff --git a/allensdk/internal/api/behavior_data_lims_api.py b/allensdk/internal/api/behavior_data_lims_api.py new file mode 100644 index 000000000..04f17266f --- /dev/null +++ b/allensdk/internal/api/behavior_data_lims_api.py @@ -0,0 +1,478 @@ +import numpy as np +import pandas as pd +import uuid +from datetime import datetime +import pytz + +from typing import Dict, Optional, Union, List, Any + +from allensdk.core.exceptions import DataFrameIndexError +from allensdk.brain_observatory.behavior.internal.behavior_base import ( + BehaviorBase) +from allensdk.brain_observatory.behavior.rewards_processing import get_rewards +from allensdk.brain_observatory.behavior.running_processing import ( + get_running_df) +from allensdk.brain_observatory.behavior.stimulus_processing import ( + get_stimulus_presentations, get_stimulus_templates, get_stimulus_metadata) +from allensdk.brain_observatory.running_speed import RunningSpeed +from allensdk.brain_observatory.behavior.metadata_processing import ( + get_task_parameters) +from allensdk.brain_observatory.behavior.trials_processing import get_trials +from allensdk.internal.core.lims_utilities import safe_system_path +from allensdk.internal.api import PostgresQueryMixin +from allensdk.api.cache import memoize +from allensdk.internal.api import ( + OneResultExpectedError, OneOrMoreResultExpectedError) +from allensdk.core.cache_method_utilities import CachedInstanceMethodMixin + + +class BehaviorDataLimsApi(PostgresQueryMixin, CachedInstanceMethodMixin, + BehaviorBase): + def __init__(self, behavior_session_id): + super().__init__() + # TODO: this password has been exposed in code but we really need + # to move towards using a secrets database + self.mtrain_db = PostgresQueryMixin( + dbname="mtrain", user="mtrainreader", + host="prodmtrain1", port=5432, password="mtrainro") + self.behavior_session_id = behavior_session_id + ids = self._get_ids() + self.ophys_experiment_ids = ids.get("ophys_experiment_ids") + self.ophys_session_id = ids.get("ophys_session_id") + self.behavior_training_id = ids.get("behavior_training_id") + self.foraging_id = ids.get("foraging_id") + self.ophys_container_id = ids.get("ophys_container_id") + + def _get_ids(self) -> Dict[str, Optional[Union[int, List[int]]]]: + """Fetch ids associated with this behavior_session_id. If there is no + id, return None. 
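# Illustrative sketch (not part of the diff): how CachedInstanceMethodMixin
# pairs with the memoize decorator used throughout the LIMS API below.
# ExpensiveApi is a hypothetical class.
from allensdk.api.cache import memoize
from allensdk.core.cache_method_utilities import CachedInstanceMethodMixin

class ExpensiveApi(CachedInstanceMethodMixin):
    @memoize
    def get_value(self, key):
        print("computing", key)    # printed only on a cache miss
        return key * 2

api = ExpensiveApi()
api.get_value(3)    # computes and caches
api.get_value(3)    # returned from the memoize cache
api.cache_clear()   # mixin walks bound methods and clears each method's cache
api.get_value(3)    # computes again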
+ :returns: Dictionary of ids with the following keys: + behavior_training_id: int -- Only if was a training session + ophys_session_id: int -- None if have behavior_training_id + ophys_experiment_ids: List[int] -- only if have ophys_session_id + foraging_id: int + :rtype: dict + """ + # Get all ids from the behavior_sessions table + query = f""" + SELECT + ophys_session_id, behavior_training_id, foraging_id + FROM + behavior_sessions + WHERE + behavior_sessions.id = {self.behavior_session_id}; + """ + ids_response = self.select(query) + if len(ids_response) > 1: + raise OneResultExpectedError + ids_dict = ids_response.iloc[0].to_dict() + + # Get additional ids if also an ophys session + # (experiment_id, container_id) + if ids_dict.get("ophys_session_id"): + oed_query = f""" + SELECT id + FROM ophys_experiments + WHERE ophys_session_id = {ids_dict["ophys_session_id"]}; + """ + oed = self.fetchall(oed_query) + + container_query = f""" + SELECT DISTINCT + visual_behavior_experiment_container_id id + FROM + ophys_experiments_visual_behavior_experiment_containers + WHERE + ophys_experiment_id IN ({",".join(set(map(str, oed)))}); + """ + container_id = self.fetchone(container_query, strict=True) + + ids_dict.update({"ophys_experiment_ids": oed, + "ophys_container_id": container_id}) + else: + ids_dict.update({"ophys_experiment_ids": None, + "ophys_container_id": None}) + return ids_dict + + def get_behavior_session_id(self) -> int: + """Getter to be consistent with BehaviorOphysLimsApi.""" + return self.behavior_session_id + + def get_behavior_session_uuid(self) -> Optional[int]: + data = self._behavior_stimulus_file() + return data.get("session_uuid") + + def get_behavior_stimulus_file(self) -> str: + """Return the path to the StimulusPickle file for a session. + :rtype: str + """ + query = f""" + SELECT + stim.storage_directory || stim.filename AS stim_file + FROM + well_known_files stim + WHERE + stim.attachable_id = {self.behavior_session_id} + AND stim.attachable_type = 'BehaviorSession' + AND stim.well_known_file_type_id IN ( + SELECT id + FROM well_known_file_types + WHERE name = 'StimulusPickle'); + """ + return safe_system_path(self.fetchone(query, strict=True)) + + @memoize + def _behavior_stimulus_file(self) -> pd.DataFrame: + """Helper method to cache stimulus file in memory since it takes about + a second to load (and is used in many methods). + """ + return pd.read_pickle(self.get_behavior_stimulus_file()) + + def get_licks(self) -> pd.DataFrame: + """Get lick data from pkl file. + This function assumes that the first sensor in the list of + lick_sensors is the desired lick sensor. If this changes we need + to update to get the proper line. + + :returns: pd.DataFrame -- A dataframe containing lick timestamps + """ + # Get licks from pickle file instead of sync + data = self._behavior_stimulus_file() + stimulus_timestamps = self.get_stimulus_timestamps() + lick_frames = (data["items"]["behavior"]["lick_sensors"][0] + ["lick_events"]) + lick_times = [stimulus_timestamps[frame] for frame in lick_frames] + return pd.DataFrame({"time": lick_times}) + + def get_rewards(self) -> pd.DataFrame: + """Get reward data from pkl file, based on pkl file timestamps + (not sync file). + + :returns: pd.DataFrame -- A dataframe containing timestamps of + delivered rewards. 
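# Tiny worked example (not part of the diff) of the frame-index -> time lookup
# get_licks performs above; numbers are illustrative.
import pandas as pd

stimulus_timestamps = [0.0, 0.0167, 0.0334, 0.0501]   # seconds, one per frame
lick_frames = [1, 3]
pd.DataFrame({"time": [stimulus_timestamps[f] for f in lick_frames]})
#      time
# 0  0.0167
# 1  0.0501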
+ """ + data = self._behavior_stimulus_file() + # No sync timestamps to rebase on, so pass dummy rebase function + return get_rewards(data, lambda x: x) + + def get_running_data_df(self) -> pd.DataFrame: + """Get running speed data. + + :returns: pd.DataFrame -- dataframe containing various signals used + to compute running speed. + """ + stimulus_timestamps = self.get_stimulus_timestamps() + data = self._behavior_stimulus_file() + return get_running_df(data, stimulus_timestamps) + + def get_running_speed(self) -> RunningSpeed: + """Get running speed using timestamps from + self.get_stimulus_timestamps. + + NOTE: Do not correct for monitor delay. + + :returns: RunningSpeed -- a NamedTuple containing the subject's + timestamps and running speeds (in cm/s) + """ + running_data_df = self.get_running_data_df() + if running_data_df.index.name != "timestamps": + raise DataFrameIndexError( + f"Expected index to be named 'timestamps' but got " + "'{running_data_df.index.name}'.") + return RunningSpeed(timestamps=running_data_df.index.values, + values=running_data_df.speed.values) + + def get_stimulus_frame_rate(self) -> float: + stimulus_timestamps = self.get_stimulus_timestamps() + return np.round(1 / np.mean(np.diff(stimulus_timestamps)), 0) + + def get_stimulus_presentations(self) -> pd.DataFrame: + """Get stimulus presentation data. + + NOTE: Uses timestamps that do not account for monitor delay. + + :returns: pd.DataFrame -- + Table whose rows are stimulus presentations + (i.e. a given image, for a given duration, typically 250 ms) + and whose columns are presentation characteristics. + """ + stimulus_timestamps = self.get_stimulus_timestamps() + data = self._behavior_stimulus_file() + raw_stim_pres_df = get_stimulus_presentations( + data, stimulus_timestamps) + + # Fill in nulls for image_name + # This makes two assumptions: + # 1. Nulls in `image_name` should be "gratings_" + # 2. Gratings are only present (or need to be fixed) when all + # values for `image_name` are null. + if pd.isnull(raw_stim_pres_df["image_name"]).all(): + if ~pd.isnull(raw_stim_pres_df["orientation"]).all(): + raw_stim_pres_df["image_name"] = ( + raw_stim_pres_df["orientation"] + .apply(lambda x: f"gratings_{x}")) + else: + raise ValueError("All values for 'orentation' and 'image_name'" + " are null.") + + stimulus_metadata_df = get_stimulus_metadata(data) + idx_name = raw_stim_pres_df.index.name + stimulus_index_df = ( + raw_stim_pres_df + .reset_index() + .merge(stimulus_metadata_df.reset_index(), on=["image_name"]) + .set_index(idx_name)) + stimulus_index_df = ( + stimulus_index_df[["image_set", "image_index", "start_time"]] + .rename(columns={"start_time": "timestamps"}) + .sort_index() + .set_index("timestamps", drop=True)) + stim_pres_df = raw_stim_pres_df.merge( + stimulus_index_df, left_on="start_time", right_index=True, + how="left") + if len(raw_stim_pres_df) != len(stim_pres_df): + raise ValueError("Length of `stim_pres_df` should not change after" + f" merge; was {len(raw_stim_pres_df)}, now " + f" {len(stim_pres_df)}.") + return stim_pres_df[sorted(stim_pres_df)] + + @memoize + def get_stimulus_templates(self) -> Dict[str, np.ndarray]: + """Get stimulus templates (movies, scenes) for behavior session. + + Returns + ------- + Dict[str, np.ndarray] + A dictionary containing the stimulus images presented during the + session. Keys are data set names, and values are 3D numpy arrays. 
+ """ + data = self._behavior_stimulus_file() + return get_stimulus_templates(data) + + @memoize + def get_stimulus_timestamps(self) -> np.ndarray: + """Get stimulus timestamps (vsyncs) from pkl file. + + NOTE: Located with behavior_session_id. Does not use the sync_file + which requires ophys_session_id. + + Returns + ------- + np.ndarray + Timestamps associated with stimulus presentations on the monitor + that do no account for monitor delay. + """ + data = self._behavior_stimulus_file() + vsyncs = data["items"]["behavior"]["intervalsms"] + return np.hstack((0, vsyncs)).cumsum() / 1000.0 # cumulative time + + def get_task_parameters(self) -> dict: + """Get task parameters from pkl file. + + Returns + ------- + dict + A dictionary containing parameters used to define the task runtime + behavior. + """ + data = self._behavior_stimulus_file() + return get_task_parameters(data) + + def get_trials(self) -> pd.DataFrame: + """Get trials from pkl file + + Returns + ------- + pd.DataFrame + A dataframe containing behavioral trial start/stop times, + and trial data + """ + licks = self.get_licks() + data = self._behavior_stimulus_file() + rewards = self.get_rewards() + stimulus_presentations = self.get_stimulus_presentations() + # Pass a dummy rebase function since we don't have two time streams + trial_df = get_trials(data, licks, rewards, stimulus_presentations, + lambda x: x) + + return trial_df + + @memoize + def get_birth_date(self) -> datetime.date: + """Returns the birth date of the animal. + :rtype: datetime.date + """ + query = f""" + SELECT d.date_of_birth + FROM behavior_sessions bs + JOIN donors d on d.id = bs.donor_id + WHERE bs.id = {self.behavior_session_id} + """ + return self.fetchone(query, strict=True).date() + + @memoize + def get_sex(self) -> str: + """Returns sex of the animal (M/F) + :rtype: str + """ + query = f""" + SELECT g.name AS sex + FROM behavior_sessions bs + JOIN donors d ON bs.donor_id = d.id + JOIN genders g ON g.id = d.gender_id + WHERE bs.id = {self.behavior_session_id}; + """ + return self.fetchone(query, strict=True) + + @memoize + def get_age(self) -> str: + """Returns age code of the subject. + :rtype: str + """ + query = f""" + SELECT a.name AS age + FROM behavior_sessions bs + JOIN donors d ON d.id = bs.donor_id + JOIN ages a ON a.id = d.age_id + WHERE bs.id = {self.behavior_session_id}; + """ + return self.fetchone(query, strict=True) + + @memoize + def get_rig_name(self) -> str: + """Returns the name of the experimental rig. + :rtype: str + """ + query = f""" + SELECT e.name AS device_name + FROM behavior_sessions bs + JOIN equipment e ON e.id = bs.equipment_id + WHERE bs.id = {self.behavior_session_id}; + """ + return self.fetchone(query, strict=True) + + @memoize + def get_stimulus_name(self) -> str: + """Returns the name of the stimulus set used for the session. + :rtype: str + """ + query = f""" + SELECT stages.name + FROM behavior_sessions bs + JOIN stages ON stages.id = bs.state_id + WHERE bs.id = '{self.foraging_id}' + """ + return self.mtrain_db.fetchone(query, strict=True) + + @memoize + def get_reporter_line(self) -> List[str]: + """Returns the genotype name(s) of the reporter line(s). 
+ :rtype: list + """ + query = f""" + SELECT g.name AS reporter_line + FROM behavior_sessions bs + JOIN donors d ON bs.donor_id=d.id + JOIN donors_genotypes dg ON dg.donor_id=d.id + JOIN genotypes g ON g.id=dg.genotype_id + JOIN genotype_types gt + ON gt.id=g.genotype_type_id AND gt.name = 'reporter' + WHERE bs.id={self.behavior_session_id}; + """ + result = self.fetchall(query) + if result is None or len(result) < 1: + raise OneOrMoreResultExpectedError( + f"Expected one or more, but received: '{result}' " + f"from query:\n'{query}'") + return result + + @memoize + def get_driver_line(self) -> List[str]: + """Returns the genotype name(s) of the driver line(s). + :rtype: list + """ + query = f""" + SELECT g.name AS driver_line + FROM behavior_sessions bs + JOIN donors d ON bs.donor_id=d.id + JOIN donors_genotypes dg ON dg.donor_id=d.id + JOIN genotypes g ON g.id=dg.genotype_id + JOIN genotype_types gt + ON gt.id=g.genotype_type_id AND gt.name = 'driver' + WHERE bs.id={self.behavior_session_id}; + """ + result = self.fetchall(query) + if result is None or len(result) < 1: + raise OneOrMoreResultExpectedError( + f"Expected one or more, but received: '{result}' " + f"from query:\n'{query}'") + return result + + @memoize + def get_external_specimen_name(self) -> int: + """Returns the LabTracks ID + :rtype: int + """ + # TODO: Should this even be included? + # Found sometimes there were entries with NONE which is + # why they are filtered out; also many entries in the table + # match the donor_id, which is why used DISTINCT + query = f""" + SELECT DISTINCT(sp.external_specimen_name) + FROM behavior_sessions bs + JOIN donors d ON bs.donor_id=d.id + JOIN specimens sp ON sp.donor_id=d.id + WHERE bs.id={self.behavior_session_id} + AND sp.external_specimen_name IS NOT NULL; + """ + return int(self.fetchone(query, strict=True)) + + @memoize + def get_full_genotype(self) -> str: + """Return the name of the subject's genotype + :rtype: str + """ + query = f""" + SELECT d.full_genotype + FROM behavior_sessions bs + JOIN donors d ON d.id=bs.donor_id + WHERE bs.id= {self.behavior_session_id}; + """ + return self.fetchone(query, strict=True) + + def get_experiment_date(self) -> datetime: + """Return timestamp the behavior stimulus file began recording in UTC + :rtype: datetime + """ + data = self._behavior_stimulus_file() + # Assuming file has local time of computer (Seattle) + tz = pytz.timezone("America/Los_Angeles") + return tz.localize(data["start_time"]).astimezone(pytz.utc) + + def get_metadata(self) -> Dict[str, Any]: + """Return metadata about the session. 
+ :rtype: dict + """ + if self.get_behavior_session_uuid() is None: + bs_uuid = None + else: + bs_uuid = uuid.UUID(self.get_behavior_session_uuid()) + metadata = { + "rig_name": self.get_rig_name(), + "sex": self.get_sex(), + "age": self.get_age(), + "ophys_experiment_id": self.ophys_experiment_ids, + "experiment_container_id": self.ophys_container_id, + "stimulus_frame_rate": self.get_stimulus_frame_rate(), + "session_type": self.get_stimulus_name(), + "experiment_datetime": self.get_experiment_date(), + "reporter_line": self.get_reporter_line(), + "driver_line": self.get_driver_line(), + "LabTracks_ID": self.get_external_specimen_name(), + "full_genotype": self.get_full_genotype(), + "behavior_session_uuid": bs_uuid, + "foraging_id": self.foraging_id, + "behavior_session_id": self.behavior_session_id, + "behavior_training_id": self.behavior_training_id, + } + return metadata diff --git a/allensdk/internal/api/ophys_lims_api.py b/allensdk/internal/api/ophys_lims_api.py index bdbf9be3a..99c24c17f 100644 --- a/allensdk/internal/api/ophys_lims_api.py +++ b/allensdk/internal/api/ophys_lims_api.py @@ -6,14 +6,16 @@ import pytz import pandas as pd -from allensdk.internal.api import PostgresQueryMixin, OneOrMoreResultExpectedError +from allensdk.internal.api import ( + PostgresQueryMixin, OneOrMoreResultExpectedError) from allensdk.api.cache import memoize from allensdk.brain_observatory.behavior.image_api import ImageApi import allensdk.brain_observatory.roi_masks as roi from allensdk.internal.core.lims_utilities import safe_system_path +from allensdk.core.cache_method_utilities import CachedInstanceMethodMixin -class OphysLimsApi(PostgresQueryMixin): +class OphysLimsApi(PostgresQueryMixin, CachedInstanceMethodMixin): def __init__(self, ophys_experiment_id): self.ophys_experiment_id = ophys_experiment_id diff --git a/allensdk/internal/brain_observatory/time_sync.py b/allensdk/internal/brain_observatory/time_sync.py index b79d6512c..125048daf 100644 --- a/allensdk/internal/brain_observatory/time_sync.py +++ b/allensdk/internal/brain_observatory/time_sync.py @@ -139,9 +139,30 @@ def get_ophys_data_length(filename): return f["data"].shape[1] -def get_stim_data_length(filename): +def get_stim_data_length(filename: str) -> int: + """Get stimulus data length from .pkl file. + + Parameters + ---------- + filename : str + Path of stimulus data .pkl file. + + Returns + ------- + int + Stimulus data length. + """ stim_data = pd.read_pickle(filename) - return stim_data["vsynccount"] + + # A subset of stimulus .pkl files do not have the "vsynccount" field. + # MPE *won't* be backfilling the "vsynccount" field for these .pkl files. + # So the least worst option is to recalculate the vsync_count. + try: + vsync_count = stim_data["vsynccount"] + except KeyError: + vsync_count = len(stim_data["items"]["behavior"]["intervalsms"]) + 1 + + return vsync_count def corrected_video_timestamps(video_name, timestamps, data_length): diff --git a/allensdk/internal/morphology/compartment.py b/allensdk/internal/morphology/compartment.py index 66c75ed95..2a88781a8 100644 --- a/allensdk/internal/morphology/compartment.py +++ b/allensdk/internal/morphology/compartment.py @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Allen SDK. If not, see . 
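Editor's note on the get_stim_data_length() fallback introduced just above (before the morphology import fixes that follow): when a stimulus pickle lacks the "vsynccount" field, the count is recovered from the per-frame intervals, since N frame intervals imply N + 1 vsyncs. A small, self-contained sketch of that logic, using toy dictionaries that stand in for pickle contents (the dicts below are invented for illustration, not real data):

    # Toy stand-ins for stimulus .pkl contents (illustrative only).
    stim_with_count = {"vsynccount": 11}
    stim_without_count = {"items": {"behavior": {"intervalsms": [16.0] * 10}}}

    def vsync_count(stim_data: dict) -> int:
        # Prefer the precomputed field; otherwise N intervals => N + 1 vsyncs.
        try:
            return stim_data["vsynccount"]
        except KeyError:
            return len(stim_data["items"]["behavior"]["intervalsms"]) + 1

    assert vsync_count(stim_with_count) == 11
    assert vsync_count(stim_without_count) == 11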
-import node +import allensdk.internal.morphology.node as node class Compartment(object): def __init__(self, node1, node2): diff --git a/allensdk/internal/morphology/morphology.py b/allensdk/internal/morphology/morphology.py index d5301ac3a..a515ee9be 100644 --- a/allensdk/internal/morphology/morphology.py +++ b/allensdk/internal/morphology/morphology.py @@ -16,8 +16,8 @@ import copy import math import numpy as np -from node import Node -from compartment import Compartment +from allensdk.internal.morphology.node import Node +from allensdk.internal.morphology.compartment import Compartment class Morphology( object ): diff --git a/allensdk/internal/pipeline_modules/cell_types/morphology/upright_transform.py b/allensdk/internal/pipeline_modules/cell_types/morphology/upright_transform.py index 776bbbb3b..979716378 100644 --- a/allensdk/internal/pipeline_modules/cell_types/morphology/upright_transform.py +++ b/allensdk/internal/pipeline_modules/cell_types/morphology/upright_transform.py @@ -5,10 +5,8 @@ import sys import numpy as np from scipy.spatial.distance import euclidean -import neuron_morphology.swc as swc import skimage.draw -#import allensdk.core.json_utilities as json def calculate_centroid(x, y): diff --git a/allensdk/test/api/test_cache.py b/allensdk/test/api/test_cache.py index a93371561..78e1e5e35 100755 --- a/allensdk/test/api/test_cache.py +++ b/allensdk/test/api/test_cache.py @@ -38,6 +38,7 @@ import pandas as pd import pandas.io.json as pj import numpy as np +import time import pytest from mock import MagicMock, mock_open, patch @@ -160,33 +161,70 @@ def test_wrap_dataframe(ju_read_url_get, ju_write, mock_read_json, rma, cache): ju_write.assert_called_once_with('example.txt', _msg) mock_read_json.assert_called_once_with('example.txt', orient='records') -def test_memoize(): - import time +def test_memoize_with_function(): + @memoize + def f(x): + time.sleep(0.1) + return x - @memoize - def f(x): - time.sleep(1) - return x + # Build cache + for i in range(3): + uncached_result = f(i) + assert uncached_result == i + assert f.cache_size() == 3 + + # Test cache was accessed + for i in range(3): + t0 = time.time() + result = f(i) + t1 = time.time() + assert result == i + assert t1 - t0 < 0.1 + + # Test cache clear + f.cache_clear() + assert f.cache_size() == 0 - for ii in range(2): - t0 = time.time() - print(f(0), time.time() - t0) - class FooBar(object): +def test_memoize_with_kwarg_function(): + @memoize + def f(x, *, y, z=1): + time.sleep(0.1) + return (x * y * z) - def __init__(self): pass + # Build cache + f(2, y=1, z=2) + assert f.cache_size() == 1 - @memoize - def f(self, x): - time.sleep(.1) - return 1 + # Test cache was accessed + t0 = time.time() + result = f(2, y=1, z=2) + t1 = time.time() + assert result == 4 + assert t1 - t0 < 0.1 - fb = FooBar() - for ii in range(2): - t0 = time.time() - fb.f(0), time.time() - t0 +def test_memoize_with_instance_method(): + class FooBar(object): + @memoize + def f(self, x): + time.sleep(0.1) + return x + + fb = FooBar() + # Build cache + for i in range(3): + uncached_result = fb.f(i) + assert uncached_result == i + assert fb.f.cache_size() == 3 + + for i in range(3): + t0 = time.time() + result = fb.f(i) + t1 = time.time() + assert result == i + assert t1 - t0 < 0.1 def test_get_default_manifest_file(): diff --git a/allensdk/test/brain_observatory/behavior/conftest.py b/allensdk/test/brain_observatory/behavior/conftest.py index 19d53295d..7999284bd 100644 --- a/allensdk/test/brain_observatory/behavior/conftest.py +++ 
b/allensdk/test/brain_observatory/behavior/conftest.py @@ -8,6 +8,18 @@ import os import json +from allensdk.test_utilities.custom_comparators import WhitespaceStrippedString + + +def pytest_assertrepr_compare(config, op, left, right): + if isinstance(left, WhitespaceStrippedString) and op == "==": + if isinstance(right, WhitespaceStrippedString): + right_compare = right.orig + else: + right_compare = right + return ["Comparing strings with whitespace stripped. ", + f"{left.orig} != {right_compare}.", "Diff:"] + left.diff + def pytest_ignore_collect(path, config): ''' The brain_observatory.ecephys submodule uses python 3.6 features that may not be backwards compatible! diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_data_lims_api.py b/allensdk/test/brain_observatory/behavior/test_behavior_data_lims_api.py new file mode 100644 index 000000000..804c830bf --- /dev/null +++ b/allensdk/test/brain_observatory/behavior/test_behavior_data_lims_api.py @@ -0,0 +1,267 @@ +import pytest +import numpy as np +import pandas as pd +from datetime import datetime +import pytz +import math + +from allensdk.internal.api.behavior_data_lims_api import BehaviorDataLimsApi +from allensdk.internal.api.behavior_ophys_api import BehaviorOphysLimsApi +from allensdk.brain_observatory.running_speed import RunningSpeed +from allensdk.core.exceptions import DataFrameIndexError + + +@pytest.fixture +def MockBehaviorDataLimsApi(): + class MockBehaviorDataLimsApi(BehaviorDataLimsApi): + """ + Mock class that overrides some functions to provide test data and + initialize without calls to db. + """ + def __init__(self): + super().__init__(behavior_session_id=8675309) + + def _get_ids(self): + return {} + + def _behavior_stimulus_file(self): + data = { + "items": { + "behavior": { + "lick_sensors": [{ + "lick_events": [2, 6, 9], + }], + "intervalsms": np.array([16.0]*10), + }, + }, + "session_uuid": 123456, + "start_time": datetime(2019, 9, 26, 9), + } + return data + + def get_running_data_df(self): + return pd.DataFrame( + {"timestamps": [0.0, 0.1, 0.2], + "speed": [8.0, 15.0, 16.0]}).set_index("timestamps") + + api = MockBehaviorDataLimsApi() + yield api + api.cache_clear() + + +@pytest.fixture +def MockApiRunSpeedExpectedError(): + class MockApiRunSpeedExpectedError(BehaviorDataLimsApi): + """ + Mock class that overrides some functions to provide test data and + initialize without calls to db. + """ + def __init__(self): + super().__init__(behavior_session_id=8675309) + + def _get_ids(self): + return {} + + def get_running_data_df(self): + return pd.DataFrame( + {"timestamps": [0.0, 0.1, 0.2], + "speed": [8.0, 15.0, 16.0]}) + return MockApiRunSpeedExpectedError() + + +# Test the non-sql-query functions +# Does not include tests for the following functions, as they are just calls to +# static methods provided for convenience (and should be covered with their own +# unit tests): +# get_rewards +# get_running_data_df +# get_stimulus_templates +# get_task_parameters +# get_trials +# Does not include test for get_metadata since it just collects data from +# methods covered in other unit tests, or data derived from sql queries. 
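Editor's note for readers tracing the expected values in the tests below: they follow directly from the fields supplied by the MockBehaviorDataLimsApi fixture above. A minimal standalone sketch of that arithmetic (variable names here are illustrative only; the production logic lives in BehaviorDataLimsApi.get_stimulus_timestamps, get_licks, and get_stimulus_frame_rate):

    import numpy as np

    # Fields provided by the mock pickle data in the fixture above.
    intervalsms = np.array([16.0] * 10)   # vsync intervals, in milliseconds
    lick_frames = [2, 6, 9]               # frame indices of lick events

    # Same construction as get_stimulus_timestamps:
    # prepend 0, cumulative-sum the intervals, convert ms -> s.
    stimulus_timestamps = np.hstack((0, intervalsms)).cumsum() / 1000.0
    assert np.allclose(stimulus_timestamps, [0.016 * i for i in range(11)])

    # Same construction as get_licks: index lick frames into those timestamps.
    lick_times = [stimulus_timestamps[frame] for frame in lick_frames]
    assert np.allclose(lick_times, [0.032, 0.096, 0.144])

    # Frame rate as computed by get_stimulus_frame_rate.
    frame_rate = np.round(1 / np.mean(np.diff(stimulus_timestamps)), 0)
    assert frame_rate == 62.0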
+def test_get_stimulus_timestamps(MockBehaviorDataLimsApi): + api = MockBehaviorDataLimsApi + expected = np.array([0.016 * i for i in range(11)]) + assert np.allclose(expected, api.get_stimulus_timestamps()) + + +def test_get_licks(MockBehaviorDataLimsApi): + api = MockBehaviorDataLimsApi + expected = pd.DataFrame({"time": [0.016 * i for i in [2., 6., 9.]]}) + pd.testing.assert_frame_equal(expected, api.get_licks()) + + +def test_get_behavior_session_uuid(MockBehaviorDataLimsApi): + api = MockBehaviorDataLimsApi + assert 123456 == api.get_behavior_session_uuid() + + +def test_get_stimulus_frame_rate(MockBehaviorDataLimsApi): + api = MockBehaviorDataLimsApi + assert 62.0 == api.get_stimulus_frame_rate() + + +def test_get_experiment_date(MockBehaviorDataLimsApi): + api = MockBehaviorDataLimsApi + expected = datetime(2019, 9, 26, 16, tzinfo=pytz.UTC) + actual = api.get_experiment_date() + assert expected == actual + + +def test_get_running_speed(MockBehaviorDataLimsApi): + expected = RunningSpeed(timestamps=[0.0, 0.1, 0.2], + values=[8.0, 15.0, 16.0]) + api = MockBehaviorDataLimsApi + actual = api.get_running_speed() + assert expected == actual + + +def test_get_running_speed_raises_index_error(MockApiRunSpeedExpectedError): + with pytest.raises(DataFrameIndexError): + MockApiRunSpeedExpectedError.get_running_speed() + + +# def test_get_stimulus_presentations(MockBehaviorDataLimsApi): +# api = MockBehaviorDataLimsApi +# # TODO. This function is a monster with multiple dependencies, +# # no tests, and no documentation (for any of its dependencies). +# # Needs to be broken out into testable parts. + +@pytest.mark.requires_bamboo +@pytest.mark.nightly +class TestBehaviorRegression: + """ + Test whether behavior sessions (that are also ophys) loaded with + BehaviorDataLimsApi return the same results as sessions loaded + with BehaviorOphysLimsApi, for relevant functions. Do not check for + timestamps, which are from different files so will not be the same. + Also not checking for experiment_date, since they're from two different + sources (and I'm not sure how it's uploaded in the database). 
+ + Do not test `get_licks` regression because the licks come from two + different sources and are recorded differently (behavior pickle file in + BehaviorDataLimsApi; sync file in BehaviorOphysLimeApi) + """ + @classmethod + def setup_class(cls): + cls.bd = BehaviorDataLimsApi(976012750) + cls.od = BehaviorOphysLimsApi(976255949) + + @classmethod + def teardown_class(cls): + cls.bd.cache_clear() + cls.od.cache_clear() + + def test_stim_file_regression(self): + assert (self.bd.get_behavior_stimulus_file() + == self.od.get_behavior_stimulus_file()) + + def test_get_rewards_regression(self): + """Index is timestamps here, so remove it before comparing.""" + bd_rewards = self.bd.get_rewards().reset_index(drop=True) + od_rewards = self.od.get_rewards().reset_index(drop=True) + pd.testing.assert_frame_equal(bd_rewards, od_rewards) + + def test_ophys_experiment_id_regression(self): + assert self.bd.ophys_experiment_ids[0] == self.od.ophys_experiment_id + + def test_behavior_uuid_regression(self): + assert (self.bd.get_behavior_session_uuid() + == self.od.get_behavior_session_uuid()) + + def test_container_id_regression(self): + assert (self.bd.ophys_container_id + == self.od.get_experiment_container_id()) + + def test_stimulus_frame_rate_regression(self): + assert (self.bd.get_stimulus_frame_rate() + == self.od.get_stimulus_frame_rate()) + + def test_get_running_speed_regression(self): + """Can't test values because they're intrinsically linked to timestamps + """ + bd_speed = self.bd.get_running_speed() + od_speed = self.od.get_running_speed() + assert len(bd_speed.values) == len(od_speed.values) + assert len(bd_speed.timestamps) == len(od_speed.timestamps) + + def test_get_running_df_regression(self): + """Can't test values because they're intrinsically linked to timestamps + """ + bd_running = self.bd.get_running_data_df() + od_running = self.od.get_running_data_df() + assert len(bd_running) == len(od_running) + assert list(bd_running) == list(od_running) + + def test_get_stimulus_presentations_regression(self): + drop_cols = ["start_time", "stop_time"] + bd_pres = self.bd.get_stimulus_presentations().drop(drop_cols, axis=1) + od_pres = self.od.get_stimulus_presentations().drop(drop_cols, axis=1) + # Duration needs less precision (timestamp-dependent) + pd.testing.assert_frame_equal(bd_pres, od_pres, check_less_precise=2) + + def test_get_stimulus_template_regression(self): + bd_template = self.bd.get_stimulus_templates() + od_template = self.od.get_stimulus_templates() + assert bd_template.keys() == od_template.keys() + for k in bd_template.keys(): + assert np.array_equal(bd_template[k], od_template[k]) + + def test_get_task_parameters_regression(self): + bd_params = self.bd.get_task_parameters() + od_params = self.od.get_task_parameters() + # Have to do special checking because of nan equality + assert bd_params.keys() == od_params.keys() + for k in bd_params.keys(): + bd_v = bd_params[k] + od_v = od_params[k] + try: + if math.isnan(bd_v): + assert math.isnan(od_v) + else: + assert bd_v == od_v + except (AttributeError, TypeError): + assert bd_v == od_v + + def test_get_trials_regression(self): + """ A lot of timestamp dependent values. 
Test what we can.""" + cols_to_test = ["reward_volume", "hit", "false_alarm", "miss", + "sham_change", "stimulus_change", "aborted", "go", + "catch", "auto_rewarded", "correct_reject", + "trial_length", "change_frame", "initial_image_name", + "change_image_name"] + bd_trials = self.bd.get_trials()[cols_to_test] + od_trials = self.od.get_trials()[cols_to_test] + pd.testing.assert_frame_equal(bd_trials, od_trials, + check_less_precise=2) + + def test_get_sex_regression(self): + assert self.bd.get_sex() == self.od.get_sex() + + def test_get_rig_name_regression(self): + assert self.bd.get_rig_name() == self.od.get_rig_name() + + def test_get_stimulus_name_regression(self): + assert self.bd.get_stimulus_name() == self.od.get_stimulus_name() + + def test_get_reporter_line_regression(self): + assert self.bd.get_reporter_line() == self.od.get_reporter_line() + + def test_get_driver_line_regression(self): + assert self.bd.get_driver_line() == self.od.get_driver_line() + + def test_get_external_specimen_name_regression(self): + assert (self.bd.get_external_specimen_name() + == self.od.get_external_specimen_name()) + + def test_get_full_genotype_regression(self): + assert self.bd.get_full_genotype() == self.od.get_full_genotype() + + def test_get_experiment_date_regression(self): + """Just testing the date since it comes from two different sources; + We expect that BehaviorOphysLimsApi will be earlier (more like when + rig was started up), while BehaviorDataLimsApi returns the start of + the actual behavior (from pkl file)""" + assert (self.bd.get_experiment_date().date() + == self.od.get_experiment_date().date()) diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_data_session.py b/allensdk/test/brain_observatory/behavior/test_behavior_data_session.py new file mode 100644 index 000000000..cd3aa4c2a --- /dev/null +++ b/allensdk/test/brain_observatory/behavior/test_behavior_data_session.py @@ -0,0 +1,58 @@ +import logging + +from allensdk.brain_observatory.behavior.behavior_data_session import ( + BehaviorDataSession) + + +class DummyApi(object): + def __init__(self): + pass + + def get_method(self): + """Method docstring""" + pass + + def get_no_docstring_method(self): + pass + + def _other_method(self): + """Other Method docstring""" + pass + + +class DummyApiCache(object): + def cache_clear(self): + pass + + +class TestBehaviorDataSession: + """Tests for BehaviorDataSession. + The vast majority of methods in BehaviorDataSession are simply calling + functions from the underlying API. The API required for instantiating a + BehaviorDataSession is annotated to show that it requires an class that + inherits from BehaviorBase, it is ensured that those methods exist in + the API class. These methods should be covered by unit tests on the + API class and will not be re-tested here. 
+ """ + @classmethod + def setup_class(cls): + cls.behavior_session = BehaviorDataSession(api=DummyApi()) + + def test_list_api_methods(self): + expected = [("get_method", "Method docstring"), + ("get_no_docstring_method", "")] + actual = self.behavior_session.list_api_methods() + assert expected == actual + + def test_cache_clear_raises_warning(self, caplog): + expected_msg = ("Attempted to clear API cache, but method" + " `cache_clear` does not exist on DummyApi") + self.behavior_session.cache_clear() + assert caplog.record_tuples == [ + ("BehaviorOphysSession", logging.WARNING, expected_msg)] + + def test_cache_clear_no_warning(self, caplog): + caplog.clear() + bs = BehaviorDataSession(api=DummyApiCache()) + bs.cache_clear() + assert len(caplog.record_tuples) == 0 diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_ophys_session.py b/allensdk/test/brain_observatory/behavior/test_behavior_ophys_session.py index 73bfe5862..c67ba1f92 100644 --- a/allensdk/test/brain_observatory/behavior/test_behavior_ophys_session.py +++ b/allensdk/test/brain_observatory/behavior/test_behavior_ophys_session.py @@ -20,17 +20,6 @@ from allensdk.brain_observatory.behavior.image_api import ImageApi -@pytest.mark.nightly -@pytest.mark.parametrize('oeid1, oeid2, expected', [ - pytest.param(789359614, 789359614, True), - pytest.param(789359614, 739216204, False) -]) -def test_equal(oeid1, oeid2, expected): - d1 = BehaviorOphysSession.from_lims(oeid1) - d2 = BehaviorOphysSession.from_lims(oeid2) - - assert equals(d1, d2) == expected - @pytest.mark.requires_bamboo @pytest.mark.parametrize("get_expected,get_from_session", [ [ diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_project_cache.py b/allensdk/test/brain_observatory/behavior/test_behavior_project_cache.py index a45c1479b..92ca14cfd 100644 --- a/allensdk/test/brain_observatory/behavior/test_behavior_project_cache.py +++ b/allensdk/test/brain_observatory/behavior/test_behavior_project_cache.py @@ -1,116 +1,175 @@ import os -import numpy as np -import pandas as pd import pytest -from allensdk.brain_observatory.behavior.swdb import behavior_project_cache as bpc +import pandas as pd +import tempfile +import logging +import time +from allensdk.brain_observatory.behavior.behavior_project_cache import ( + BehaviorProjectCache) +from allensdk.core.exceptions import MissingDataError + + +@pytest.fixture +def session_table(): + return (pd.DataFrame({"ophys_session_id": [1, 2, 3], + "ophys_experiment_id": [[4], [5, 6], [7]], + "reporter_line": [["aa"], ["aa", "bb"], ["cc"]], + "driver_line": [["aa"], ["aa", "bb"], ["cc"]]}) + .set_index("ophys_session_id")) @pytest.fixture -def cache_test_base(): - return '/allen/programs/braintv/workgroups/nc-ophys/visual_behavior/SWDB_2019/test_data' +def behavior_table(): + return (pd.DataFrame({"behavior_session_id": [1, 2, 3], + "reporter_line": [["aa"], ["aa", "bb"], ["cc"]], + "driver_line": [["aa"], ["aa", "bb"], ["cc"]]}) + .set_index("behavior_session_id")) + @pytest.fixture -def cache(cache_test_base): - return bpc.BehaviorProjectCache(cache_test_base) +def mock_api(session_table, behavior_table): + class MockApi: + def get_session_table(self): + return session_table + + def get_behavior_only_session_table(self): + return behavior_table + + def get_session_data(self, ophys_session_id): + return ophys_session_id + + def get_behavior_only_session_data(self, behavior_session_id): + return behavior_session_id + return MockApi + @pytest.fixture -def session(cache): - return 
cache.get_session(792815735) - -# Test trials extra columns -@pytest.mark.requires_bamboo -def test_extra_trials_columns(session): - for new_key in ['reward_rate', 'response_binary']: - assert new_key in session.trials.keys() - -@pytest.mark.requires_bamboo -def test_extra_stimulus_presentation_columns(session): - for new_key in [ - 'absolute_flash_number', - 'time_from_last_lick', - 'time_from_last_reward', - 'time_from_last_change', - 'block_index', - 'image_block_repetition', - 'repeat_within_block']: - assert new_key in session.stimulus_presentations.keys() - -@pytest.mark.requires_bamboo -def test_stimulus_presentations_image_set(session): - # We made the image set just 'A' or 'B' - assert session.stimulus_presentations['image_set'].unique() == np.array(['A']) - -@pytest.mark.requires_bamboo -def test_stimulus_templates(session): - # Was a dict with only one key, where the value was a 3d array. - # We made it a dict with image names as keys and 2d arrs (the images) as values - for image_name, image_arr in session.stimulus_templates.items(): - assert image_arr.ndim == 2 - -# Test trial response df -@pytest.mark.requires_bamboo -@pytest.mark.parametrize('key, output', [ - ('mean_response', 0.0053334), - ('baseline_response', -0.0020357), - ('p_value', 0.6478659), -]) -def test_session_trial_response(key, output, session): - trial_response = session.trial_response_df - np.testing.assert_almost_equal(trial_response.query("cell_specimen_id == 817103993").iloc[0][key], output, decimal=6) - -@pytest.mark.requires_bamboo -@pytest.mark.parametrize('key, output', [ - ('time_from_last_lick', 7.3577), - ('mean_running_speed', 22.143871), - ('duration', 0.25024), -]) -def test_session_flash_response(key, output, session): - flash_response = session.flash_response_df - np.testing.assert_almost_equal(flash_response.query("cell_specimen_id == 817103993").iloc[0][key], output, decimal=6) - -@pytest.mark.requires_bamboo -def test_analysis_files_metadata(cache): - assert cache.analysis_files_metadata[ - 'trial_response_df_params' - ]['response_window_duration_seconds'] == 0.5 - -@pytest.mark.requires_bamboo -def test_session_image_loading(session): - assert isinstance(session.max_projection.data, np.ndarray) - -@pytest.mark.requires_bamboo -def test_no_invalid_rois(session): - # We made the cache return sessions without the invalid rois - assert session.cell_specimen_table['valid_roi'].all() - -@pytest.mark.requires_bamboo -def test_get_container_sessions(cache): - container_id = cache.experiment_table['container_id'].unique()[0] - container_sessions = cache.get_container_sessions(container_id) - session = container_sessions['OPHYS_1_images_A'] - assert isinstance(session, bpc.ExtendedBehaviorSession) - np.testing.assert_almost_equal(session.dff_traces.loc[817103993]['dff'][0], 0.3538657529565) - -@pytest.mark.requires_bamboo -def test_binarized_segmentation_mask_image(session): - np.testing.assert_array_equal( - np.unique(np.array(session.segmentation_mask_image.data).ravel()), - np.array([0, 1]) - ) - -@pytest.mark.requires_bamboo -def test_no_nan_flash_running_speed(session): - assert not pd.isnull(session.stimulus_presentations['mean_running_speed']).any() - -@pytest.mark.requires_bamboo -def test_licks_correct_colname(session): - assert session.licks.columns == ['timestamps'] - -@pytest.mark.requires_bamboo -def test_rewards_correct_colname(session): - assert (session.rewards.columns == ['timestamps', 'volume', 'autorewarded']).all() - -@pytest.mark.requires_bamboo -def 
test_dff_traces_correct_colname(session): - # This is a Friday-harbor specific change - assert 'cell_roi_id' not in session.dff_traces.columns +def TempdirBehaviorCache(mock_api): + temp_dir = tempfile.TemporaryDirectory() + manifest = os.path.join(temp_dir.name, "manifest.json") + yield BehaviorProjectCache(fetch_api=mock_api(), + manifest=manifest) + temp_dir.cleanup() + + +def test_get_session_table(TempdirBehaviorCache, session_table): + cache = TempdirBehaviorCache + actual = cache.get_session_table() + path = cache.manifest.path_info.get("ophys_sessions").get("spec") + assert os.path.exists(path) + pd.testing.assert_frame_equal(session_table, actual) + + +def test_get_behavior_table(TempdirBehaviorCache, behavior_table): + cache = TempdirBehaviorCache + actual = cache.get_behavior_session_table() + path = cache.manifest.path_info.get("behavior_sessions").get("spec") + assert os.path.exists(path) + pd.testing.assert_frame_equal(behavior_table, actual) + + +def test_session_table_reads_from_cache(TempdirBehaviorCache, session_table, + caplog): + caplog.set_level(logging.INFO, logger="call_caching") + cache = TempdirBehaviorCache + cache.get_session_table() + expected_first = [ + ("call_caching", logging.INFO, "Reading data from cache"), + ("call_caching", logging.INFO, "No cache file found."), + ("call_caching", logging.INFO, "Fetching data from remote"), + ("call_caching", logging.INFO, "Writing data to cache"), + ("call_caching", logging.INFO, "Reading data from cache")] + assert expected_first == caplog.record_tuples + caplog.clear() + cache.get_session_table() + assert [expected_first[0]] == caplog.record_tuples + + +def test_behavior_table_reads_from_cache(TempdirBehaviorCache, behavior_table, + caplog): + caplog.set_level(logging.INFO, logger="call_caching") + cache = TempdirBehaviorCache + cache.get_behavior_session_table() + expected_first = [ + ("call_caching", logging.INFO, "Reading data from cache"), + ("call_caching", logging.INFO, "No cache file found."), + ("call_caching", logging.INFO, "Fetching data from remote"), + ("call_caching", logging.INFO, "Writing data to cache"), + ("call_caching", logging.INFO, "Reading data from cache")] + assert expected_first == caplog.record_tuples + caplog.clear() + cache.get_behavior_session_table() + assert [expected_first[0]] == caplog.record_tuples + + +def test_behavior_session_fails_fixed_if_no_cache(TempdirBehaviorCache): + cache = TempdirBehaviorCache + with pytest.raises(MissingDataError): + cache.get_behavior_session_data(1, fixed=True) + cache.get_behavior_session_data(1) + # Also fails if there is a cache, but the id is not contained therein + with pytest.raises(MissingDataError): + cache.get_behavior_session_data(2, fixed=True) + + +def test_session_fails_fixed_if_no_cache(TempdirBehaviorCache): + cache = TempdirBehaviorCache + with pytest.raises(MissingDataError): + cache.get_session_data(1, fixed=True) + cache.get_session_data(1) + # Also fails if there is a cache, but the id is not contained therein + with pytest.raises(MissingDataError): + cache.get_session_data(2, fixed=True) + + +def test_get_session_table_by_experiment(TempdirBehaviorCache): + expected = (pd.DataFrame({"ophys_session_id": [1, 2, 2, 3], + "ophys_experiment_id": [4, 5, 6, 7]}) + .set_index("ophys_experiment_id")) + actual = TempdirBehaviorCache.get_session_table(by="ophys_experiment_id")[ + ["ophys_session_id"]] + pd.testing.assert_frame_equal(expected, actual) + + +def test_write_behavior_log(TempdirBehaviorCache): + expected_cols = 
["behavior_session_id", "created_at", "updated_at"] + expected_ids = [1, 2] + expected_times = [False, True] + cache = TempdirBehaviorCache + cache.get_behavior_session_data(1) + cache.get_behavior_session_data(2) + time.sleep(1) + cache.get_behavior_session_data(1) + path = cache.manifest.path_info.get("behavior_analysis_log").get("spec") + # Log exists + assert os.path.exists(path) + actual = pd.read_csv(path) + # columns exist + assert list(actual) == expected_cols + # ids exist + assert actual["behavior_session_id"].values.tolist() == expected_ids + # first one should have updated different than created since accessed 2x + assert ((actual["created_at"] == actual["updated_at"]).values.tolist() + == expected_times) + + +def test_write_session_log(TempdirBehaviorCache): + expected_cols = ["ophys_experiment_id", "created_at", "updated_at"] + expected_ids = [1, 2] + expected_times = [False, True] + cache = TempdirBehaviorCache + cache.get_session_data(1) + cache.get_session_data(2) + time.sleep(1) + cache.get_session_data(1) + path = cache.manifest.path_info.get("ophys_analysis_log").get("spec") + # Log exists + assert os.path.exists(path) + actual = pd.read_csv(path) + # columns exist + assert list(actual) == expected_cols + # ids exist + assert actual["ophys_experiment_id"].values.tolist() == expected_ids + # first one should have updated different than created since accessed 2x + assert ((actual["created_at"] == actual["updated_at"]).values.tolist() + == expected_times) diff --git a/allensdk/test/brain_observatory/behavior/test_behavior_project_lims_api.py b/allensdk/test/brain_observatory/behavior/test_behavior_project_lims_api.py new file mode 100644 index 000000000..f84eef414 --- /dev/null +++ b/allensdk/test/brain_observatory/behavior/test_behavior_project_lims_api.py @@ -0,0 +1,108 @@ +import pytest + +from allensdk.brain_observatory.behavior.behavior_project_lims_api import ( + BehaviorProjectLimsApi) +from allensdk.test_utilities.custom_comparators import ( + WhitespaceStrippedString) + + +class MockQueryEngine: + def __init__(self, **kwargs): + pass + + def select(self, query): + return query + + def fetchall(self, query): + return query + + def stream(self, endpoint): + return endpoint + + +@pytest.fixture +def MockBehaviorProjectLimsApi(): + return BehaviorProjectLimsApi(MockQueryEngine(), MockQueryEngine()) + + +@pytest.mark.parametrize( + "col,valid_list,operator,expected", [ + ("os.id", [1, 2, 3], "WHERE", "WHERE os.id IN (1,2,3)"), + ("id2", ["'a'", "'b'"], "AND", "AND id2 IN ('a','b')"), + ("id3", [1.0], "OR", "OR id3 IN (1.0)"), + ("id4", None, "WHERE", "")] +) +def test_build_in_list_selector_query( + col, valid_list, operator, expected, MockBehaviorProjectLimsApi): + assert (expected + == MockBehaviorProjectLimsApi._build_in_list_selector_query( + col, valid_list, operator)) + + +@pytest.mark.parametrize( + "behavior_session_ids,expected", [ + (None, + WhitespaceStrippedString(""" + SELECT foraging_id + FROM behavior_sessions + WHERE foraging_id IS NOT NULL + ; + """)), + (["'id1'", "'id2'"], + WhitespaceStrippedString(""" + SELECT foraging_id + FROM behavior_sessions + WHERE foraging_id IS NOT NULL + AND id IN ('id1','id2'); + """)) + ] +) +def test_get_foraging_ids_from_behavior_session( + behavior_session_ids, expected, MockBehaviorProjectLimsApi): + mock_api = MockBehaviorProjectLimsApi + assert expected == mock_api._get_foraging_ids_from_behavior_session( + behavior_session_ids) + + +def test_get_behavior_stage_table(MockBehaviorProjectLimsApi): + expected = 
WhitespaceStrippedString(""" + SELECT + stages.name as session_type, + bs.id AS foraging_id + FROM behavior_sessions bs + JOIN stages ON stages.id = bs.state_id + ; + """) + mock_api = MockBehaviorProjectLimsApi + actual = mock_api._get_behavior_stage_table(mtrain_db=MockQueryEngine()) + assert expected == actual + + +@pytest.mark.parametrize( + "line,expected", [ + ("reporter", WhitespaceStrippedString( + """-- -- begin getting reporter line from donors -- -- + SELECT ARRAY_AGG (g.name) AS reporter_line, d.id AS donor_id + FROM donors d + LEFT JOIN donors_genotypes dg ON dg.donor_id=d.id + LEFT JOIN genotypes g ON g.id=dg.genotype_id + LEFT JOIN genotype_types gt ON gt.id=g.genotype_type_id + WHERE gt.name='reporter' + GROUP BY d.id + -- -- end getting reporter line from donors -- --""")), + ("driver", WhitespaceStrippedString( + """-- -- begin getting driver line from donors -- -- + SELECT ARRAY_AGG (g.name) AS driver_line, d.id AS donor_id + FROM donors d + LEFT JOIN donors_genotypes dg ON dg.donor_id=d.id + LEFT JOIN genotypes g ON g.id=dg.genotype_id + LEFT JOIN genotype_types gt ON gt.id=g.genotype_type_id + WHERE gt.name='driver' + GROUP BY d.id + -- -- end getting driver line from donors -- --""")) + ] +) +def test_build_line_from_donor_query(line, expected, + MockBehaviorProjectLimsApi): + mbp_api = MockBehaviorProjectLimsApi + assert expected == mbp_api._build_line_from_donor_query(line=line) diff --git a/allensdk/test/brain_observatory/behavior/test_metadata_processing.py b/allensdk/test/brain_observatory/behavior/test_metadata_processing.py new file mode 100644 index 000000000..96e01384a --- /dev/null +++ b/allensdk/test/brain_observatory/behavior/test_metadata_processing.py @@ -0,0 +1,56 @@ +import numpy as np + +from allensdk.brain_observatory.behavior.metadata_processing import ( + get_task_parameters) + + +def test_get_task_parameters(): + data = { + "items": { + "behavior": { + "config": { + "DoC": { + "blank_duration_range": (0.5, 0.6), + "stimulus_window": 6.0, + "response_window": [0.15, 0.75], + "change_time_dist": "geometric", + }, + "reward": { + "reward_volume": 0.007, + }, + "behavior": { + "task_id": "DoC_untranslated", + }, + }, + "params": { + "stage": "TRAINING_3_images_A", + }, + "stimuli": { + "images": {"draw_log": [1]*10} + }, + } + } + } + actual = get_task_parameters(data) + expected = { + "blank_duration_sec": [0.5, 0.6], + "stimulus_duration_sec": 6.0, + "omitted_flash_fraction": np.nan, + "response_window_sec": [0.15, 0.75], + "reward_volume": 0.007, + "stage": "TRAINING_3_images_A", + "stimulus": "images", + "stimulus_distribution": "geometric", + "task": "DoC_untranslated", + "n_stimulus_frames": 10 + } + for k, v in actual.items(): + # Special nan checking since pytest doesn't do it well + try: + if np.isnan(v): + assert np.isnan(expected[k]) + else: + assert expected[k] == v + except (TypeError, ValueError): + assert expected[k] == v + assert list(actual.keys()) == list(expected.keys()) diff --git a/allensdk/test/brain_observatory/behavior/test_rewards_processing.py b/allensdk/test/brain_observatory/behavior/test_rewards_processing.py new file mode 100644 index 000000000..fcb7cbd68 --- /dev/null +++ b/allensdk/test/brain_observatory/behavior/test_rewards_processing.py @@ -0,0 +1,29 @@ +import pandas as pd + +from allensdk.brain_observatory.behavior.rewards_processing import get_rewards + + +def test_get_rewards(): + data = { + "items": { + "behavior": { + "trial_log": [ + { + 'rewards': [(0.007, 1085.965144219165, 64775)], + 'trial_params': { + 
'catch': False, 'auto_reward': False, + 'change_time': 5}}, + { + 'rewards': [], + 'trial_params': { + 'catch': False, 'auto_reward': False, + 'change_time': 4} + } + ] + }}} + expected = pd.DataFrame( + {"volume": [0.007], + "timestamps": [1086.965144219165], + "autorewarded": False}).set_index("timestamps", drop=True) + + pd.testing.assert_frame_equal(expected, get_rewards(data, lambda x: x+1.0)) diff --git a/allensdk/test/brain_observatory/behavior/test_running_processing.py b/allensdk/test/brain_observatory/behavior/test_running_processing.py new file mode 100644 index 000000000..492ea2419 --- /dev/null +++ b/allensdk/test/brain_observatory/behavior/test_running_processing.py @@ -0,0 +1,82 @@ +import numpy as np +import pandas as pd +import pytest + +from allensdk.brain_observatory.behavior.running_processing import ( + get_running_df, calc_deriv, deg_to_dist) + + +@pytest.fixture +def running_data(): + return { + "items": { + "behavior": { + "encoders": [ + { + "dx": np.array([0., 0.8444478, 0.7076058, 1.4225141, + 1.5040479]), + "vsig": [3.460190074169077, 3.4692217108095065, + 3.4808338150614873, 3.5014775559538975, + 3.5259919982636347], + "vin": [4.996858536847867, 4.99298783543054, + 4.995568303042091, 4.996858536847867, + 5.00201947207097], + }]}}} + + +@pytest.fixture +def timestamps(): + return np.array([0., 0.01670847, 0.03336808, 0.05002418, 0.06672007]) + + +@pytest.mark.parametrize( + "x,time,expected", [ + ([1.0, 1.0], [1.0, 2.0], [0.0, 0.0]), + ([1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 1.0, 1.0]), + ([1.0, 2.0, 3.0], [1.0, 4.0, 6.0], [1/3, ((1/3)+0.5)/2, 0.5]) + ] +) +def test_calc_deriv(x, time, expected): + assert np.all(calc_deriv(x, time) == expected) + + +@pytest.mark.parametrize( + "speed,expected", [ + (np.array([1.0]), [0.09605128650142128]), + (np.array([0., 2.0]), [0., 2.0 * 0.09605128650142128]) + ] +) +def test_deg_to_dist(speed, expected): + assert np.all(np.allclose(deg_to_dist(speed), expected)) + + +def test_get_running_df(running_data, timestamps): + expected = pd.DataFrame( + {'speed': { + 0.0: 4.0677840296488785, + 0.01670847: 4.468231641421186, + 0.03336808: 4.869192250359061, + 0.05002418: 4.47027713320348, + 0.06672007: 4.070849018882336}, + 'dx': { + 0.0: 0.0, + 0.01670847: 0.8444478, + 0.03336808: 0.7076058, + 0.05002418: 1.4225141, + 0.06672007: 1.5040479}, + 'v_sig': { + 0.0: 3.460190074169077, + 0.01670847: 3.4692217108095065, + 0.03336808: 3.4808338150614873, + 0.05002418: 3.5014775559538975, + 0.06672007: 3.5259919982636347}, + 'v_in': { + 0.0: 4.996858536847867, + 0.01670847: 4.99298783543054, + 0.03336808: 4.995568303042091, + 0.05002418: 4.996858536847867, + 0.06672007: 5.00201947207097}}) + expected.index.name = "timestamps" + + pd.testing.assert_frame_equal(expected, + get_running_df(running_data, timestamps)) diff --git a/allensdk/test/brain_observatory/behavior/test_stimulus_processing.py b/allensdk/test/brain_observatory/behavior/test_stimulus_processing.py new file mode 100644 index 000000000..4d25a6907 --- /dev/null +++ b/allensdk/test/brain_observatory/behavior/test_stimulus_processing.py @@ -0,0 +1,76 @@ + +import numpy as np +import pytest + + +from allensdk.brain_observatory.behavior.stimulus_processing import ( + get_stimulus_presentations, _get_stimulus_epoch, _get_draw_epochs) + + +data = { + "items": { + "behavior": { + "stimuli": { + "images": { + "set_log": [ + ('Image', 'im065', 5.809955710916157, 0), + ('Image', 'im061', 314.06612555068784, 6), + ('Image', 'im062', 348.5941232265203, 12), + ], + "draw_log": ([0]+[1]*3 + 
[0]*3)*3 + [0] + } + }, + # "intervalsms": np.array([16.0]*10) + } + } +} +timestamps = np.array([0.016 * i for i in range(19)]) + + +@pytest.mark.parametrize( + "current_set_ix,start_frame,n_frames,expected", [ + (0, 0, 18, (0, 6)), + (2, 11, 18, (11, 18)) + ] +) +def test_get_stimulus_epoch(current_set_ix, start_frame, n_frames, expected): + log = data["items"]["behavior"]["stimuli"]["images"]["set_log"] + actual = _get_stimulus_epoch(log, current_set_ix, start_frame, n_frames) + assert actual == expected + + +@pytest.mark.parametrize( + "start_frame,stop_frame,expected", [ + (0, 6, [(1, 4)]), + (0, 11, [(1, 4), (8, 11)]) + ] +) +def test_get_draw_epochs(start_frame, stop_frame, expected): + draw_log = data["items"]["behavior"]["stimuli"]["images"]["draw_log"] + actual = _get_draw_epochs(draw_log, start_frame, stop_frame) + assert actual == expected + + +# def test_get_stimulus_templates(): +# pass +# # TODO +# # See below (get_images_dict is a dependency) + + +# def test_get_images_dict(): +# pass +# # TODO +# # This is too hard-coded to be testable right now. +# # convert_filepath_caseinsensitive prevents using any tempdirs/tempfiles + + +# def test_get_stimulus_presentations(): +# pass +# # TODO +# # Monster function with undocumented dependencies + + +# def test_get_visual_stimuli_df(): +# pass +# # TODO +# # See above (this is a dependency of get_stimulus_presentations) diff --git a/allensdk/test/brain_observatory/behavior/test_swdb_behavior_project_cache.py b/allensdk/test/brain_observatory/behavior/test_swdb_behavior_project_cache.py new file mode 100644 index 000000000..bf2aaca0f --- /dev/null +++ b/allensdk/test/brain_observatory/behavior/test_swdb_behavior_project_cache.py @@ -0,0 +1,145 @@ +import os +import numpy as np +import pandas as pd +import pytest +from allensdk.brain_observatory.behavior.swdb import behavior_project_cache as bpc + + +@pytest.fixture +def cache_test_base(): + return '/allen/programs/braintv/workgroups/nc-ophys/visual_behavior/SWDB_2019/test_data' + +@pytest.fixture +def cache(cache_test_base): + return bpc.BehaviorProjectCache(cache_test_base) + +@pytest.fixture +def session(cache): + return cache.get_session(792815735) + +# Test trials extra columns +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_extra_trials_columns(session): + for new_key in ['reward_rate', 'response_binary']: + assert new_key in session.trials.keys() + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_extra_stimulus_presentation_columns(session): + for new_key in [ + 'absolute_flash_number', + 'time_from_last_lick', + 'time_from_last_reward', + 'time_from_last_change', + 'block_index', + 'image_block_repetition', + 'repeat_within_block']: + assert new_key in session.stimulus_presentations.keys() + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_stimulus_presentations_image_set(session): + # We made the image set just 'A' or 'B' + assert session.stimulus_presentations['image_set'].unique() == np.array(['A']) + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_stimulus_templates(session): + # Was a dict with only one key, where the value was a 3d array. 
+ # We made it a dict with image names as keys and 2d arrs (the images) as values + for image_name, image_arr in session.stimulus_templates.items(): + assert image_arr.ndim == 2 + +# Test trial response df +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +@pytest.mark.parametrize('key, output', [ + ('mean_response', 0.0053334), + ('baseline_response', -0.0020357), + ('p_value', 0.6478659), +]) +def test_session_trial_response(key, output, session): + trial_response = session.trial_response_df + np.testing.assert_almost_equal(trial_response.query("cell_specimen_id == 817103993").iloc[0][key], output, decimal=6) + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +@pytest.mark.parametrize('key, output', [ + ('time_from_last_lick', 7.3577), + ('mean_running_speed', 22.143871), + ('duration', 0.25024), +]) +def test_session_flash_response(key, output, session): + flash_response = session.flash_response_df + np.testing.assert_almost_equal(flash_response.query("cell_specimen_id == 817103993").iloc[0][key], output, decimal=6) + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_analysis_files_metadata(cache): + assert cache.analysis_files_metadata[ + 'trial_response_df_params' + ]['response_window_duration_seconds'] == 0.5 + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_session_image_loading(session): + assert isinstance(session.max_projection.data, np.ndarray) + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_no_invalid_rois(session): + # We made the cache return sessions without the invalid rois + assert session.cell_specimen_table['valid_roi'].all() + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_get_container_sessions(cache): + container_id = cache.experiment_table['container_id'].unique()[0] + container_sessions = cache.get_container_sessions(container_id) + session = container_sessions['OPHYS_1_images_A'] + assert isinstance(session, bpc.ExtendedBehaviorSession) + np.testing.assert_almost_equal(session.dff_traces.loc[817103993]['dff'][0], 0.3538657529565) + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_binarized_segmentation_mask_image(session): + np.testing.assert_array_equal( + np.unique(np.array(session.segmentation_mask_image.data).ravel()), + np.array([0, 1]) + + ) + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_no_nan_flash_running_speed(session): + assert not pd.isnull(session.stimulus_presentations['mean_running_speed']).any() + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_licks_correct_colname(session): + assert session.licks.columns == ['timestamps'] + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_rewards_correct_colname(session): + assert (session.rewards.columns == ['timestamps', 'volume', 'autorewarded']).all() + + +@pytest.mark.skip(reason="deprecated") +@pytest.mark.requires_bamboo +def test_dff_traces_correct_colname(session): + # This is a Friday-harbor specific change + assert 'cell_roi_id' not in session.dff_traces.columns diff --git a/allensdk/test/brain_observatory/ecephys/test_ecephys_project_cache.py b/allensdk/test/brain_observatory/ecephys/test_ecephys_project_cache.py index 724472d13..5a89e2df0 100644 --- a/allensdk/test/brain_observatory/ecephys/test_ecephys_project_cache.py +++ 
b/allensdk/test/brain_observatory/ecephys/test_ecephys_project_cache.py @@ -41,21 +41,11 @@ def units(): 'snr': [1.5, 4.9], "amplitude_cutoff": [0.05, 0.2], "presence_ratio": [10, 20], - "isi_violations": [0.3, 0.4] + "isi_violations": [0.3, 0.4], + "quality": ["good", "noise"] }, index=pd.Series(name='id', data=[1, 2])) -@pytest.fixture -def filtered_units(): - return pd.DataFrame({ - 'ecephys_channel_id': [3], - 'snr': [4.2], - 'amplitude_cutoff': [0.08], - 'presence_ratio': [15], - 'isi_violations': [0.35] - }, index=pd.Series(name='id', data=[3])) - - @pytest.fixture def analysis_metrics(): return pd.DataFrame({ @@ -117,7 +107,7 @@ def shared_tmpdir(tmpdir_factory): @pytest.fixture -def mock_api(shared_tmpdir, raw_sessions, units, filtered_units, channels, raw_probes, analysis_metrics): +def mock_api(shared_tmpdir, raw_sessions, units, channels, raw_probes, analysis_metrics): class MockApi: def __init__(self, **kwargs): @@ -130,10 +120,7 @@ def get_sessions(self, **kwargs): return raw_sessions def get_units(self, **kwargs): - if kwargs['filter_by_validity']: - return filtered_units - else: - return units + return units def get_channels(self, **kwargs): return channels @@ -214,9 +201,10 @@ def test_get_sessions(tmpdir_cache, sessions): @pytest.mark.parametrize("filter_by_validity", [False, True]) -def test_get_units(tmpdir_cache, units, filtered_units, filter_by_validity): +def test_get_units(tmpdir_cache, units, filter_by_validity): if filter_by_validity: - lazy_cache_test(tmpdir_cache, '_get_units', "get_units", filtered_units, filter_by_validity=filter_by_validity) + units = units[units["quality"] == "good"].drop(columns="quality") + lazy_cache_test(tmpdir_cache, '_get_units', "get_units", units, filter_by_validity=filter_by_validity) else: units = units[units["amplitude_cutoff"] <= 0.1] lazy_cache_test(tmpdir_cache, '_get_units', "get_units", units, filter_by_validity=filter_by_validity) diff --git a/allensdk/test/brain_observatory/ecephys/test_ecephys_project_lims_api.py b/allensdk/test/brain_observatory/ecephys/test_ecephys_project_lims_api.py index 4c011bfc4..7c8f013d2 100644 --- a/allensdk/test/brain_observatory/ecephys/test_ecephys_project_lims_api.py +++ b/allensdk/test/brain_observatory/ecephys/test_ecephys_project_lims_api.py @@ -1,5 +1,6 @@ import os import re +from unittest import mock import pytest import pandas as pd @@ -10,103 +11,212 @@ ) -@pytest.mark.parametrize( - "method,conditions,expected", +class MockSelector: + + def __init__(self, checks, response): + self.checks = checks + self.response = response + + def __call__(self, query, *args, **kwargs): + self.passed = {} + self.query = query + for name, check in self.checks.items(): + self.passed[name] = check(query) + return self.response + + +@pytest.mark.parametrize("method_name,kwargs,response,checks,expected", [ [ - [ - "get_sessions", - {"published": True}, - re.compile(r".*where true and es\.workflow_state in \('uploaded'\) and es\.habituation = false and es\.published_at is not null and pr\.name in \('BrainTV Neuropixels Visual Behavior','BrainTV Neuropixels Visual Coding'\)$"), - ], - [ - "get_sessions", - {"session_ids": [1, 2, 3]}, - re.compile(r".*and es\.id in \(1,2,3\).*"), - ], - [ - "get_units", - {"session_ids": [1, 2, 3]}, - re.compile(r"select eu\.\*.*and es\.id in \(1,2,3\) and eu.amplitude_cutoff <= 0.1 and eu.presence_ratio >= 0.95 and eu.isi_violations <= 0.5$"), - ], - [ - "get_units", - {"session_ids": [1, 2, 3], "unit_ids": (4, 5, 6)}, - re.compile(r"select eu\.\*.*and eu\.id in \(4,5,6\) 
and es\.id in \(1,2,3\) and eu.amplitude_cutoff <= 0.1 and eu.presence_ratio >= 0.95 and eu.isi_violations <= 0.5$") - ], - [ - "get_channels", - {}, - re.compile(r"select ec\.id as id.*where valid_data and ep.workflow_state != 'failed' and es.workflow_state != 'failed'$"), - ], - [ - "get_probes", - {}, - re.compile(r"select ep\.id as id, ep.ecephys_session_id.*where true and ep.workflow_state != 'failed' and es.workflow_state != 'failed'$"), - ], + "get_units", + {}, + pd.DataFrame({"id": [5, 6], "something": [12, 14]}), + { + "no_pa_check": lambda st: "published_at" not in st + }, + pd.DataFrame( + {"something": [12, 14]}, + index=pd.Index(name="id", data=[5, 6]) + ) ], -) -def test_query(method, conditions, expected): - class MockPgEngine: - def select(self, rendered): - self.query = " ".join([item.strip() for item in str(rendered).split()]) - return pd.DataFrame({"id": [1, 2, 3], "ecephys_channel_id": [1, 2, 3], "genotype": [np.nan, "a", "b"]}) - - pg_engine = MockPgEngine() - api = epla.EcephysProjectLimsApi(postgres_engine=pg_engine, app_engine=None) - - results = getattr(api, method)(**conditions) - - obtained = pg_engine.query.strip() - print(obtained) - match = expected.match(obtained) - assert match is not None - - -def test_get_session_data(): - - session_id = 12345 - wkf_id = 987 - - class MockPgEngine: - def select(self, rendered): - pattern = re.compile( - r".*and ear.ecephys_session_id = (?P\d+).*", re.DOTALL - ) - match = pattern.match(rendered) - sid_obt = int(match["session_id"]) - assert session_id == sid_obt - return pd.DataFrame({"id": [wkf_id]}) - - class MockHttpEngine: - def stream(self, path): - assert path == f"well_known_files/download/{wkf_id}?wkf_id={wkf_id}" + [ + "get_units", + {"session_ids": [1, 2, 3]}, + pd.DataFrame({"id": [5, 6], "something": [12, 14]}), + { + "filters_sessions": lambda st: re.compile(r".+and es.id in \(1,2,3\).*", re.DOTALL).match(st) is not None + }, + pd.DataFrame( + {"something": [12, 14]}, + index=pd.Index(name="id", data=[5, 6]) + ) + ], + [ + "get_units", + {"unit_ids": [1, 2, 3]}, + pd.DataFrame({"id": [5, 6], "something": [12, 14]}), + { + "filters_units": lambda st: re.compile(r".+and eu.id in \(1,2,3\).*", re.DOTALL).match(st) is not None + }, + pd.DataFrame( + {"something": [12, 14]}, + index=pd.Index(name="id", data=[5, 6]) + ) + ], + [ + "get_units", + {"channel_ids": [1, 2, 3], "probe_ids": [4, 5, 6]}, + pd.DataFrame({"id": [5, 6], "something": [12, 14]}), + { + "filters_channels": lambda st: re.compile(r".+and ec.id in \(1,2,3\).*", re.DOTALL).match(st) is not None, + "filters_probes": lambda st: re.compile(r".+and ep.id in \(4,5,6\).*", re.DOTALL).match(st) is not None + }, + pd.DataFrame( + {"something": [12, 14]}, + index=pd.Index(name="id", data=[5, 6]) + ) + ], + [ + "get_units", + {"published_at": "2019-10-22"}, + pd.DataFrame({"id": [5, 6], "something": [12, 14]}), + { + "checks_pa_not_null": lambda st: re.compile(r".+and es.published_at is not null.*", re.DOTALL).match(st) is not None, + "checks_pa": lambda st: re.compile(r".+and es.published_at <= '2019-10-22'.*", re.DOTALL).match(st) is not None + }, + pd.DataFrame( + {"something": [12, 14]}, + index=pd.Index(name="id", data=[5, 6]) + ) + ], + [ + "get_channels", + {"published_at": "2019-10-22", "session_ids": [1, 2, 3]}, + pd.DataFrame({"id": [5, 6], "something": [12, 14]}), + { + "checks_pa_not_null": lambda st: re.compile(r".+and es.published_at is not null.*", re.DOTALL).match(st) is not None, + "checks_pa": lambda st: re.compile(r".+and es.published_at <= 
'2019-10-22'.*", re.DOTALL).match(st) is not None, + "filters_sessions": lambda st: re.compile(r".+and es.id in \(1,2,3\).*", re.DOTALL).match(st) is not None + }, + pd.DataFrame( + {"something": [12, 14]}, + index=pd.Index(name="id", data=[5, 6]) + ) + ], + [ + "get_probes", + {"published_at": "2019-10-22", "session_ids": [1, 2, 3]}, + pd.DataFrame({"id": [5, 6], "something": [12, 14]}), + { + "checks_pa_not_null": lambda st: re.compile(r".+and es.published_at is not null.*", re.DOTALL).match(st) is not None, + "checks_pa": lambda st: re.compile(r".+and es.published_at <= '2019-10-22'.*", re.DOTALL).match(st) is not None, + "filters_sessions": lambda st: re.compile(r".+and es.id in \(1,2,3\).*", re.DOTALL).match(st) is not None + }, + pd.DataFrame( + {"something": [12, 14]}, + index=pd.Index(name="id", data=[5, 6]) + ) + ], + [ + "get_sessions", + {"published_at": "2019-10-22", "session_ids": [1, 2, 3]}, + pd.DataFrame({"id": [5, 6], "something": [12, 14], "genotype": ["foo", np.nan]}), + { + "checks_pa_not_null": lambda st: re.compile(r".+and es.published_at is not null.*", re.DOTALL).match(st) is not None, + "checks_pa": lambda st: re.compile(r".+and es.published_at <= '2019-10-22'.*", re.DOTALL).match(st) is not None, + "filters_sessions": lambda st: re.compile(r".+and es.id in \(1,2,3\).*", re.DOTALL).match(st) is not None + }, + pd.DataFrame( + {"something": [12, 14], "genotype": ["foo", "wt"]}, + index=pd.Index(name="id", data=[5, 6]) + ) + ], + [ + "get_unit_analysis_metrics", + {"ecephys_session_ids": [1, 2, 3]}, + pd.DataFrame({"id": [5, 6], "data": [{"a": 1, "b": 2}, {"a": 3, "b": 4}], "ecephys_unit_id": [10, 11]}), + { + "filters_sessions": lambda st: re.compile(r".+and es.id in \(1,2,3\).*", re.DOTALL).match(st) is not None + }, + pd.DataFrame( + {"id": [5, 6], "a": [1, 3], "b": [2, 4]}, + index=pd.Index(name="iecephys_unit_id", data=[10, 11]) + ) + ] +]) +def test_pg_query(method_name,kwargs, response, checks, expected): - api = epla.EcephysProjectLimsApi( - postgres_engine=MockPgEngine(), app_engine=MockHttpEngine() - ) - api.get_session_data(session_id) + selector = MockSelector(checks, response) + + with mock.patch("allensdk.internal.api.psycopg2_select", new=selector) as ptc: + api = epla.EcephysProjectLimsApi.default() + obtained = getattr(api, method_name)(**kwargs) + pd.testing.assert_frame_equal(expected, obtained, check_like=True, check_dtype=False) + + any_checks_failed = False + for name, result in ptc.passed.items(): + if not result: + print(f"check {name} failed") + any_checks_failed = True + + if any_checks_failed: + print(ptc.query) + assert not any_checks_failed + + +WKF_ID = 12345 +class MockPgEngine: + + def __init__(self, query_pattern): + self.query_pattern = query_pattern + + +class MockTemplatePgEngine(MockPgEngine): + def select_one(self, rendered): + assert self.query_pattern.match(rendered) is not None + return {"well_known_file_id": WKF_ID} -def test_get_probe_data(): - probe_id = 12345 - wkf_id = 987 +class MockDataPgEngine(MockPgEngine): + def select(self, rendered): + assert self.query_pattern.match(rendered) is not None + return pd.DataFrame({"id": [WKF_ID]}) - class MockPgEngine: - def select(self, rendered): - pattern = re.compile( - r".*and earp.ecephys_probe_id = (?P\d+).*", re.DOTALL - ) - match = pattern.match(rendered) - pid_obt = int(match["probe_id"]) - assert probe_id == pid_obt - return pd.DataFrame({"id": [wkf_id]}) - class MockHttpEngine: - def stream(self, path): - assert path == f"well_known_files/download/{wkf_id}?wkf_id={wkf_id}" 
+class MockHttpEngine: + def stream(self, url): + assert url == f"well_known_files/download/{WKF_ID}?wkf_id={WKF_ID}" + + +@pytest.mark.parametrize("method,kwargs,query_pattern,pg_engine_cls", [ + [ + "get_natural_movie_template", + {"number": 12}, + re.compile(".+st.name = 'natural_movie_12'.+", re.DOTALL), + MockTemplatePgEngine + ], + [ + "get_natural_scene_template", + {"number": 12}, + re.compile(".+st.name = 'natural_scene_12'.+", re.DOTALL), + MockTemplatePgEngine + ], + [ + "get_probe_lfp_data", + {"probe_id": 53}, + re.compile(r".+and earp.ecephys_probe_id = 53.+", re.DOTALL), + MockDataPgEngine + ], + [ + "get_session_data", + {"session_id": 53}, + re.compile(r".+and ear.ecephys_session_id = 53.+", re.DOTALL), + MockDataPgEngine + ] +]) +def test_file_getter(method, kwargs, query_pattern, pg_engine_cls): api = epla.EcephysProjectLimsApi( - postgres_engine=MockPgEngine(), app_engine=MockHttpEngine() + postgres_engine=pg_engine_cls(query_pattern), app_engine=MockHttpEngine() ) - api.get_probe_lfp_data(probe_id) + getattr(api, method)(**kwargs) \ No newline at end of file diff --git a/allensdk/test/brain_observatory/gaze_mapping/test_gaze_mapping.py b/allensdk/test/brain_observatory/gaze_mapping/test_gaze_mapping.py index d92f7e7fd..5fbf9ec77 100644 --- a/allensdk/test/brain_observatory/gaze_mapping/test_gaze_mapping.py +++ b/allensdk/test/brain_observatory/gaze_mapping/test_gaze_mapping.py @@ -247,12 +247,30 @@ def test_mapping_gives_sane_outputs(gaze_mapper_fixture, ellipse_fits, expected, pd.Series([4 * np.pi, 16 * np.pi, 81 * np.pi, 729 * np.pi])), (pd.DataFrame({"height": [1, 3, 9, 27], "width": [2, 4, 8, 16]}), - pd.Series([4 * np.pi, 16 * np.pi, 81 * np.pi, 729 * np.pi])) + pd.Series([4 * np.pi, 16 * np.pi, 81 * np.pi, 729 * np.pi])), + + (pd.DataFrame({"height": [np.nan, 3, np.nan, 27], + "width": [2, 4, np.nan, np.nan]}), + pd.Series([4 * np.pi, 16 * np.pi, np.nan, 729 * np.pi])), ]) def test_compute_circular_areas(ellipse_params, expected): obtained = gm.compute_circular_areas(ellipse_params) - assert np.allclose(obtained, expected) + assert np.allclose(obtained, expected, equal_nan=True) + + +@pytest.mark.parametrize('ellipse_params, expected', [ + (pd.DataFrame({"height": [1, 2, 3, 4], "width": [4, 3, 2, 1]}), + pd.Series([4 * np.pi, 6 * np.pi, 6 * np.pi, 4 * np.pi])), + + (pd.DataFrame({"height": [np.nan, 7, 11, 12, np.nan], + "width": [5, 3, 11, np.nan, np.nan]}), + pd.Series([np.nan, np.pi * 21, np.pi * 121, np.nan, np.nan])) +]) +def test_compute_elliptical_areas(ellipse_params, expected): + obtained = gm.compute_elliptical_areas(ellipse_params) + + assert np.allclose(obtained, expected, equal_nan=True) @pytest.mark.parametrize("function_inputs,expected", [ diff --git a/allensdk/test/brain_observatory/gaze_mapping/test_main.py b/allensdk/test/brain_observatory/gaze_mapping/test_main.py new file mode 100644 index 000000000..3c2ba14ec --- /dev/null +++ b/allensdk/test/brain_observatory/gaze_mapping/test_main.py @@ -0,0 +1,157 @@ +from pathlib import Path +import pytest + +import numpy as np +import pandas as pd + +import allensdk.brain_observatory.gaze_mapping.__main__ as main + + +def create_sample_ellipse_hdf(output_file: Path, + cr_data: pd.DataFrame, + eye_data: pd.DataFrame, + pupil_data: pd.DataFrame): + cr_data.to_hdf(output_file, key='cr', mode='w') + eye_data.to_hdf(output_file, key='eye', mode='a') + pupil_data.to_hdf(output_file, key='pupil', mode='a') + + +@pytest.fixture +def ellipse_fits_fixture(tmp_path, request) -> dict: + cr = {"center_x": [300, 305, 
295, 310, 280], + "center_y": [300, 305, 295, 310, 280], + "width": [7, 8, 6, 7, 10], + "height": [6, 9, 5, 6, 8], + "phi": [0, 0.1, 0.15, 0.1, 0]} + + eye = {"center_x": [300, 305, 295, 310, 280], + "center_y": [300, 305, 295, 310, 280], + "width": [150, 155, 160, 150, 155], + "height": [120, 115, 120, 110, 100], + "phi": [0, 0.1, 0.15, 0.1, 0]} + + pupil = {"center_x": [300, 305, 295, 310, 280], + "center_y": [300, 305, 295, 310, 280], + "width": [30, 35, 40, 25, 50], + "height": [25, 27, 30, 20, 45], + "phi": [0, 0.1, 0.15, 0.1, 0]} + + test_dir = tmp_path / "test_load_ellipse_fit_params" + test_dir.mkdir() + + if request.param["create_good_fits_file"]: + test_path = test_dir / "good_ellipse_fits.h5" + else: + test_path = test_dir / "bad_ellipse_fits.h5" + pupil = {"center_x": [300], "center_y": [300], "width": [30], + "height": [25], "phi": [0]} + + cr = pd.DataFrame(cr) + eye = pd.DataFrame(eye) + pupil = pd.DataFrame(pupil) + + create_sample_ellipse_hdf(test_path, cr, eye, pupil) + + return {"cr": pd.DataFrame(cr), + "eye": pd.DataFrame(eye), + "pupil": pd.DataFrame(pupil), + "file_path": test_path} + + +@pytest.mark.parametrize("ellipse_fits_fixture, expect_good_file", [ + ({"create_good_fits_file": True}, True), + ({"create_good_fits_file": False}, False) +], indirect=["ellipse_fits_fixture"]) +def test_load_ellipse_fit_params(ellipse_fits_fixture: dict, expect_good_file: bool): + expected = {"cr_params": pd.DataFrame(ellipse_fits_fixture["cr"]).astype(float), + "pupil_params": pd.DataFrame(ellipse_fits_fixture["pupil"]).astype(float), + "eye_params": pd.DataFrame(ellipse_fits_fixture["eye"]).astype(float)} + + if expect_good_file: + obtained = main.load_ellipse_fit_params(ellipse_fits_fixture["file_path"]) + for key in expected.keys(): + pd.testing.assert_frame_equal(obtained[key], expected[key]) + else: + with pytest.raises(RuntimeError, match="ellipse fits don't match"): + obtained = main.load_ellipse_fit_params(ellipse_fits_fixture["file_path"]) + + +@pytest.mark.parametrize("input_args, expected", [ + ({"input_file": Path("input_file.h5"), + "session_sync_file": Path("sync_file.h5"), + "output_file": Path("output_file.h5"), + "monitor_position_x_mm": 100.0, + "monitor_position_y_mm": 500.0, + "monitor_position_z_mm": 300.0, + "monitor_rotation_x_deg": 30, + "monitor_rotation_y_deg": 60, + "monitor_rotation_z_deg": 90, + "camera_position_x_mm": 200.0, + "camera_position_y_mm": 600.0, + "camera_position_z_mm": 700.0, + "camera_rotation_x_deg": 20, + "camera_rotation_y_deg": 180, + "camera_rotation_z_deg": 5, + "led_position_x_mm": 800.0, + "led_position_y_mm": 900.0, + "led_position_z_mm": 1000.0, + "eye_radius_cm": 0.1682, + "cm_per_pixel": 0.0001, + "equipment": "Rig A", + "date_of_acquisition": "Some Date", + "eye_video_file": Path("eye_video.avi")}, + + {"pupil_params": "pupil_params_placeholder", + "cr_params": "cr_params_placeholder", + "eye_params": "eye_params_placeholder", + "session_sync_file": Path("sync_file.h5"), + "output_file": Path("output_file.h5"), + "monitor_position": np.array([10.0, 50.0, 30.0]), + "monitor_rotations": np.array([np.pi / 6, np.pi / 3, np.pi / 2]), + "camera_position": np.array([20.0, 60.0, 70.0]), + "camera_rotations": np.array([np.pi / 9, np.pi, np.pi / 36]), + "led_position": np.array([80.0, 90.0, 100.0]), + "eye_radius_cm": 0.1682, + "cm_per_pixel": 0.0001, + "equipment": "Rig A", + "date_of_acquisition": "Some Date", + "eye_video_file": Path("eye_video.avi")} + ), + +]) +def test_preprocess_input_args(monkeypatch, input_args: dict, 
expected: dict): + def mock_load_ellipse_fit_params(*args, **kwargs): + return {"pupil_params": "pupil_params_placeholder", + "cr_params": "cr_params_placeholder", + "eye_params": "eye_params_placeholder"} + + monkeypatch.setattr(main, "load_ellipse_fit_params", + mock_load_ellipse_fit_params) + + obtained = main.preprocess_input_args(input_args) + + for key in expected.keys(): + if isinstance(obtained[key], np.ndarray): + assert np.allclose(obtained[key], expected[key]) + else: + assert obtained[key] == expected[key] + + +@pytest.mark.parametrize("pupil_params_rows, expected, expect_fail", [ + (5, pd.Series([1, 2, 3, 4, 5]), False), + (4, None, True) +]) +def test_load_sync_file_timings(monkeypatch, pupil_params_rows, expected, expect_fail): + def mock_get_synchronized_frame_times(*args, **kwargs): + return pd.Series([1, 2, 3, 4, 5]) + + monkeypatch.setattr(main.su, "get_synchronized_frame_times", + mock_get_synchronized_frame_times) + + if expect_fail: + with pytest.raises(RuntimeError, match="number of camera sync pulses"): + main.load_sync_file_timings(Path("."), pupil_params_rows) + + else: + obtained = main.load_sync_file_timings(Path("."), pupil_params_rows) + assert expected.equals(obtained) diff --git a/allensdk/test/brain_observatory/sync_utilities/test_sync_utilities.py b/allensdk/test/brain_observatory/sync_utilities/test_sync_utilities.py index d92342ef4..829aa60cb 100644 --- a/allensdk/test/brain_observatory/sync_utilities/test_sync_utilities.py +++ b/allensdk/test/brain_observatory/sync_utilities/test_sync_utilities.py @@ -1,7 +1,36 @@ import pytest import numpy as np +from functools import partial + from allensdk.brain_observatory import sync_utilities as su +from allensdk.brain_observatory.sync_dataset import Dataset + + +class MockDataset(Dataset): + def __init__(self, path: str, + eye_tracking_timings, behavior_tracking_timings): + # Note: eye_tracking_timings and behavior_tracking_timings are test + # inputs that can be parametrized and do not exist in the real + # `Dataset` class. 
+ self.eye_tracking_timings = eye_tracking_timings + self.behavior_tracking_timings = behavior_tracking_timings + + def get_edges(self, kind, keys, units='seconds'): + if keys == self.EYE_TRACKING_KEYS: + return self.eye_tracking_timings + elif keys == self.BEHAVIOR_TRACKING_KEYS: + return self.behavior_tracking_timings + + +@pytest.fixture +def mock_dataset_fixture(request): + test_params = { + "eye_tracking_timings": [], + "behavior_tracking_timings": [] + } + test_params.update(request.param) + return partial(MockDataset, **test_params) @pytest.mark.parametrize('vs_times, expected', [ @@ -9,4 +38,19 @@ ]) def test_trim_discontiguous_vsyncs(vs_times, expected): obtained = su.trim_discontiguous_times(vs_times) - assert np.allclose(obtained, expected) \ No newline at end of file + assert np.allclose(obtained, expected) + + +@pytest.mark.parametrize("mock_dataset_fixture,sync_line_label_keys,expected", [ + ({"eye_tracking_timings": [0.020, 0.030, 0.040, 0.050, 3.0]}, + Dataset.EYE_TRACKING_KEYS, [0.020, 0.030, 0.040, 0.050]), + + ({"behavior_tracking_timings": [0.080, 0.090, 0.100, 0.110, 8.0]}, + Dataset.BEHAVIOR_TRACKING_KEYS, [0.08, 0.090, 0.100, 0.110]) +], indirect=["mock_dataset_fixture"]) +def test_get_synchronized_frame_times(monkeypatch, mock_dataset_fixture, + sync_line_label_keys, expected): + monkeypatch.setattr(su, "Dataset", mock_dataset_fixture) + + obtained = su.get_synchronized_frame_times("dummy_path", sync_line_label_keys) + assert np.allclose(obtained, expected) diff --git a/allensdk/test/brain_observatory/test_session_analysis_regression.py b/allensdk/test/brain_observatory/test_session_analysis_regression.py index 369139485..1dcc466be 100644 --- a/allensdk/test/brain_observatory/test_session_analysis_regression.py +++ b/allensdk/test/brain_observatory/test_session_analysis_regression.py @@ -4,7 +4,6 @@ import pytest import os -import tempfile import json from pkg_resources import resource_filename # @UnresolvedImport import numpy as np @@ -102,9 +101,8 @@ def nm2(nwb_c, analysis_c): return NaturalMovie.from_analysis_file(BODS(nwb_c), analysis_c, si.NATURAL_MOVIE_TWO) @pytest.fixture(scope="module") -def analysis_a_new(nwb_a): - with tempfile.NamedTemporaryFile(delete=True) as tf: - save_path = tf.name +def analysis_a_new(nwb_a, tmpdir_factory): + save_path = str(tmpdir_factory.mktemp("session_a") / "session_a_new.h5") logging.debug("running analysis a") session_analysis = SessionAnalysis(nwb_a, save_path) @@ -114,13 +112,10 @@ def analysis_a_new(nwb_a): yield save_path - if os.path.exists(save_path): - os.remove(save_path) @pytest.fixture(scope="module") -def analysis_b_new(nwb_b): - with tempfile.NamedTemporaryFile(delete=True) as tf: - save_path = tf.name +def analysis_b_new(nwb_b, tmpdir_factory): + save_path = str(tmpdir_factory.mktemp("session_b") / "session_b_new.h5") logging.debug("running analysis b") session_analysis = SessionAnalysis(nwb_b, save_path) @@ -130,13 +125,10 @@ def analysis_b_new(nwb_b): yield save_path - if os.path.exists(save_path): - os.remove(save_path) @pytest.fixture(scope="module") -def analysis_c_new(nwb_c): - with tempfile.NamedTemporaryFile(delete=True) as tf: - save_path = tf.name +def analysis_c_new(nwb_c, tmpdir_factory): + save_path = str(tmpdir_factory.mktemp("session_c") / "session_c_new.h5") logging.debug("running analysis c") session_analysis = SessionAnalysis(nwb_c, save_path) @@ -152,9 +144,6 @@ def analysis_c_new(nwb_c): yield save_path - if os.path.exists(save_path): - os.remove(save_path) - def compare_peak(p1, p2): assert 
len(set(p1.columns) ^ set(p2.columns)) == 0 diff --git a/allensdk/test/brain_observatory/test_session_analysis_regression_data.json b/allensdk/test/brain_observatory/test_session_analysis_regression_data.json index b862e2859..5ef6a8c77 100644 --- a/allensdk/test/brain_observatory/test_session_analysis_regression_data.json +++ b/allensdk/test/brain_observatory/test_session_analysis_regression_data.json @@ -8,9 +8,9 @@ "nwb_c": "/allen/aibs/informatics/module_test_data/observatory/py2_analysis/569494121.nwb" }, "3": { - "analysis_a": "/allen/aibs/informatics/module_test_data/observatory/py3_analysis/510859641_three_session_A_analysis.h5", - "analysis_b": "/allen/aibs/informatics/module_test_data/observatory/py3_analysis/510698988_three_session_B_analysis.h5", - "analysis_c": "/allen/aibs/informatics/module_test_data/observatory/py3_analysis/510532780_three_session_C_analysis.h5", + "analysis_a": "/allen/aibs/informatics/module_test_data/observatory/py3_analysis/new_ks_2samp/510859641_three_session_A_analysis.h5", + "analysis_b": "/allen/aibs/informatics/module_test_data/observatory/py3_analysis/new_ks_2samp/510698988_three_session_B_analysis.h5", + "analysis_c": "/allen/aibs/informatics/module_test_data/observatory/py3_analysis/new_ks_2samp/510532780_three_session_C_analysis.h5", "nwb_a": "/allen/aibs/informatics/module_test_data/observatory//plots/510859641.nwb", "nwb_b": "/allen/aibs/informatics/module_test_data/observatory/plots/510698988.nwb", "nwb_c": "/allen/aibs/informatics/module_test_data/observatory/plots/510532780.nwb" diff --git a/allensdk/test/internal/brain_observatory/test_time_sync.py b/allensdk/test/internal/brain_observatory/test_time_sync.py index da15cf458..86f655f3f 100644 --- a/allensdk/test/internal/brain_observatory/test_time_sync.py +++ b/allensdk/test/internal/brain_observatory/test_time_sync.py @@ -434,3 +434,18 @@ def test_monitor_delay(scientifica_input): delay = ts.monitor_delay(dset, stim_times, "stim_photodiode", assumed_delay=30) assert delay == 30 + + +@pytest.mark.parametrize("deserialized_pkl,expected", [ + ({"vsynccount": 100}, 100), + ({"items": {"behavior": {"intervalsms": [2, 2, 2, 2, 2]}}}, 6), + ({"vsynccount": 20, "items": {"behavior": {"intervalsms": [3, 3]}}}, 20) +]) +def test_get_stim_data_length(monkeypatch, deserialized_pkl, expected): + def mock_read_pickle(*args, **kwargs): + return deserialized_pkl + + monkeypatch.setattr(ts.pd, "read_pickle", mock_read_pickle) + obtained = ts.get_stim_data_length("dummy_filepath") + + assert obtained == expected diff --git a/allensdk/test/model/test_glif.py b/allensdk/test/model/test_glif.py index ee172f900..e96dd0d25 100644 --- a/allensdk/test/model/test_glif.py +++ b/allensdk/test/model/test_glif.py @@ -37,10 +37,8 @@ from allensdk.api.queries.glif_api import GlifApi import allensdk.core.json_utilities as json_utilities from allensdk.model.glif.glif_neuron import GlifNeuron -from allensdk.model.glif.simulate_neuron import simulate_neuron from allensdk.core.nwb_data_set import NwbDataSet import os -# import matplotlib.pyplot as plt @pytest.fixture @@ -85,7 +83,6 @@ def configured_glif_api(glif_api, neuronal_model_id, neuron_config_file, return glif_api - @pytest.fixture def output(neuron_config_file, ephys_sweeps_file): neuron_config = json_utilities.read(neuron_config_file) @@ -112,7 +109,6 @@ def output(neuron_config_file, ephys_sweeps_file): @pytest.fixture def stimulus(neuron_config_file, ephys_sweeps_file): - neuron_config = json_utilities.read(neuron_config_file) ephys_sweeps = 
json_utilities.read(ephys_sweeps_file) ephys_file_name = 'stimulus.nwb' @@ -125,12 +121,6 @@ def stimulus(neuron_config_file, ephys_sweeps_file): return stimulus -def test_cache_stimulus(neuron_config_file, ephys_sweeps_file, fn_temp_dir, - configured_glif_api): - nwb_path = os.path.join(fn_temp_dir, 'stimulus.nwb') - configured_glif_api.cache_stimulus_file(nwb_path) - - def test_run_glifneuron(configured_glif_api, neuron_config_file): # initialize the neuron neuron_config = json_utilities.read(neuron_config_file) @@ -145,101 +135,10 @@ def test_run_glifneuron(configured_glif_api, neuron_config_file): # simulate the neuron output = neuron.run(stimulus) - voltage = output['voltage'] - threshold = output['threshold'] - spike_times = output['interpolated_spike_times'] - - -@pytest.mark.skipif(True, reason="needs nwb file") -def test_3(configured_glif_api, neuron_config_file, ephys_sweeps_file): - neuron_config = json_utilities.read(neuron_config_file) - ephys_sweeps = json_utilities.read(ephys_sweeps_file) - ephys_file_name = 'stimulus.nwb' - - neuron = GlifNeuron.from_dict(neuron_config) - - # sweep_numbers = [ s['sweep_number'] for s in ephys_sweeps - # if s['stimulus_units'] == 'Amps' ] - sweep_numbers = [7] - simulate_neuron(neuron, sweep_numbers, - ephys_file_name, ephys_file_name, 0.05) - - -@pytest.mark.skipif(True, reason="needs nwb file") -def test_4(output): - voltage = output['voltage'] - threshold = output['threshold'] - spike_times = output['interpolated_spike_times'] - - -@pytest.mark.skipif(True, reason="needs nwb file") -def test_5(output): - voltage = output['voltage'] - threshold = output['threshold'] - interpolated_spike_times = output['interpolated_spike_times'] - spike_times = output['interpolated_spike_times'] - interpolated_spike_voltages = output['interpolated_spike_voltage'] - interpolated_spike_thresholds = output['interpolated_spike_threshold'] - grid_spike_indices = output['spike_time_steps'] - grid_spike_times = output['grid_spike_times'] - after_spike_currents = output['AScurrents'] - -# # create a time array for plotting -# time = np.arange(len(stimulus))*neuron.dt -# -# plt.figure(figsize=(10, 10)) -# -# # plot stimulus -# plt.subplot(3,1,1) -# plt.plot(time, stimulus) -# plt.xlabel('time (s)') -# plt.ylabel('current (A)') -# plt.title('Stimulus') -# -# # plot model output -# plt.subplot(3,1,2) -# plt.plot(time, voltage, label='voltage') -# plt.plot(time, threshold, label='threshold') -# -# if grid_spike_indices: -# plt.plot(interpolated_spike_times, interpolated_spike_voltages, 'x', -# label='interpolated spike') -# -# plt.plot((grid_spike_indices-1)*neuron.dt, voltage[grid_spike_indices-1], '.', -# label='last step before spike') -# -# plt.xlabel('time (s)') -# plt.ylabel('voltage (V)') -# plt.legend(loc=3) -# plt.title('Model Response') -# -# # plot after spike currents -# plt.subplot(3,1,3) -# for ii in range(np.shape(after_spike_currents)[1]): -# plt.plot(time, after_spike_currents[:,ii]) -# plt.xlabel('time (s)') -# plt.ylabel('current (A)') -# plt.title('After Spike Currents') -# -# plt.tight_layout() -# plt.show() - + expected_fields = {"AScurrents", "grid_spike_times", + "interpolated_spike_threshold", + "interpolated_spike_times", + "interpolated_spike_voltage", + "spike_time_steps", "threshold", "voltage"} -@pytest.mark.skipif(True, reason="needs nwb file") -def test_6(configured_glif_api, neuron_config_file, stimulus): - # define your own custom voltage reset rule - # this one linearly scales the input voltage - def custom_voltage_reset_rule(neuron, 
voltage_t0, custom_param_a, custom_param_b): - return custom_param_a * voltage_t0 + custom_param_b - - # initialize a neuron from a neuron config file - neuron_config = json_utilities.read(neuron_config_file) - neuron = GlifNeuron.from_dict(neuron_config) - - # configure a new method and overwrite the neuron's old method - method = neuron.configure_method('custom', custom_voltage_reset_rule, - {'custom_param_a': 0.1, 'custom_param_b': 0.0}) - neuron.voltage_reset_method = method - - truncate = 56041 - output = neuron.run(stimulus[0:truncate]) + assert expected_fields.difference(output.keys()) == set() diff --git a/allensdk/test_utilities/custom_comparators.py b/allensdk/test_utilities/custom_comparators.py new file mode 100644 index 000000000..5183b45bf --- /dev/null +++ b/allensdk/test_utilities/custom_comparators.py @@ -0,0 +1,25 @@ +import re +from typing import Union +import difflib + + +class WhitespaceStrippedString(object): + """Comparator class to compare strings that have been stripped of + whitespace. By default removes any unicode whitespace character that + matches the regex \s, (which includes [ \t\n\r\f\v], and other unicode + whitespace characters). + """ + def __init__(self, string: str, whitespace_chars: str = r"\s", + ASCII: bool = False): + self.orig = string + self.whitespace_chars = whitespace_chars + self.flags = re.ASCII if ASCII else 0 + self.differ = difflib.Differ() + self.value = re.sub(self.whitespace_chars, "", string, self.flags) + + def __eq__(self, other: Union[str, "WhitespaceStrippedString"]): + if isinstance(other, str): + other = WhitespaceStrippedString( + other, self.whitespace_chars, self.flags) + self.diff = list(self.differ.compare(self.value, other.value)) + return self.value == other.value diff --git a/appveyor.yml b/appveyor.yml index b78fce112..549ae99c1 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -20,7 +20,6 @@ environment: install: - set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%PATH% - conda config --set always_yes yes --set changeps1 no - - conda install conda==4.6.14 - conda create -q -n test-environment python=%PYTHON% pip - activate test-environment - conda install statsmodels diff --git a/doc_template/brain_observatory.rst b/doc_template/brain_observatory.rst index e57d6e122..97f909bd2 100644 --- a/doc_template/brain_observatory.rst +++ b/doc_template/brain_observatory.rst @@ -12,6 +12,12 @@ an experiment container have different stimulus protocols, but cover the same im .. image:: /_static/container_session_layout.png :align: center +**Note:** Version 1.3 of scipy fixed an error in its 2 sample Kolmogorov-Smirnoff test implementation. The new version produces more accurate p values for small and medium-sized samples. +This change impacts speed tuning analysis p values (as returned by `StimulusAnalysis.get_speed_tuning`). +If you access precalculated analysis results via `BrainObservatoryCache.get_ophys_experiment_analysis`, you will see values calculated +using an older version of scipy's `ks_2samp`. To access values calculated from the new version, install scipy>=1.3.0 in your environment and construct a `StimulusAnalysis` object +from a `BrainObservatoryNwbDataSet` (as returned by `BrainObservatoryCache.get_ophys_experiment_data`). + **Note:** Data collected after September 2016 uses a new session C stimulus designed to better-characterize spatial receptive fields in higher visual areas. The original locally sparse noise stimulus used 4.65 visual degree pixels. 
Session C2 broke that stimulus into two separate stimulus blocks: one with 4.65 degree pixels and one with 9.3 degree pixels. Note that the :py:mod:`~allensdk.brain_observatory.stimulus_info` diff --git a/doc_template/examples_root/examples/internal/Lims Behavior Project Cache.ipynb b/doc_template/examples_root/examples/internal/Lims Behavior Project Cache.ipynb new file mode 100644 index 000000000..b6228fd06 --- /dev/null +++ b/doc_template/examples_root/examples/internal/Lims Behavior Project Cache.ipynb @@ -0,0 +1,927 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Visual Behavior Data Project Cache\n", + "A short introduction to analyzing the Visual Behavior data.\n", + "This notebook uses the LIMS API to access data, so it will only work on the Allen Institute network.\n", + "\n", + "Please note that local caching functionality has not been implemented, as there are currently no NWB files for these data. Because the data may change over time, whenever a Session object is created through the cache, we note when the data were accessed in a session log." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os.path\n", + "import pandas as pd\n", + "import SimpleITK as sitk\n", + "import matplotlib.pyplot as plt\n", + "from allensdk.brain_observatory.behavior.behavior_project_cache import BehaviorProjectCache\n", + "from allensdk.brain_observatory.behavior.image_api import ImageApi" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `BehaviorProjectCache` is the main entry point to the Visual Behavior project dataset. It allows you to view cross-session summary information and create classes to analyze individual sessions. It supports both behavior-only sessions as well as sessions with behavior and optical physiology recordings." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# this path determines where the downloaded data will be stored\n", + "\n", + "manifest_path = os.path.join(\"example_behavior_project_cache\", \"manifest.json\")\n", + "\n", + "cache = BehaviorProjectCache.from_lims(manifest=manifest_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can view all session records present in the LIMS database." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array(['VisualBehaviorMultiscope', 'VisualBehavior',\n", + " 'VisBIntTestDatacube', 'VisBehNeuroModAx', 'VisualBehaviorTask1B',\n", + " 'MesoscopeDevelopment', 'VisBehViralDev',\n", + " 'VisualBehaviorDevelopment', 'VisualBehaviorIntegrationTest',\n", + " 'DevelopmentMultiscope4areasx2d',\n", + " 'VisualBehaviorMultiscope4areasx2d'], dtype=object)" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ophys_sessions = cache.get_session_table()\n", + "ophys_sessions.head()\n", + "ophys_sessions.project_code.unique()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If we want to get more specific, we can look at all experiment records in the LIMS database. We can then filter down to what we're interested in." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
ophys_session_idbehavior_session_idcontainer_idproject_codecontainer_workflow_stateexperiment_workflow_statesession_namesession_typeequipment_namedate_of_acquisitionisi_experiment_idspecimen_idsexage_in_daysfull_genotypereporter_linedriver_lineimaging_depthtargeted_structurepublished_at
ophys_experiment_id
953659741952430817952554548949264660VisualBehaviorMultiscopeholdingfailed20190923_457841_2imagesAOPHYS_2_images_A_passiveMESO.12019-09-23 08:13:07.627573858992726850862430F209.0Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt[Ai148(TIT2L-GC6f-ICL-tTA2)][Sst-IRES-Cre]NaNVISpNaN
953659756952430817952554548949264660VisualBehaviorMultiscopeholdingfailed20190923_457841_2imagesAOPHYS_2_images_A_passiveMESO.12019-09-23 08:13:07.627573858992726850862430F209.0Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt[Ai148(TIT2L-GC6f-ICL-tTA2)][Sst-IRES-Cre]NaNVISlNaN
953659749952430817952554548949264660VisualBehaviorMultiscopeholdingpassed20190923_457841_2imagesAOPHYS_2_images_A_passiveMESO.12019-09-23 08:13:07.627573858992726850862430F209.0Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt[Ai148(TIT2L-GC6f-ICL-tTA2)][Sst-IRES-Cre]NaNVISlNaN
953659747952430817952554548949264660VisualBehaviorMultiscopeholdingfailed20190923_457841_2imagesAOPHYS_2_images_A_passiveMESO.12019-09-23 08:13:07.627573858992726850862430F209.0Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt[Ai148(TIT2L-GC6f-ICL-tTA2)][Sst-IRES-Cre]NaNVISpNaN
953659743952430817952554548949264660VisualBehaviorMultiscopeholdingpassed20190923_457841_2imagesAOPHYS_2_images_A_passiveMESO.12019-09-23 08:13:07.627573858992726850862430F209.0Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt[Ai148(TIT2L-GC6f-ICL-tTA2)][Sst-IRES-Cre]NaNVISpNaN
\n", + "
" + ], + "text/plain": [ + " ophys_session_id behavior_session_id container_id \\\n", + "ophys_experiment_id \n", + "953659741 952430817 952554548 949264660 \n", + "953659756 952430817 952554548 949264660 \n", + "953659749 952430817 952554548 949264660 \n", + "953659747 952430817 952554548 949264660 \n", + "953659743 952430817 952554548 949264660 \n", + "\n", + " project_code container_workflow_state \\\n", + "ophys_experiment_id \n", + "953659741 VisualBehaviorMultiscope holding \n", + "953659756 VisualBehaviorMultiscope holding \n", + "953659749 VisualBehaviorMultiscope holding \n", + "953659747 VisualBehaviorMultiscope holding \n", + "953659743 VisualBehaviorMultiscope holding \n", + "\n", + " experiment_workflow_state session_name \\\n", + "ophys_experiment_id \n", + "953659741 failed 20190923_457841_2imagesA \n", + "953659756 failed 20190923_457841_2imagesA \n", + "953659749 passed 20190923_457841_2imagesA \n", + "953659747 failed 20190923_457841_2imagesA \n", + "953659743 passed 20190923_457841_2imagesA \n", + "\n", + " session_type equipment_name \\\n", + "ophys_experiment_id \n", + "953659741 OPHYS_2_images_A_passive MESO.1 \n", + "953659756 OPHYS_2_images_A_passive MESO.1 \n", + "953659749 OPHYS_2_images_A_passive MESO.1 \n", + "953659747 OPHYS_2_images_A_passive MESO.1 \n", + "953659743 OPHYS_2_images_A_passive MESO.1 \n", + "\n", + " date_of_acquisition isi_experiment_id \\\n", + "ophys_experiment_id \n", + "953659741 2019-09-23 08:13:07.627573 858992726 \n", + "953659756 2019-09-23 08:13:07.627573 858992726 \n", + "953659749 2019-09-23 08:13:07.627573 858992726 \n", + "953659747 2019-09-23 08:13:07.627573 858992726 \n", + "953659743 2019-09-23 08:13:07.627573 858992726 \n", + "\n", + " specimen_id sex age_in_days \\\n", + "ophys_experiment_id \n", + "953659741 850862430 F 209.0 \n", + "953659756 850862430 F 209.0 \n", + "953659749 850862430 F 209.0 \n", + "953659747 850862430 F 209.0 \n", + "953659743 850862430 F 209.0 \n", + "\n", + " full_genotype \\\n", + "ophys_experiment_id \n", + "953659741 Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt \n", + "953659756 Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt \n", + "953659749 Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt \n", + "953659747 Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt \n", + "953659743 Sst-IRES-Cre/wt;Ai148(TIT2L-GC6f-ICL-tTA2)/wt \n", + "\n", + " reporter_line driver_line \\\n", + "ophys_experiment_id \n", + "953659741 [Ai148(TIT2L-GC6f-ICL-tTA2)] [Sst-IRES-Cre] \n", + "953659756 [Ai148(TIT2L-GC6f-ICL-tTA2)] [Sst-IRES-Cre] \n", + "953659749 [Ai148(TIT2L-GC6f-ICL-tTA2)] [Sst-IRES-Cre] \n", + "953659747 [Ai148(TIT2L-GC6f-ICL-tTA2)] [Sst-IRES-Cre] \n", + "953659743 [Ai148(TIT2L-GC6f-ICL-tTA2)] [Sst-IRES-Cre] \n", + "\n", + " imaging_depth targeted_structure published_at \n", + "ophys_experiment_id \n", + "953659741 NaN VISp NaN \n", + "953659756 NaN VISl NaN \n", + "953659749 NaN VISl NaN \n", + "953659747 NaN VISp NaN \n", + "953659743 NaN VISp NaN " + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ophys_experiments = cache.get_experiment_table()\n", + "ophys_experiments.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's analyze the most recent passed experiment for the Visual Behavior project.\n", + "\n", + "Note that the session data will not be downloaded to your local machine. However, the time this ID was last accessed will be recorded in the analysis log (in seconds since epoch)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Latest experiment id: 978244684. Acquired on 2019-11-07 20:02:18.000000. Session: OPHYS_4_images_B\n" + ] + } + ], + "source": [ + "latest = ophys_experiments.query(\"project_code == 'VisualBehavior'\"\n", + " \"& experiment_workflow_state == 'passed'\")\\\n", + " .sort_values(\"date_of_acquisition\", ascending=False).iloc[0]\n", + "\n", + "print(f\"Latest experiment id: {latest.name}. Acquired on {latest['date_of_acquisition']}. \"\n", + " f\"Session: {latest['session_type']}\")\n", + " \n", + "session = cache.get_session_data(latest.name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can look at metadata about the session:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'rig_name': 'CAM2P.5',\n", + " 'sex': 'M',\n", + " 'age': 'unknown',\n", + " 'excitation_lambda': 910.0,\n", + " 'emission_lambda': 520.0,\n", + " 'indicator': 'GCAMP6f',\n", + " 'field_of_view_width': 447,\n", + " 'field_of_view_height': 512,\n", + " 'ophys_experiment_id': 978244684,\n", + " 'experiment_container_id': 968890932,\n", + " 'ophys_frame_rate': 31.0,\n", + " 'stimulus_frame_rate': 60.0,\n", + " 'targeted_structure': 'VISp',\n", + " 'imaging_depth': 175,\n", + " 'session_type': 'OPHYS_4_images_B',\n", + " 'experiment_datetime': Timestamp('2019-11-07 20:02:18+0000', tz='UTC'),\n", + " 'reporter_line': ['Ai93(TITL-GCaMP6f)'],\n", + " 'driver_line': ['Camk2a-tTA', 'Slc17a7-IRES2-Cre'],\n", + " 'LabTracks_ID': 483803,\n", + " 'full_genotype': 'Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai93(TITL-GCaMP6f)/wt',\n", + " 'behavior_session_uuid': UUID('d7593ceb-4420-4d42-8c70-41f8ae71b9a6')}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "session.metadata" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can examine 2d images of cells:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXcAAACUCAYAAABoZ2lmAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOy9aZBk2XUe9t3lvZcvs7L23qdnBpjBUAOAFAFBJCR6CYmQSEiySZumTStkyQpTDAdlm3Y4wqGQ//qHQ+GgJPuHIyhKDCksW7IphQSaDJISl6BIcQEMAqAGy2AwmJme3qtrzcyXb7n3+se523uZ1VXdXVWd3agTUVFVmW/LzKrvnved73yHGWNwHudxHudxHs9X8Kd9AedxHudxHudx8nEO7udxHudxHs9hnIP7eZzHeZzHcxjn4H4e53Ee5/Ecxjm4n8d5nMd5PIdxDu7ncR7ncR7PYZwKuDPGvp8x9jXG2FuMsb92Guc4j6cT55/t8xnnn+vzF+ykde6MMQHgTQB/CsD7AD4L4D81xnz5RE90Hmce55/t8xnnn+vzGaeRuX8XgLeMMW8bYyoA/wjAD5zCec7j7OP8s30+4/xzfQ7jNMD9GoAb0e/v28fO49mP88/2+Yzzz/U5DHkKx2RzHpvhfhhjPwbgxwBAQP6RAV+ZsycLj5nWznMO6ba1341pP+boJ/e725+xzj7R8YyZuSadSvBKhUMwewwTHU+bh1xftJ/7ro0/juHMH49pQ8fVOpyDsWh/+7s7r3vOGMAdRxsYyf22TBs6H4+u2RiAc6BR2K/vbRljLnQ/ifAiZqL1Qtufq/gjA7EKCDG767wjnWSw0z7B/HMaBrDoHTH+780/Av+3wBm9eRwwnIE14W/UcIA3JjoOA7OfF/2N2EOZ9p+S+5z9n4i9noODmyf6ufaxfPT7cR6nHgfYOfRzPQ1wfx/A9ej3FwDc6m5kjPkpAD8FAMt83Xx3+v0AACYlmBAwTUMb2n9SlveAqoZRip7vfHfBVpZhRmNAcKCqwfo5jDGAUoAQYPZ4pqzCY0sDAtD4+uz5mZQEfHUNJAmQSNp2WgL9HKibsJPg9Hsi6Xl6nYBSdBy3fV3DGEPXIgRMWQJKg/Vzuqbo/P4aOtdklvoE1ADM/gGgNJ1fCKCqw75LA6hLq+DfuEnHznvAcADce0CvPUthjIG+fhH8zffwS3t/790n+WxbnytbN58c/PvgK8uAFHgqweffnBrBwZRufX/sY2tNP9tjGMlb25hEwAgGEy84nBZylXHwxkBMG+hEQOUChjM0OUd+rwRT2j+e7Ffg0wZqkKJaSaAyjmynBlMGRjDwUoEZA50IsEaDGQPVk2DK4Fd/43880c/1u9n3Pvr79ZzFL936AgDg+65+51O7hn9pfvbQz/U0wP2zAD7EGPsAgJsAfgTAn3/4LuGP3jQNTNN44PaPH4z8Yw7k4u9++3v3W2BvDkbhNMb4fzC/vVLAngXUzkIBIWCq2i8CmBT+cShFv7uf43CPRc+Zqg77u2MD7e04DwtDBNJG2IXgwjpwZwsGDbC7DyOEB34HLB7Y3XmLAvwbdrETHCimrWvUByPg1RfxtR/t49X/6xXg1x/6QT3GZxuFW4DOIg4B9TgcoD8WsHfPFR3DgS1rNMAMdE9CW4AX0wZM0d2Y6qcQpUY9lOA1JRe8ou/VskCTCzAtIIoGrOGol1Okiu68xFRDTGlb1mgwzehuTBmwRkMUNXRPYnw1Rf9OPXvN7Xiyz/U8zjTcogIA4srh2504uBtjGsbYfwXglwAIAH/PGPPGEXvNPtIBzBjsW+DdBVZ0QDqiWvzjEcADANKEsnz3ewzYDny7j88D9fhxIcI/fHyM+LhpQtso+ic1RQT+Soc7i7xHGf/2Hsz1S2B3t8HSlO4m3Dk718BYSncn3eMBMJfWwW7d99cx+uAyXvo5DaiHK6ce77M94zgC1J8YyI8KxmBElKEbA6YMmDCQByWBfh0lLYKBaWD7wzlWv15C9QRUxiAnGjrlYBowXEKnlOFXKylkofxdGy8VeNUAnKNayyDHDQynazCCI79Xwxzxkp+Jz/U8HjlOI3OHMeYXAPzCiR7zGMDunpsH+D7DpQsMP8c0RgzAXcCc91gX4OPnYxCPv7vtIlAP29jf08RmubSdGY3Bsoxex91toqfcfhurYOMCmJYwZQWWpfQSHe2UJu1jCwG+P/HLKRMCwzd3Mf7ACtJ3t2bet2480Wd72sB6jGz9VI4fvS4H7EyFegifVOCTAOTg3NOAvNbQkmHjD8aU8XMGgKPpcyQjjSbnSCrK6HXKwSvaRktGnDxnMIkAqxXqJUFZfamhewmgDXTKke4dmbmfyv/s8x6PQ8ecJJVDx3jr0OdPBdwfN7pUzLx4WEZvLLfts/NO1k47sDb1AgSQjrNyB77dLL61H28/7jP2zvHcMQHiuMsqgC0QwNwfh0egrGYWHceTMymhJcfoD1/G8It3iHZqGloIBG+dn62twOzs0XuxdxCOm/eAG7cxuHUPuooy/UWPY2TozN0RzVtUIoB96DncNkctHNHfm6Nl1CCDGJeWnmGef6cQYNqgGUhPxbiFwWXaKuVgCjDSLhiaiqzMgjagoaWALACdCGTbbRA3kiM5aPz7cB7PRxx3YVgo+4GjgN1FF5zd70yItgIEiFQkZu6+czNvpQhsHcDGz8VgHxUuZ2gXB/JxCE5FWUHgzbJ09hxun4jrZ4xRUdZm7z4rTxJMLw8wuiJgiindnbgCLai47B+zwK5fudZ+HVUN9eGX51NMixrHzNCN4LPAznnY/7DjxNu43w9bCNzx40TCArgoaqhBRlRNIvzjRJkwqExAFIpoFMbApw1EqcAbAy0ZDl4QEBVl9gCBvk456mECnTBMNxO6E9AG080UWjLolPtFQuX2/+IIuu085scv3fqC/zqpOMvi60KB+6NGl6KZWRy6EsXDtjuMWnEA67bpAnz8e7y/A2ynXnH7Kx149arG+GMvBiWN49/9cehnJiVcF7EpS+Lf7XlMVSH/vW/g8s++2X4tjp4qCphJ4SkplqXg79ym7dLEH0N8+R37Ji44CHRB93HiMJB2x+4e/6iFoJMVM0UFVADhez8lYLfnpsyeQ5SKVC61RjOgbXmlwCuN5Tf3sfnFCVTKMb4krbwWYI0BrzR4bdDbsgV3m6E76SRTBnxaQ44byvLlM/1v/kzHvIXhrAB+oWiZOI6iaGJ+nQkxl4qZyeLdfodx6zGtEoejaao53GV8jbYwC8dudDP7DlXT/62vWdVMY5+P5IzRosKk9NdmRmOSd1r5ZHw8MxwAZRleR1UTheNURbG6RxFtoz/yQey8voTNn39zsbP3R1C/PNL+7vG4BjLvdyBIHh96EQbCFk75tGlx8HExlQlGChpOz8tCoVrPKXOvNaBJ8cKbFP37tE+6V4MpjXo59fuJogEzBrxWMIJDS+plaIYZqhWJ/O709IvIz2mcFAj/0q0vPBW55EKCe1e/HhdTXeY9F/w7WfrMNu75Ljfuogv0aTLLnc8Lx5
l3ZIj+mO4YXbWNP5eeAWpHx5SvX0P2lZstvbyZFETpSEvRuAVhd5/2rWoANS0GTYOZOw13zrwH8fX3sfl1LC6wPwIF8yT7z5Vp+prKMXl3IPztKWNpFAm5PyVgh6VMOEe9lECOG4hGw0hOFE2poBNu9e8DGAvUvLF8e6XAagW9noE3hvh3K+9lxoApDaaFP0a619D7ohf8juw5j6elg1/IJT0ukB4ne589AHV2xvu6zN5EGTGANuftHu9qxrvRXRhaGZ/wlMfM4hGd0zcsxefzdxS0UBhjkH1zyzc8sTy3YM1hjAnArjQ9n2WhCOxed5YFYHeLlYuqBq5eXFxgf0g4Pn0urw60KRan8Y+/uiF4qM/MC7fPw5qxLLfuf60V5D71FbCGsnHHfyejmhqOMoHphdR3DjNFBVNea8hxgyYP2biWHCpPIKYahoNkkz0JnQn/Hji9fD0UkfrmPJ4knoR3f5oNTgsJ7odFTMHMSCPjNvxOtDpY4wXBZdxpEvHkAubqhfn0jH3eFyXjuHpxthhqt5/h5JUOGXV3e78f0SZmNKbvxrQ5/M7CY8qKOPks9QVbAK3zuI7YVty61379ixCH8d82jqQZOJ8F4S5wd8GeMc+Rt7ZnLFp8eUdGG/al6+r87dkkAwAtwJaaAQj4DSO+Pd2lxiYjKbMnKwINXilk27UvsBrJYSRx9U2fW8CnIqrK7Gdss3TK2u25Fr2W8hTiJIukixoLAu5HZxfzKJrDN5493tw7ANut6TNed+wbd9tUigsrQ+TfuNlWuigFnUqwtRW/3UwTU3x5WQpzaX1+UdZn2HoGiM1oHF2LBfDV4PHBpCQppJTh9RRTQCnUH3nJZ/1B6qkWC9SPEUfy6m5B8Ith9HtM27m7LQvgjg/XPelBls7H5icO8XW4gneWBIDvAqqgY3rlCueeSnGFVRdMGxRXetCSAHy6LgDOIvWLRv/mFKyhBUEU9PflOPxzdczx4nkH+AUB94f/McYc/ENBvbUTO3xB6IKmtShgm+tzKJKgLW/t0zQBgPMexM4B3QK3Mu/QZRrTNKasaAGJI97PZeWba63r8IVVpb0sEgdjf81BVUNSyUBBaST3DgK3HwN6VaP+Q9dni4dnGbEipZOtH0q/xNl9l4IBWq/HSD4XoH2mnIhDzn0IwB+ywLCyBp82BOyd99PIoHHXkqSVfNpQBs/JF4bbYqvhDMlIwUgOnVCXKRxt02iIgxK8aiAnNZgONI4Df53Q9akeecycRzseFdS/7+p3PlV65XFjMcDdJVRzgHueOVh7Azb/Z8x6xbT49pgTt9my2dqeVbg4QHaKE9fSH//zKgVTTIHdfeLS4+fnNUHFCpd5jUMOPO5uhWN1MnljfWo81WIzcbY08Fl9yz9nezecu5jSArFCWX/y1Rv2oE8h4ztKajhv+6O27YCvz2Rj+iTeRmuwmoqVTJm5tJDTpsfHaVE3zHrJOKfOOWobnTq5Y+OP7QCfNwT2YkryRV5pL5MEgGq1rX1gyoDVyuvkxbShLtaEw3BaIOS4Ac4z+ZlwQP0sAvajxGKoZdyd7CEqmONm635f273Z2t9qugFEFAgP9EuFWblizMnHEcsVuxYC82JeA5S7tqVB4NznHcMVXNdXYbZ3Z4uu8Xaw1I0QlPXfukdvrZVFtvh/pYLmXml6b07LJrd73GN0h85tPnL7dJ9zSpeYaunSJjGwcw4w44GVKVIixQAe+7/48yN0j848H71WOgaz7tIMaBSY4RBF7c9noL3/C2voZy3p74zXGnxaQ/VT8EpBauN593olBbfbG2vhrBMO1ZME8iVdl+Pgoc1sLeA8viViMTL3OfFIFIyN2GLAKW288iYG9q7KpStX9Bm7bQu/eiHQIEAExkGXznLK2ElLbt/WjdX2BXaVM3nPd5+2m53UbPa5P4rO3b5rYP08XHNV05fN+tnm+pzXOKsGYmn6yO/3Y4fTi7cy4/k/P7Sj1N3RuJ/td0+juEXF2xCwANIR4LVA3WbEc89nt9U9eThgWqMwOhhr3yXEC1p0bObMxZwPv2tYcpbOklNjUmMgxw10KqBdt6uldLrBHKh/C6pljlK3PO9cu4uFBfdDI/6ndUMtnJ8MDlfGsDTizD0dw9tZdRz2ebY0oH/S3IK7kznG2WJVR1lldFfgdOdxVh4vHsW07dyY99oF1Qi8jONxW1p4un7i/9sLAssyYGMVb/7YldD96s67vtp+H2wc1/7htOKRm21iFcscy10jSGHiipnhPJG5l9s+sgcAEDpKI8/2VlbPO8qZDqXVOrZVubQWC63J8Mt+vqy0C60dpKJTCZUJ6JQ8aOQBWUo4v/dqJYHO6DkH4O6uwvjfaaEwp3VHtoBxHOB+3ukYFwsJ7q0M8iGcugN2cwhAxXr5Ft999SL9HClNWkAP+J/NaGz9sk1bJ94BFjMpPBAzKX0m37YUsMd3vjLuetxxu+6UcVQ1HcvZD7i3xOrl2fpqS6VjlgfA7j4+9L9Ya4J4Ybi/TRn9PA3+CQebR8nMKZrOjW6RtPtz53eTSXrt3QKpNe0C5zbzTlp3AzHY656EyZL2Nbp9E0G8udun2xENtAd1dMIvMJENAVMGfFLRMaP3qhkmRLFoUtTEMk0jGJJR40HcLRBy3EAnpLBxTVS0qHzrgPtxgTve7qT9Y44TZ3HOxQD3OAGKqBV6ICqGGTMD/Edlm2xpQH4sMc1zw/qr3HsQNux2KLqsPE2I4tA6AFWXg5/T7KQurc5dLNz23goglj3OZOU8XEdMpTi6R2lS+sQdroJTveFOVIy118dWlmmbjVWYnT167+xCw/r56dMyhxRCY9dCD/RdYO82H3WKmTGotvzSE+G9XXRP0pjEae1VMso+plM504Dkhmq0rjVyfgzX3KGBomhJHJ3O3S4W7jrdtYETjeKLodH5PKduC6iuscmZjrFGA46isW6TsL7uzrfmWyWcuuVbhX45LBYD3IFZeqCr3HAyP/fcHGXMvPDa8Dm0CBsuRZSFfStiA69IFmnyDOrKZrvj00XrZw5TVuA37rUz8cO6IrvXFmfwvibA0eKX3aLk+P6lAUxRWKtf4SWR3QXLF1t39wnU05S2LyuvvnkacSjH7uKQBjU/vs4BKCfJY/x4bNgFkCrFAbPbRmfCyyJblE5M09jnnKrGPQZm1ShWR99VprjsPF4ojGCB5omujSkd+HalfaFUJ8I2MXUGgQB+nJ6RHE1OenhP17iO16bzv/SMxqNku267RQT4s/KaWQxwZ/CZdauQOu+fOgL2w6STM8XYGGQjisYMcrBBv32AlkwyKGF0LwW/cadN78T8ehe8uwDt9on37TYqxc/HnH7URTv55KvtCU5Kob6+4RuYWouDi1gVFNcJnCzyLOI4Usd5NIyLeX8LcWGWB826588j6kVLS3toPWMAxmttVSfEcau4U9VuN5PFH+UF34kuz9+67oTsA2hWqgR3dw0grbpO7JxVm7XzmvxoDGe+GOtmsTpA55X2C5YomsMu6zzw9Dj40z7vYoC7M2/sKFw8kMdZvPt9jiabLQ3oh4g28YXUWNPuM
um9A5prGisuunJIuw+/cYfkiE7vfu1i2Mcd3x+DBxsAAL4xKb4GFx7oO0odd0xnDaAUGGPov7ffyuL1K9eRvHufCrPuHC5cQTheRLo+ODbzd81YZxUPLZ4eNWu1O8oOaGXdrU27j0VqGRroYULrfqPR5II80V32H3mwx8Ouuxr4ebRMl5KJr0FZxQ2rFS0mrkGpaPwiAxCP7pwijQhSR9e41H0fY9qIV5oonGw2CVqUeJRs/FHA8FG17GcF8Gd5J7EY4N6hKmcyctb553E8a7dJaTQmuiHPA1C1eGzRzrKrOphvAbOA4pQxoCIp9g4oQ752EezOg/Y+LitOE+DSpn2ss2h0i7aOb3fXFke0uLC8R/y6MeQFE+0v7u1g75PXO4sR9xm8o6XUh14IC4V73l6/2duH97E5wzjU9GvO5+Gz8q4E0dEbrsgZFUihdcjYLZ+tehJqkHkpIWB15ZVCtUbzSXml7T7cWxLoVIZjuWuKB3B0KJxDpZL2zsFZ9JpEUOOSA2VjvARS9USQQ1qeHa4j1TY5Ta5kqIepnbUazqkyDl6TtcEiNzE5UD1NgH9YnMZAjtOKR73GxQD3KOba9B7CvwOYpXOqmppziml7kejIEf0ouug4frv4dyAMzLCdquzOA19cdZOOPM2iFLC1Y4/R4fG7FI2ni+bQOoB/nrjyAMi+C7WqYfIMS++Ow+IRLxZ2UQAA8fX3w/VZu19c3gzbugz+acTDuk5dId3RIhGId7NnKowSxcGnTdCOR6EyHjhphFZ9bQdeONrDHdM3OjkNurvemD+PiqNAp9gaLUiee4cr2NLi4x7TmdWva4MmFzR+zxVNBc1MbQbS32kYzpDfrWxHKjy11AwkmIa3/n1W4mkA7LMmi3yU92hhyuhznR7jiCma+Psc6SObNwq0q1YBacHD0AsdJIlzsmjGGExH0cIYCx4zQCuL7+ZKbHnoR93NUjOd/WN6ZU4nrAd7ANjdB9/dn1HVsH5OIJSm1MAVH8MtFNt78I1PAMzobGaoHkrJzHu8NXBljhrFcus6EZQN221cITU+F8kE2zpwrw23/LTqCYipomzaFV5dtj7HF52GU1dA01Y6MRXRMpxD9aUf4uH2M4KDV4qUWOAQ08afS1p+3SleAEsx9QTQaP+chntNlOlzO5aPNYjsBxbbX8YpW04aaJ/WkIyHxWHXE4P2w675UV7PgmTuh0ga58nLOg1MLS27jZlj+aw2LlSqYIfrvFiiRiefmTNGc08R9OuMMfJTjyc8ZeksXx6d1wM70B5+3fpu9c/OnyZ+DkBL5x4XbONBIbZD1ZQlJh++jPFHrwDDgZ/KxKQMrpFVPd8G+KTjON2n86IjdwTQyphb0kWrguEVDbwoN3q+SchYeSEAb7SlMoHxtcy7LboM3oGj9ouDBdiOXlz3U5+x+0zdFWvdNbm7Detd47N83+BkX0siWiocP3ij0XZcXk2LTUOP1UOJZkCv3RWDmT2tTjnEVPnGKcr6F+Tf/CFxGsAefz/q3E97Eejq7o/a5jixIJ+6mW83cEjhNNa+z3OMbB3HAeylTWz9Bx8h4HQNP/E2IIsCJqUHb69rF9wDPOraF1WdlBAA0TXdgmRVtzxuAIQF5jDVjePWu1SNy8ytha8rtrIsDeofZ7FgufzsXoE7f3kKtTkkY7Smoa+yivj3UFCduf4TjpaG/Sh/mXm+/J2iJEBmXMpqxJtBEiSLjaaiZeQpo1Pu2/VVj0MWGtWytcl1KpNag2l42sYVXOd1uM5w/NF1daWR3pAMbVllbAMcJJZUi+BNZGhWK9KxNwa8NqiWuB+c7a7bzUwFgGpZUBfrM0bNuHgSHvxZ4M+PipN4DQsC7p1sO3bak3I2g58jhTQW8Fo2Ay6UxtYn1sGVoeLj9i6YlFAfuBwOaYHNZeOOZzcbq0DdtOwFHGCbLLGZry2UTsuQCdvCpqdt0iSoUuxxukZenjbp+sfEssx4IfAF4WhRi94Tfus+Pvhf34W8uT0rvXScfyLDQJCzkkX6C+wsXnGYtt48ljm2u0fpb0EnlBHXyylULvyw6GaYQGVRg5Nt1xdTDVESUPoCZsa9q6JOOHQvQT1MoVMephpFQOk18Ymg4SCJpO9d6W6nNsCU8XYDsZeNSUhv7+x948VD9VNS8kgGMVXo362JjrELi7P7hc3ik5GyC4H2dyVnEScNrI96PEfFxF/PSpz0tS4IuLN2tu2yHpedWuA9rIOStQAyKojmPf/75s+/CVEayKmGfvkKTFkS6Ll9HDADMEt94tllZBDVoVDYoE9e6r3MA6a+uBayejcABPDANdfe1+vPw4LgVTeWcvELiIvYrTLeP45YMVMUYThIZ+qTKaZ+cTrNDtXjqmIABHCMZYuRBLElT3SFUUn0irBqF15r760CEL+uUpvtJ6QkURmDKKMmH87Q9AWYJv66yQU1DtliJY3KC9l4iwtPQiOU87RxPvKtTL+zX+vxJHjF8KZTsG00ZKHsXQXVCOjuxMk4rYrGvhZeaf9azzpOAuAfB+gWuXHpuHGSAL8gBdX5Ui3TNODDJeqeNIaA8xATpJlBHEoBRXQ3UNVY+Y1vto9fFAFUk8TTMhhNYJb6YKMJ/XNawEbTBMpmUhD4F9NA36go628az63HhVfXTerDF2F1G7Sdl8ylTRjn697tVJ3Rtc/TstM+7NZ9WuzWV8iaYH0VuL8dwD5N5i8+pxWH8cCRZS4AT+FoyT3gAQEQeaOhAQhQ5i2mNF7OMNKEN7nwBUfeGDR94Zt9AEBMlQV6yop5Yzxv3fQFdMIgx6RsafoJgbwhVQqNwmv89fgB2O6lWGvf1suzC4HuC8/Js1qF+oE3DrP1gqiuAG1oahNnUD0OxhmSA7oDoEXJ+AEdvDHQ0HTX8YwOyH4UoDtuQfJZiJO6/gXJ3A8JxqB3doPX+EMsB9j6asfYS4QmIPe7BTLPg0dURwvYooIm39oDYyxY84K4dmNvtx3YA4iGayi/PUvTkM33c7p1bylhOkoZF4IDF9bBHuy2pZZdqwR7PgC0fWxIFhWQfSH1Pt2tMLd4OErprBUVxzifEQzNkNw4nYe5a+5xDUisVh70RRmKl9zOIAWoyFitSsrus8BVy0J7u9ymL7x6xg27cLSHHDd0PitLdHLD1iAMd0fRNSSLCqhAkEayWvlr9duz4C3DFGntjeC+Y5YZWpTcYA7YwrDhDMJl6va1uTsQc8b/4celQk46uz6tARyneRdw2vr6xQX3Q/xE/NNdCmFa0mBrIIDfhfW5IEKDpLPQ+WnBnqW2OFlWYGXtte2A5eKFwN73vAzUVmVS15h++IUA8IKGaxt7l8GkBOoou+a2uLW5FvzhBUf9kRcDb740CGC9vQd9aT3yfOezyhggmH81KrL/bS9uXf8YsxfG7tUfeQnmxcvz6xUnHfMMwR6yHbcdnN7DXBvUw9Q29QQaBCDFjEkEVD9tWfQ2fQ5e20y9pKzWqUscmI6vCBgBr6ypViRYQ8OrjeSYXMl84VInPGjQremXW3hiFU8zzKByKvKqPJmxGG751GtNi5Sjkdz7YsGc
[… remainder of base64-encoded PNG output omitted: three-panel matplotlib figure showing the session's max projection, average projection, and segmentation mask …]
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "fig, ax = plt.subplots(1, 3)\n", + "ax[0].imshow(session.max_projection)\n", + "ax[1].imshow(session.average_projection)\n", + "ax[2].imshow(session.segmentation_mask_image)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also look at data for behavior-only sessions. These data do not have optical physiology recordings associated with them." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " ophys_session_id behavior_training_id equipment_name \\\n", + "behavior_session_id \n", + "846710859 846605051.0 NaN CAM2P.4 \n", + "820627398 NaN NaN MESO.1 \n", + "767197943 NaN NaN CAM2P.3 \n", + "768638583 768434190.0 NaN CAM2P.4 \n", + "768879508 NaN NaN CAM2P.3 \n", + "\n", + " donor_id \\\n", + "behavior_session_id \n", + "846710859 814111925 \n", + "820627398 703198154 \n", + "767197943 642238406 \n", + "768638583 703210569 \n", + "768879508 642238406 \n", + "\n", + " full_genotype \\\n", + "behavior_session_id \n", + "846710859 Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai94(TITL-G... \n", + "820627398 Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai93(TITL-G... \n", + "767197943 Slc17a7-IRES2-Cre/wt;Ai162(TIT2L-GC6s-ICL-tTA2... \n", + "768638583 Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai93(TITL-G... \n", + "768879508 Slc17a7-IRES2-Cre/wt;Ai162(TIT2L-GC6s-ICL-tTA2... \n", + "\n", + " reporter_line \\\n", + "behavior_session_id \n", + "846710859 [Ai94(TITL-GCaMP6s)] \n", + "820627398 [Ai93(TITL-GCaMP6f)] \n", + "767197943 [Ai162(TIT2L-GC6s-ICL-tTA2)] \n", + "768638583 [Ai93(TITL-GCaMP6f)] \n", + "768879508 [Ai162(TIT2L-GC6s-ICL-tTA2)] \n", + "\n", + " driver_line sex \\\n", + "behavior_session_id \n", + "846710859 [Camk2a-tTA, Slc17a7-IRES2-Cre] F \n", + "820627398 [Camk2a-tTA, Slc17a7-IRES2-Cre] M \n", + "767197943 [Slc17a7-IRES2-Cre] M \n", + "768638583 [Camk2a-tTA, Slc17a7-IRES2-Cre] F \n", + "768879508 [Slc17a7-IRES2-Cre] M \n", + "\n", + " foraging_id \\\n", + "behavior_session_id \n", + "846710859 b183a85b-6a29-4054-9d3c-915a0408a10a \n", + "820627398 e3bc6a4b-3477-41bc-966b-ef8e3d35b6cd \n", + "767197943 7a1d6674-df2c-53e7-ad85-9065a520634d \n", + "768638583 cf1769ef-623e-4bb9-a698-a82986e91727 \n", + "768879508 3e2e0696-70f9-5839-888a-a90668e66a4d \n", + "\n", + " session_type \n", + "behavior_session_id \n", + "846710859 OPHYS_5_images_B_passive \n", + "820627398 OPHYS_1_images_A \n", + "767197943 0_gratings_autorewards_15min \n", + "768638583 5_images_a_ophys \n", + "768879508 1_gratings " + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# get all behavior sessions\n", + "behavior_sessions = cache.get_behavior_session_table()\n", + "behavior_sessions.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at one behavior session.\n", + "\n", + "Note that when accessing behavior data using `get_behavior_session_data`, different data sources may be used than when using `get_session_data` for shared attributes. In addition, there may also be some changes to how the data are processed. For example, when examining the stimulus presentations for a behavior session, there is no correction for monitor delay (unlike for ophys sessions). Behavior sessions also use lower temporal resolution lick monitors than ophys sessions.\n", + "\n", + "Please see the documentation for additional details." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---- Metadata ----\n", + "rig_name: CAM2P.4\n", + "sex: F\n", + "age: P123\n", + "ophys_experiment_id: [847241639]\n", + "experiment_container_id: 876693136\n", + "stimulus_frame_rate: 60.0\n", + "session_type: OPHYS_5_images_B_passive\n", + "experiment_datetime: 2019-04-05 15:57:39.013000+00:00\n", + "reporter_line: ['Ai94(TITL-GCaMP6s)']\n", + "driver_line: ['Camk2a-tTA', 'Slc17a7-IRES2-Cre']\n", + "LabTracks_ID: 442709\n", + "full_genotype: Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai94(TITL-GCaMP6s)/wt\n", + "behavior_session_uuid: b183a85b-6a29-4054-9d3c-915a0408a10a\n", + "foraging_id: b183a85b-6a29-4054-9d3c-915a0408a10a\n", + "behavior_session_id: 846710859\n", + "behavior_training_id: None\n", + "------------------\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYcAAAEHCAYAAABFroqmAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3dd5wU5f3A8c/3jt4E5EB6F0VERcReAMWCLZpEjV0TU0wsMcUSTUws5JeYGBNjib3XGI0FNQg2kCYgAiIHHEXa0esdV76/P2b22LstN3s3uzO7+32/Xve63dnZmWdnd57vPGWeR1QVY4wxJlpB0AkwxhgTPhYcjDHGxLDgYIwxJoYFB2OMMTEsOBhjjIlhwcEYY0yMJkHuXERKgG1AFVCpqsNFpCPwItAHKAG+q6qbkm2nU6dO2qdPn7Sm1Rhjcs3MmTPXq2pRvNcCDQ6ukaq6Pur5jcAEVR0nIje6z3+dbAN9+vRhxowZ6UyjMcbkHBFZlui1MFYrnQU86T5+Ejg7wLQYY0xeCjo4KPCeiMwUkavcZV1UdTWA+79zYKkzxpg8FXS10tGqukpEOgPvi8hXXt/oBpOrAHr16pWu9BljTF4KtOSgqqvc/+uA14ARwFoR6Qrg/l+X4L0Pq+pwVR1eVBS3PcUYY0wDBRYcRKS1iLSNPAbGAF8CbwCXuqtdCrweTAqNMSZ/BVmt1AV4TUQi6XhOVceLyHTgJRG5ElgOfCfANBpjTF4KLDio6hLgoDjLNwCjM58iY4wxEUH3VjKmli27KvjvnFVBJ8OYvBd0byVjarn+xdl88NU6DujWjn5FbYJOjjF5y0oOJlRWbd4FQHlldcApMSa/WXAwxhgTw4KDCb01W8r407tfUV1t850bkynW5mBC7dWZK7nh5TkAnLh/Fw7p1SHgFBmTH6zkYEJF6xQObnptbs1jKzgYkzkWHEwoOfdGGmOCYsHBhIpSu3igdYsSxpiMsOBgQklwig4VVRYcjAmCBQcTKskKClbVZEzmWHAwoWSBwJhgWXAwWcOaH4zJHAsOxhgAZi7byLxVW4JOhgkJuwnOZI2KKhtvKZ3OfWAKACXjxgacEhMGVnIwvvpm8y4e/HAxqkp1tfKvj5awo7zS8/uT1RzNXLap8Qk0xnhiJQfjqyufmM5Xa7Zx+tCuzF25hTvfXsCKTTv5/VlDPL0/cl+DtUcbEywrORjPNu/czTXPz2JbWUXCdba7pQRV2FVRBcC2Mu8lhwgRuwHOmCBZcDCePTBpMW/MWcWzU5fXu65q43sXvTB9ReM2YIxpMAsOpl6fLFrPT56dmbQ9ICLe/QkNrSK66d9zaz2PHrL7wkc+44lPlzZwy8aY+lhwMPW6+LGpvD13TUaqebzu4dPiDfzuv/PTmhZj8pkFB5M2DQklS0p3ALBqc5m/iTHGpMSCg0mLWqOrNqBeqWTDDv8SY4xJmQUHU69UapMkKhL4XQ1lfZeMyRwLDsYzSWE0PK1VcEi96BDvHZMXr+fjRaUpb8sYk7rAg4OIFIrILBF5033eV0SmisgiEXlRRJoFnUbTMEvX+1s19NmSjVz86DRft2lMJrw2ayVbdia+PyiMAg8OwLXAgqjnfwT+qqoDgU3AlYGkyiTkpRwgAv+ctBiA8sqqBuzE7pE2uaF43Xauf3EO1704K+ikpCTQ4CAiPYCxwCPucwFGAa+4qzwJnB1M6kxjRFcrvfnFal+3vWD1Vl+3Z0w6lbkjBazdWh5wSlITdMnhXuBXQGS4zb2BzaoaGW9hJdA9iISZhsnEBf+Fj0xN/06MyXOBBQcROR1Yp6ozoxfHWTVuJxURuUpEZojIjNJSa6TMhFR6H6WzZ5FVOJlslG297YIsORwNnCkiJcALONVJ9wLtRSQyWmwPYFW8N6vqw6o6XFWHFxUVZSK9xpWJ0oEFAJMrsrX5LLDgoKo3qWoPVe0DnA98oKoXAhOBb7urXQq8HlASTUhl68lmTDYJus0hnl8DPxeRYpw2iEcDTo9xealVsnzbmPiybQj6UEz2o6qTgEnu4yXAiCDTY5LzclNbY0+EZKWDLDvHTJ5ryE2gYRDGkoMJkehM3tuQ3ek/ETbs2J32fRiT7yw4mKRWbtpV83jFxp2e3zfqng8btd9svdoyJldYcDBJFZduT2n9HeWpTwlqTC7L1g4UFhyMr6p9ahB4a27cHswmQ3aUV9qd6HnOgoPxmT+XSZ8Wb/BlO2GzblsZfW58i8+WOJ9veslGtpaFb0C2Hzw1g1P/9jGVVdX1r2w8ybaOFBYcTHIp/qCztQidKTNLNgHwxKclbC+v5DsPTuGHT82s512ZN23pRiD77uoNo2w9Jyw4mKSiZ3Sr9pBTlG7LrsHFglRR6VyVL1hj1TcmfCw4mKTWb9vTbTTSrTVMV0IL12wLOgmNtjlk4/zvrrSqJGPBwdTjrncWxF3+p3e/4vS/f5zh1MQ6+d6Pgk5Cg2iIK2z2/c07IU6dyZRQ3CFtwivRVe39ExdnOCUmndZtLYu7PESFRJNhVnIwoRSmqqt02LIrXFVJI+6aEHQSTMhYcDCeTVmSm91Lvdq4YzffbN5V/4oefLZkY8YD4FtfrLabFAMU5qrEe
[… remainder of base64-encoded PNG output omitted: running-speed trace for behavior session 846710859, speed (cm/s) versus time (s) …]
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "behav_sess = cache.get_behavior_session_data(846710859)\n", + "\n", + "print(\"---- Metadata ----\")\n", + "print(\"\\n\".join([f\"{k}: {v}\" for k, v in behav_sess.metadata.items()]))\n", + "print(\"------------------\")\n", + "\n", + "# Plot the running speed\n", + "plt.plot(behav_sess.running_speed.timestamps, behav_sess.running_speed.values)\n", + "_ = plt.xlabel('Time (Second)')\n", + "_ = plt.ylabel('Speed (cm/Second)')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Memory management\n", + "Session objects cache certain values to enable faster access, such as time-consuming queries to the LIMS database.\n", + "In order to prevent issues with running out of memory when working with session objects in a loop, you should use the `cache_clear` method to clear the caches." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Current cache size:\n", + "1\n", + "Cache size after clearing: \n", + "0\n" + ] + } + ], + "source": [ + "# Example of a cached query\n", + "# You can look at the cache of any call by invoking `cache_size` on a cached function\n", + "print(\"Current cache size:\")\n", + "print(behav_sess.api.get_driver_line.cache_size())\n", + "# Clear the cache from the session object\n", + "behav_sess.cache_clear()\n", + "print(\"Cache size after clearing: \")\n", + "print(behav_sess.api.get_driver_line.cache_size())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our analysis log has recorded which sessions we've accessed. If we want to mock out the behavior of a \"fixed\" cache, like `EcephysProjectCache.fixed()`, we pass `fixed=True` to `get_session_data`. If the ophys_experiment_id is not in the log, then it will not load." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
ophys_experiment_idcreated_atupdated_at
097824468415742035031574206089
\n", + "
" + ], + "text/plain": [ + " ophys_experiment_id created_at updated_at\n", + "0 978244684 1574203503 1574206089" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# See our log\n", + "pd.read_csv(\"example_behavior_project_cache/ophys_analysis_log.csv\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Warning! Passing `fixed=True` does not ensure that the underlying data has not changed, as no data are actually cached locally. The log will be updated each time the data are pulled from the database for tracking purposes.\n" + ] + }, + { + "ename": "MissingDataError", + "evalue": "Data for ophys experiment 12345 not found!", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mMissingDataError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# Trying to load an ID that isn't in the log will raise an error\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mcache\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_session_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m12345\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfixed\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m~/miniconda3/envs/allensdk-bpc/lib/python3.6/site-packages/allensdk/brain_observatory/behavior/behavior_project_cache.py\u001b[0m in \u001b[0;36mget_session_data\u001b[0;34m(self, ophys_experiment_id, fixed)\u001b[0m\n\u001b[1;32m 256\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mophys_experiment_id\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrecord\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"ophys_experiment_id\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 257\u001b[0m raise MissingDataError(\n\u001b[0;32m--> 258\u001b[0;31m \u001b[0;34mf\"Data for ophys experiment {ophys_experiment_id} not \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 259\u001b[0m \"found!\")\n\u001b[1;32m 260\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mMissingDataError\u001b[0m: Data for ophys experiment 12345 not found!" + ] + } + ], + "source": [ + "# Trying to load an ID that isn't in the log will raise an error\n", + "cache.get_session_data(12345, fixed=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Warning! Passing `fixed=True` does not ensure that the underlying data has not changed, as no data are actually cached locally. 
The log will be updated each time the data are pulled from the database for tracking purposes.\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# But it will work if we use one that already exists\n", + "cache.get_session_data(978244684, fixed=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:allensdk-bpc]", + "language": "python", + "name": "conda-env-allensdk-bpc-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}
diff --git a/doc_template/index.rst b/doc_template/index.rst
index ac360a8be..8bbdc722d 100644
--- a/doc_template/index.rst
+++ b/doc_template/index.rst
@@ -90,6 +90,20 @@ The Allen SDK provides Python code for accessing experimental metadata along wit
 See the `mouse connectivity section `_ for more details.
+What's New - 1.2.0 (November 21, 2019)
+-----------------------------------------------------------------------
+
+The 1.2.0 release adds
+- (internal feature) A project cache for the Behavior Ophys project, with example notebook
+- (internal feature) A major overhaul of the `BehaviorOphysLimsApi`
+- (internal feature) Updates to the `EcephysProjectLimsApi` such that it returns data in the same format as the `EcephysProjectWarehouseApi`
+- improved eye-tracking area calculation
+
+and fixes
+- several flaky tests
+- regression tests which depend on scipy's `ks_2samp`
+- (internal feature) duplicate caching on the Behavior Ophys Lims Api
+
 What's New - 1.1.1 (November 12, 2019)
 -----------------------------------------------------------------------
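Usage sketch for the "Memory management" pattern shown in the notebook above: when iterating over many behavior sessions, do the per-session work and then call `cache_clear()` before moving on so that memoized query results do not accumulate. This is only a sketch; it assumes `cache` is the behavior project cache object constructed earlier in the notebook, and the session ID list is a placeholder.

    # Sketch: process several behavior sessions without letting memoized
    # query results accumulate in memory. `cache` is assumed to be the
    # project cache used in the notebook; the ID list is a placeholder.
    session_ids = [846710859]  # replace with the behavior session IDs of interest
    for session_id in session_ids:
        session = cache.get_behavior_session_data(session_id)
        # do some work with the session, e.g. summarize running speed
        mean_speed = session.running_speed.values.mean()
        print(f"{session_id}: mean running speed = {mean_speed:.2f} cm/s")
        # drop the memoized query results before the next iteration
        session.cache_clear()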
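A similar sketch for the `fixed=True` behavior demonstrated above: ophys experiment IDs already present in the analysis log load normally, while missing IDs raise `MissingDataError` (see the traceback in the notebook). The broad `except Exception` below is only illustrative; catch the specific error class from your AllenSDK install if you prefer.

    # Sketch: guard a fixed-cache load so an ID missing from the analysis
    # log does not halt a batch script. `cache` is assumed as above.
    ophys_experiment_id = 978244684  # an ID recorded in the analysis log
    try:
        session = cache.get_session_data(ophys_experiment_id, fixed=True)
    except Exception as err:  # MissingDataError is raised for IDs not in the log
        print(f"Skipping ophys experiment {ophys_experiment_id}: {err}")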