diff --git a/allensdk/brain_observatory/behavior/eye_tracking_processing.py b/allensdk/brain_observatory/behavior/eye_tracking_processing.py
index 3f1f092a7..733348779 100644
--- a/allensdk/brain_observatory/behavior/eye_tracking_processing.py
+++ b/allensdk/brain_observatory/behavior/eye_tracking_processing.py
@@ -231,7 +231,7 @@ def process_eye_tracking_data(eye_data: pd.DataFrame,
     cr_areas[likely_blinks] = np.nan
     eye_areas[likely_blinks] = np.nan
 
-    eye_data.insert(0, "time", frame_times)
+    eye_data.insert(0, "timestamps", frame_times)
     eye_data.insert(1, "cr_area", cr_areas)
     eye_data.insert(2, "eye_area", eye_areas)
     eye_data.insert(3, "pupil_area", pupil_areas)
diff --git a/allensdk/brain_observatory/behavior/rewards_processing.py b/allensdk/brain_observatory/behavior/rewards_processing.py
index de13d9d8e..81b66bb9d 100644
--- a/allensdk/brain_observatory/behavior/rewards_processing.py
+++ b/allensdk/brain_observatory/behavior/rewards_processing.py
@@ -1,7 +1,6 @@
 from typing import Dict
 import numpy as np
 import pandas as pd
-from collections import defaultdict
 
 
 def get_rewards(data: Dict,
@@ -31,13 +30,14 @@ def get_rewards(data: Dict,
     trial_df = pd.DataFrame(data["items"]["behavior"]["trial_log"])
     rewards_dict = {"volume": [], "timestamps": [], "autorewarded": []}
     for idx, trial in trial_df.iterrows():
-        rewards = trial["rewards"]  # as i write this there can only ever be one reward per trial
+        rewards = trial["rewards"]
+        # as i write this there can only ever be one reward per trial
         if rewards:
             rewards_dict["volume"].append(rewards[0][0])
             rewards_dict["timestamps"].append(timestamps[rewards[0][2]])
             auto_rwrd = trial["trial_params"]["auto_reward"]
             rewards_dict["autorewarded"].append(auto_rwrd)
 
-    df = pd.DataFrame(rewards_dict).set_index("timestamps", drop=True)
+    df = pd.DataFrame(rewards_dict)
 
     return df
diff --git a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py
index 6572de144..4f34fc0b8 100644
--- a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py
+++ b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py
@@ -224,7 +224,7 @@ def get_licks(self) -> np.ndarray:
             licks = lick_module.get_data_interface('licks')
 
             return pd.DataFrame({
-                'time': licks.timestamps[:],
+                'timestamps': licks.timestamps[:],
                 'frame': licks.data[:]
             })
         else:
@@ -238,11 +238,11 @@ def get_rewards(self) -> np.ndarray:
             volume = rewards.get_data_interface('volume').data[:]
             return pd.DataFrame({
                 'volume': volume, 'timestamps': time,
-                'autorewarded': autorewarded}).set_index('timestamps')
+                'autorewarded': autorewarded})
         else:
             return pd.DataFrame({
                 'volume': [], 'timestamps': [],
-                'autorewarded': []}).set_index('timestamps')
+                'autorewarded': []})
 
 
     def get_metadata(self) -> dict:
diff --git a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py
index 96e890d88..8a72ed6ec 100644
--- a/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py
+++ b/allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py
@@ -233,7 +233,7 @@ def get_eye_tracking(self,
             eye_tracking_acquisition.corneal_reflection_tracking
 
         eye_tracking_dict = {
-            "time": eye_tracking.timestamps[:],
+            "timestamps": eye_tracking.timestamps[:],
            "cr_area": corneal_reflection_tracking.area_raw[:],
            "eye_area": eye_tracking.area_raw[:],
"pupil_area": pupil_tracking.area_raw[:], @@ -480,7 +480,7 @@ def add_eye_tracking_data_to_nwb(self, nwbfile: NWBFile, width=eye_tracking_df['eye_width'].values, height=eye_tracking_df['eye_height'].values, angle=eye_tracking_df['eye_phi'].values, - timestamps=eye_tracking_df['time'].values + timestamps=eye_tracking_df['timestamps'].values ) pupil_tracking = EllipseSeries( diff --git a/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py b/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py index 5b8414600..317cfccc8 100644 --- a/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py +++ b/allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py @@ -110,7 +110,7 @@ def get_licks(self) -> pd.DataFrame: 'range') lick_times = [stimulus_timestamps[frame] for frame in lick_frames] - return pd.DataFrame({"time": lick_times, "frame": lick_frames}) + return pd.DataFrame({"timestamps": lick_times, "frame": lick_frames}) @memoize def get_rewards(self) -> pd.DataFrame: diff --git a/allensdk/brain_observatory/behavior/trials_processing.py b/allensdk/brain_observatory/behavior/trials_processing.py index 990c3c04c..9053e9677 100644 --- a/allensdk/brain_observatory/behavior/trials_processing.py +++ b/allensdk/brain_observatory/behavior/trials_processing.py @@ -519,7 +519,6 @@ def get_trials_from_data_transform(input_transform) -> pd.DataFrame: monitor_delay = input_transform.get_monitor_delay() data = input_transform._behavior_stimulus_file() - assert rewards_df.index.name == 'timestamps' stimuli = data["items"]["behavior"]["stimuli"] trial_log = data["items"]["behavior"]["trial_log"] @@ -527,7 +526,7 @@ def get_trials_from_data_transform(input_transform) -> pd.DataFrame: all_trial_data = [None] * len(trial_log) lick_frames = licks_df.frame.values - reward_times = rewards_df.index.values + reward_times = rewards_df['timestamps'].values for idx, trial in enumerate(trial_log): # match each event in the trial log to the sync timestamps @@ -628,7 +627,8 @@ def get_time(exp_data): def data_to_licks(data, time): lick_frames = data['items']['behavior']['lick_sensors'][0]['lick_events'] lick_times = time[lick_frames] - return pd.DataFrame(data={"frame": lick_frames, 'time': lick_times}) + return pd.DataFrame(data={"timestamps": lick_times, + "frame": lick_frames}) def get_mouse_id(exp_data): @@ -926,12 +926,12 @@ def find_licks(reward_times, licks, window=3.5): return [] else: reward_time = one(reward_times) - reward_lick_mask = ((licks['time'] > reward_time) & - (licks['time'] < (reward_time + window))) + reward_lick_mask = ((licks['timestamps'] > reward_time) & + (licks['timestamps'] < (reward_time + window))) tr_licks = licks[reward_lick_mask].copy() - tr_licks['time'] -= reward_time - return tr_licks['time'].values + tr_licks['timestamps'] -= reward_time + return tr_licks['timestamps'].values def calculate_reward_rate(response_latency=None, diff --git a/allensdk/brain_observatory/nwb/__init__.py b/allensdk/brain_observatory/nwb/__init__.py index 6f6455446..94a932df6 100644 --- a/allensdk/brain_observatory/nwb/__init__.py +++ b/allensdk/brain_observatory/nwb/__init__.py @@ -1,7 +1,7 @@ import logging import warnings from pathlib import Path -from typing import Iterable, Union +from typing import Iterable import h5py import marshmallow @@ -17,10 +17,8 @@ from pynwb.ophys import ( DfOverF, ImageSegmentation, OpticalChannel, Fluorescence) -from 
-from allensdk.brain_observatory.behavior.stimulus_processing.stimulus_templates import \
-    StimulusTemplate
-from allensdk.brain_observatory.behavior.write_nwb.extensions.stimulus_template.ndx_stimulus_template import \
-    StimulusTemplateExtension
+from allensdk.brain_observatory.behavior.stimulus_processing.stimulus_templates import StimulusTemplate  # noqa: E501
+from allensdk.brain_observatory.behavior.write_nwb.extensions.stimulus_template.ndx_stimulus_template import StimulusTemplateExtension  # noqa: E501
 from allensdk.brain_observatory.nwb.nwb_utils import (get_column_name)
 from allensdk.brain_observatory import dict_to_indexed_array
 from allensdk.brain_observatory.behavior.image_api import Image
 
@@ -36,12 +34,12 @@
 log = logging.getLogger("allensdk.brain_observatory.nwb")
 
 CELL_SPECIMEN_COL_DESCRIPTIONS = {
-    'cell_specimen_id': 'Unified id of segmented cell across experiments (after'
-                        ' cell matching)',
+    'cell_specimen_id': 'Unified id of segmented cell across experiments '
+                        '(after cell matching)',
     'height': 'Height of ROI in pixels',
     'width': 'Width of ROI in pixels',
-    'mask_image_plane': 'Which image plane an ROI resides on. Overlapping ROIs '
-                        'are stored on different mask image planes.',
+    'mask_image_plane': 'Which image plane an ROI resides on. Overlapping '
+                        'ROIs are stored on different mask image planes.',
     'max_correction_down': 'Max motion correction in down direction in pixels',
     'max_correction_left': 'Max motion correction in left direction in pixels',
     'max_correction_up': 'Max motion correction in up direction in pixels',
@@ -53,31 +51,31 @@
     'y': 'y position of ROI in Image Plane in pixels (top left corner)'
 }
 
+
 def check_nwbfile_version(nwbfile_path: str,
                           desired_minimum_version: str,
                           warning_msg: str):
-
-        with h5py.File(nwbfile_path, 'r') as f:
-            # nwb 2.x files store version as an attribute
+    with h5py.File(nwbfile_path, 'r') as f:
+        # nwb 2.x files store version as an attribute
+        try:
+            nwb_version = str(f.attrs["nwb_version"]).split(".")
+        except KeyError:
+            # nwb 1.x files store version as dataset
             try:
-                nwb_version = str(f.attrs["nwb_version"]).split(".")
-            except KeyError:
-                # nwb 1.x files store version as dataset
-                try:
-                    nwb_version = str(f["nwb_version"][...].astype(str))
-                    # Stored in the form: `NWB-x.y.z`
-                    nwb_version = nwb_version.split("-")[1].split(".")
-                except (KeyError, IndexError):
-                    nwb_version = None
-
-            if nwb_version is None:
-                warnings.warn(f"'{nwbfile_path}' doesn't appear to be a valid "
-                              f"Neurodata Without Borders (*.nwb) format file as "
-                              f"neither a 'nwb_version' field nor dataset could "
-                              f"be found!")
-            else:
-                if tuple(nwb_version) < tuple(desired_minimum_version.split(".")):
-                    warnings.warn(warning_msg)
+                nwb_version = str(f["nwb_version"][...].astype(str))
+                # Stored in the form: `NWB-x.y.z`
+                nwb_version = nwb_version.split("-")[1].split(".")
+            except (KeyError, IndexError):
+                nwb_version = None
+
+        if nwb_version is None:
+            warnings.warn(f"'{nwbfile_path}' doesn't appear to be a valid "
+                          f"Neurodata Without Borders (*.nwb) format file as "
+                          f"neither a 'nwb_version' field nor dataset could "
+                          f"be found!")
+        else:
+            if tuple(nwb_version) < tuple(desired_minimum_version.split(".")):
+                warnings.warn(warning_msg)
 
 
 def read_eye_dlc_tracking_ellipses(input_path: Path) -> dict:
@@ -121,44 +119,58 @@ def read_eye_gaze_mappings(input_path: Path) -> dict:
 
         *_eye_areas: Area of eye (in pixels^2) over time
         *_pupil_areas: Area of pupil (in pixels^2) over time
         *_screen_coordinates: y, x screen coordinates (in cm) over time
-        *_screen_coordinates_spherical: y, x screen coordinates (in deg) over time
-        synced_frame_timestamps: synced timestamps for video frames (in sec)
+        *_screen_coordinates_spherical: y, x screen coordinates (in deg)
+            over time
+        synced_frame_timestamps: synced timestamps for video frames
+            (in sec)
 
     """
     eye_gaze_data = {}
-
-    eye_gaze_data["raw_eye_areas"] = pd.read_hdf(input_path, key="raw_eye_areas")
-    eye_gaze_data["raw_pupil_areas"] = pd.read_hdf(input_path, key="raw_pupil_areas")
-    eye_gaze_data["raw_screen_coordinates"] = pd.read_hdf(input_path, key="raw_screen_coordinates")
-    eye_gaze_data["raw_screen_coordinates_spherical"] = pd.read_hdf(input_path, key="raw_screen_coordinates_spherical")
-
-    eye_gaze_data["new_eye_areas"] = pd.read_hdf(input_path, key="new_eye_areas")
-    eye_gaze_data["new_pupil_areas"] = pd.read_hdf(input_path, key="new_pupil_areas")
-    eye_gaze_data["new_screen_coordinates"] = pd.read_hdf(input_path, key="new_screen_coordinates")
-    eye_gaze_data["new_screen_coordinates_spherical"] = pd.read_hdf(input_path, key="new_screen_coordinates_spherical")
-
-    eye_gaze_data["synced_frame_timestamps"] = pd.read_hdf(input_path, key="synced_frame_timestamps")
+    eye_gaze_data["raw_eye_areas"] = \
+        pd.read_hdf(input_path, key="raw_eye_areas")
+    eye_gaze_data["raw_pupil_areas"] = \
+        pd.read_hdf(input_path, key="raw_pupil_areas")
+    eye_gaze_data["raw_screen_coordinates"] = \
+        pd.read_hdf(input_path, key="raw_screen_coordinates")
+    eye_gaze_data["raw_screen_coordinates_spherical"] = \
+        pd.read_hdf(input_path, key="raw_screen_coordinates_spherical")
+    eye_gaze_data["new_eye_areas"] = \
+        pd.read_hdf(input_path, key="new_eye_areas")
+    eye_gaze_data["new_pupil_areas"] = \
+        pd.read_hdf(input_path, key="new_pupil_areas")
+    eye_gaze_data["new_screen_coordinates"] = \
+        pd.read_hdf(input_path, key="new_screen_coordinates")
+    eye_gaze_data["new_screen_coordinates_spherical"] = \
+        pd.read_hdf(input_path, key="new_screen_coordinates_spherical")
+    eye_gaze_data["synced_frame_timestamps"] = \
+        pd.read_hdf(input_path, key="synced_frame_timestamps")
 
     return eye_gaze_data
 
 
 def create_eye_gaze_mapping_dataframe(eye_gaze_data: dict) -> pd.DataFrame:
-    eye_gaze_mapping_df = pd.DataFrame(
-        {
-            "raw_eye_area": eye_gaze_data["raw_eye_areas"].values,
-            "raw_pupil_area": eye_gaze_data["raw_pupil_areas"].values,
-            "raw_screen_coordinates_x_cm": eye_gaze_data["raw_screen_coordinates"]["x_pos_cm"].values,
-            "raw_screen_coordinates_y_cm": eye_gaze_data["raw_screen_coordinates"]["y_pos_cm"].values,
-            "raw_screen_coordinates_spherical_x_deg": eye_gaze_data["raw_screen_coordinates_spherical"]["x_pos_deg"].values,
-            "raw_screen_coordinates_spherical_y_deg": eye_gaze_data["raw_screen_coordinates_spherical"]["y_pos_deg"].values,
-
-            "filtered_eye_area": eye_gaze_data["new_eye_areas"].values,
-            "filtered_pupil_area": eye_gaze_data["new_pupil_areas"].values,
-            "filtered_screen_coordinates_x_cm": eye_gaze_data["new_screen_coordinates"]["x_pos_cm"].values,
-            "filtered_screen_coordinates_y_cm": eye_gaze_data["new_screen_coordinates"]["y_pos_cm"].values,
-            "filtered_screen_coordinates_spherical_x_deg": eye_gaze_data["new_screen_coordinates_spherical"]["x_pos_deg"].values,
-            "filtered_screen_coordinates_spherical_y_deg": eye_gaze_data["new_screen_coordinates_spherical"]["y_pos_deg"].values
+    eye_gaze_mapping_df = pd.DataFrame({
+        "raw_eye_area": eye_gaze_data["raw_eye_areas"].values,
+        "raw_pupil_area": eye_gaze_data["raw_pupil_areas"].values,
+        "raw_screen_coordinates_x_cm":
eye_gaze_data["raw_screen_coordinates"]["x_pos_cm"].values, + "raw_screen_coordinates_y_cm": + eye_gaze_data["raw_screen_coordinates"]["y_pos_cm"].values, + "raw_screen_coordinates_spherical_x_deg": + eye_gaze_data["raw_screen_coordinates_spherical"]["x_pos_deg"].values, + "raw_screen_coordinates_spherical_y_deg": + eye_gaze_data["raw_screen_coordinates_spherical"]["y_pos_deg"].values, + "filtered_eye_area": eye_gaze_data["new_eye_areas"].values, + "filtered_pupil_area": eye_gaze_data["new_pupil_areas"].values, + "filtered_screen_coordinates_x_cm": + eye_gaze_data["new_screen_coordinates"]["x_pos_cm"].values, + "filtered_screen_coordinates_y_cm": + eye_gaze_data["new_screen_coordinates"]["y_pos_cm"].values, + "filtered_screen_coordinates_spherical_x_deg": + eye_gaze_data["new_screen_coordinates_spherical"]["x_pos_deg"].values, + "filtered_screen_coordinates_spherical_y_deg": + eye_gaze_data["new_screen_coordinates_spherical"]["y_pos_deg"].values }, index=eye_gaze_data["synced_frame_timestamps"].values ) @@ -187,31 +199,37 @@ def eye_tracking_data_is_valid(eye_dlc_tracking_data: dict, log.warn("The number of camera sync pulses in the " f"sync file ({len(synced_timestamps)}) do not match " "with the number of eye tracking frames " - f"({pupil_params.shape[0]})! No ellipse fits will be written!") + f"({pupil_params.shape[0]})! No ellipse fits will be " + "written!") is_valid = False return is_valid def create_eye_tracking_nwb_processing_module(eye_dlc_tracking_data: dict, - synced_timestamps: pd.Series) -> pynwb.ProcessingModule: + synced_timestamps: pd.Series + ) -> pynwb.ProcessingModule: # Top level container for eye tracking processed data - eye_tracking_mod = pynwb.ProcessingModule(name='eye_tracking', - description='Eye tracking processing module') + eye_tracking_mod = pynwb.ProcessingModule( + name='eye_tracking', + description='Eye tracking processing module') # Data interfaces of dlc_fits_container - pupil_fits = eye_dlc_tracking_data["pupil_params"].assign(timestamps=synced_timestamps) - pupil_params = pynwb.core.DynamicTable.from_dataframe(df=pupil_fits, - name="pupil_ellipse_fits") + pupil_fits = eye_dlc_tracking_data["pupil_params"].assign( + timestamps=synced_timestamps) + pupil_params = pynwb.core.DynamicTable.from_dataframe( + df=pupil_fits, name="pupil_ellipse_fits") - cr_fits = eye_dlc_tracking_data["cr_params"].assign(timestamps=synced_timestamps) + cr_fits = eye_dlc_tracking_data["cr_params"].assign( + timestamps=synced_timestamps) cr_params = pynwb.core.DynamicTable.from_dataframe(df=cr_fits, name="cr_ellipse_fits") - eye_fits = eye_dlc_tracking_data["eye_params"].assign(timestamps=synced_timestamps) - eye_params = pynwb.core.DynamicTable.from_dataframe(df=eye_fits, - name="eye_ellipse_fits") + eye_fits = eye_dlc_tracking_data["eye_params"].assign( + timestamps=synced_timestamps) + eye_params = pynwb.core.DynamicTable.from_dataframe( + df=eye_fits, name="eye_ellipse_fits") eye_tracking_mod.add_data_interface(pupil_params) eye_tracking_mod.add_data_interface(cr_params) @@ -225,7 +243,8 @@ def add_eye_gaze_data_interfaces(pynwb_container: pynwb.NWBContainer, eye_areas: pd.Series, screen_coordinates: pd.DataFrame, screen_coordinates_spherical: pd.DataFrame, - synced_timestamps: pd.Series) -> pynwb.NWBContainer: + synced_timestamps: pd.Series + ) -> pynwb.NWBContainer: pupil_area_ts = pynwb.base.TimeSeries( name="pupil_area", @@ -265,35 +284,40 @@ def add_eye_gaze_data_interfaces(pynwb_container: pynwb.NWBContainer, def 
@@ -265,35 +284,40 @@ def create_gaze_mapping_nwb_processing_modules(eye_gaze_data: dict):
 
     # Container for raw gaze mapped data
-    raw_gaze_mapping_mod = pynwb.ProcessingModule(name='raw_gaze_mapping',
-                                                  description='Gaze mapping processing module raw outputs')
-
-    raw_gaze_mapping_mod = add_eye_gaze_data_interfaces(raw_gaze_mapping_mod,
-                                                        pupil_areas=eye_gaze_data["raw_pupil_areas"],
-                                                        eye_areas=eye_gaze_data["raw_eye_areas"],
-                                                        screen_coordinates=eye_gaze_data["raw_screen_coordinates"],
-                                                        screen_coordinates_spherical=eye_gaze_data["raw_screen_coordinates_spherical"],
-                                                        synced_timestamps=eye_gaze_data["synced_frame_timestamps"])
+    raw_gaze_mapping_mod = pynwb.ProcessingModule(
+        name='raw_gaze_mapping',
+        description='Gaze mapping processing module raw outputs')
+
+    raw_gaze_mapping_mod = add_eye_gaze_data_interfaces(
+        raw_gaze_mapping_mod,
+        pupil_areas=eye_gaze_data["raw_pupil_areas"],
+        eye_areas=eye_gaze_data["raw_eye_areas"],
+        screen_coordinates=eye_gaze_data["raw_screen_coordinates"],
+        screen_coordinates_spherical=eye_gaze_data["raw_screen_coordinates_spherical"],  # noqa: E501
+        synced_timestamps=eye_gaze_data["synced_frame_timestamps"])
 
     # Container for filtered gaze mapped data
-    filt_gaze_mapping_mod = pynwb.ProcessingModule(name='filtered_gaze_mapping',
-                                                   description='Gaze mapping processing module filtered outputs')
-
-    filt_gaze_mapping_mod = add_eye_gaze_data_interfaces(filt_gaze_mapping_mod,
-                                                         pupil_areas=eye_gaze_data["new_pupil_areas"],
-                                                         eye_areas=eye_gaze_data["new_eye_areas"],
-                                                         screen_coordinates=eye_gaze_data["new_screen_coordinates"],
-                                                         screen_coordinates_spherical=eye_gaze_data["new_screen_coordinates_spherical"],
-                                                         synced_timestamps=eye_gaze_data["synced_frame_timestamps"])
+    filt_gaze_mapping_mod = pynwb.ProcessingModule(
+        name='filtered_gaze_mapping',
+        description='Gaze mapping processing module filtered outputs')
+
+    filt_gaze_mapping_mod = add_eye_gaze_data_interfaces(
+        filt_gaze_mapping_mod,
+        pupil_areas=eye_gaze_data["new_pupil_areas"],
+        eye_areas=eye_gaze_data["new_eye_areas"],
+        screen_coordinates=eye_gaze_data["new_screen_coordinates"],
+        screen_coordinates_spherical=eye_gaze_data["new_screen_coordinates_spherical"],  # noqa: E501
+        synced_timestamps=eye_gaze_data["synced_frame_timestamps"])
 
     return (raw_gaze_mapping_mod, filt_gaze_mapping_mod)
 
 
 def add_eye_tracking_ellipse_fit_data_to_nwbfile(nwbfile: pynwb.NWBFile,
                                                  eye_dlc_tracking_data: dict,
-                                                 synced_timestamps: pd.Series) -> pynwb.NWBFile:
-    eye_tracking_mod = create_eye_tracking_nwb_processing_module(eye_dlc_tracking_data,
-                                                                 synced_timestamps)
+                                                 synced_timestamps: pd.Series
+                                                 ) -> pynwb.NWBFile:
+    eye_tracking_mod = create_eye_tracking_nwb_processing_module(
+        eye_dlc_tracking_data, synced_timestamps)
     nwbfile.add_processing_module(eye_tracking_mod)
 
     return nwbfile
 
@@ -301,7 +325,8 @@ def add_eye_tracking_ellipse_fit_data_to_nwbfile(nwbfile: pynwb.NWBFile,
 
 def add_eye_gaze_mapping_data_to_nwbfile(nwbfile: pynwb.NWBFile,
                                          eye_gaze_data: dict) -> pynwb.NWBFile:
-    raw_gaze_mapping_mod, filt_gaze_mapping_mod = create_gaze_mapping_nwb_processing_modules(eye_gaze_data)
+    raw_gaze_mapping_mod, filt_gaze_mapping_mod = \
+        create_gaze_mapping_nwb_processing_modules(eye_gaze_data)
     nwbfile.add_processing_module(raw_gaze_mapping_mod)
     nwbfile.add_processing_module(filt_gaze_mapping_mod)
 
@@ -434,11 +459,13 @@ def add_stimulus_template(nwbfile: NWBFile,
     return nwbfile
 
 
-def create_stimulus_presentation_time_interval(name: str, description: str,
-                                               columns_to_add: Iterable) -> pynwb.epoch.TimeIntervals:
+def create_stimulus_presentation_time_interval(
+        name: str, description: str,
+        columns_to_add: Iterable) -> pynwb.epoch.TimeIntervals:
     column_descriptions = {
         "stimulus_name": "Name of stimulus",
-        "stimulus_block": "Index of contiguous presentations of one stimulus type",
+        "stimulus_block": ("Index of contiguous presentations of "
+                           "one stimulus type"),
         "temporal_frequency": "Temporal frequency of stimulus",
         "x_position": "Horizontal position of stimulus on screen",
         "y_position": "Vertical position of stimulus on screen",
@@ -470,13 +497,15 @@ def create_stimulus_presentation_time_interval(name: str, description: str,
 
     for column_name in columns_to_add:
         if column_name not in columns_to_ignore:
-            description = column_descriptions.get(column_name, "No description")
+            description = column_descriptions.get(
+                column_name, "No description")
             interval.add_column(name=column_name, description=description)
 
     return interval
 
 
-def add_stimulus_presentations(nwbfile, stimulus_table, tag='stimulus_time_interval'):
+def add_stimulus_presentations(nwbfile, stimulus_table,
+                               tag='stimulus_time_interval'):
     """Adds a stimulus table (defining stimulus characteristics for each
     time point in a session) to an nwbfile as TimeIntervals.
 
@@ -484,9 +513,10 @@ def add_stimulus_presentations(nwbfile, stimulus_table, tag='stimulus_time_inter
     ----------
     nwbfile : pynwb.NWBFile
     stimulus_table: pd.DataFrame
-        Each row corresponds to an interval of time. Columns define the interval
-        (start and stop time) and its characteristics.
-        Nans in columns with string data will be replaced with the empty strings.
+        Each row corresponds to an interval of time. Columns define the
+        interval (start and stop time) and its characteristics.
+        Nans in columns with string data will be replaced with the empty
+        strings.
        Required columns are:
            start_time :: the time at which this interval started
            stop_time :: the time at which this interval ended
@@ -507,7 +537,7 @@ def add_stimulus_presentations(nwbfile, stimulus_table, tag='stimulus_time_inter
     stimulus_names = stimulus_table[stimulus_name_column].unique()
 
     for stim_name in sorted(stimulus_names):
-        specific_stimulus_table = stimulus_table[stimulus_table[stimulus_name_column] == stim_name]
+        specific_stimulus_table = stimulus_table[stimulus_table[stimulus_name_column] == stim_name]  # noqa: E501
         # Drop columns where all values in column are NaN
         cleaned_table = specific_stimulus_table.dropna(axis=1, how='all')
         # For columns with mixed strings and NaNs, fill NaNs with 'N/A'
@@ -579,7 +609,8 @@ def setup_table_for_invalid_times(invalid_epochs):
 
     Returns
     -------
-    pd.DataFrame of invalid times if epochs are not empty, otherwise return None
+    pd.DataFrame of invalid times if epochs are not empty,
+    otherwise return None
     """
 
     if invalid_epochs:
@@ -604,21 +635,23 @@ def setup_table_for_invalid_times(invalid_epochs):
 
 def setup_table_for_epochs(table, timeseries, tag):
-
     table = table.copy()
-    indices = np.searchsorted(timeseries.timestamps[:], table['start_time'].values)
+    indices = np.searchsorted(timeseries.timestamps[:],
+                              table['start_time'].values)
     if len(indices > 0):
-        diffs = np.concatenate([np.diff(indices), [table.shape[0] - indices[-1]]])
+        diffs = np.concatenate([np.diff(indices),
+                                [table.shape[0] - indices[-1]]])
     else:
         diffs = []
 
     table['tags'] = [(tag,)] * table.shape[0]
-    table['timeseries'] = [[[indices[ii], diffs[ii], timeseries]] for ii in range(table.shape[0])]
+    table['timeseries'] = [[[indices[ii], diffs[ii], timeseries]]
+                           for ii in range(table.shape[0])]
 
     return table
 
 
-def add_stimulus_timestamps(nwbfile, stimulus_timestamps, module_name='stimulus'):
-
+def add_stimulus_timestamps(nwbfile, stimulus_timestamps,
+                            module_name='stimulus'):
     stimulus_ts = TimeSeries(
         data=stimulus_timestamps,
         name='timestamps',
@@ -635,22 +668,32 @@ def add_stimulus_timestamps(nwbfile, stimulus_timestamps, module_name='stimulus'
 
 def add_trials(nwbfile, trials, description_dict={}):
-
     order = list(trials.index)
     for _, row in trials[['start_time', 'stop_time']].iterrows():
         row_dict = row.to_dict()
         nwbfile.add_trial(**row_dict)
 
-    for c in [c for c in trials.columns if c not in ['start_time', 'stop_time']]:
+    for c in trials.columns:
+        if c in ['start_time', 'stop_time']:
+            continue
         index, data = dict_to_indexed_array(trials[c].to_dict(), order)
         if data.dtype == '
 pd.DataFrame:
 def create_refined_eye_tracking_df(data: np.ndarray) -> pd.DataFrame:
-    columns = ["time", "cr_area", "eye_area", "pupil_area", "likely_blink",
-               "pupil_area_raw", "cr_area_raw", "eye_area_raw",
+    columns = ["timestamps", "cr_area", "eye_area", "pupil_area",
+               "likely_blink", "pupil_area_raw", "cr_area_raw", "eye_area_raw",
                "cr_center_x", "cr_center_y", "cr_width", "cr_height",
                "cr_phi", "eye_center_x", "eye_center_y", "eye_width",
                "eye_height", "eye_phi", "pupil_center_x", "pupil_center_y",
                "pupil_width",
@@ -200,7 +200,7 @@ def test_process_eye_tracking_data_truncation(eye_tracking_df,
        by <= 15
     """
     df = process_eye_tracking_data(eye_tracking_df, frame_times)
-    np.testing.assert_array_almost_equal(df.time.to_numpy(),
+    np.testing.assert_array_almost_equal(df.timestamps.to_numpy(),
                                          np.array([0.0, 0.1]),
                                          decimal=10)
 
diff --git a/allensdk/test/brain_observatory/behavior/test_eye_tracking_processing.py b/allensdk/test/brain_observatory/behavior/test_rewards_processing.py
index efb42fdcf..d344bce6e 100644
--- a/allensdk/test/brain_observatory/behavior/test_rewards_processing.py
+++ b/allensdk/test/brain_observatory/behavior/test_rewards_processing.py
@@ -30,7 +30,7 @@ def test_get_rewards():
     expected = pd.DataFrame(
         {"volume": [0.007, 0.008],
          "timestamps": [14.0, 15.0],
-         "autorewarded": [False, True]}).set_index("timestamps", drop=True)
+         "autorewarded": [False, True]})
 
     timesteps = -1*np.ones(100, dtype=float)
     timesteps[55] = 14.0