
Commit afc0135

Merge pull request #1953 from AllenInstitute/1919-time-column-names

2 parents 35a5823 + bf3b698

14 files changed: +274 additions, -205 deletions

allensdk/brain_observatory/behavior/eye_tracking_processing.py

Lines changed: 1 addition & 1 deletion

@@ -231,7 +231,7 @@ def process_eye_tracking_data(eye_data: pd.DataFrame,
     cr_areas[likely_blinks] = np.nan
     eye_areas[likely_blinks] = np.nan
 
-    eye_data.insert(0, "time", frame_times)
+    eye_data.insert(0, "timestamps", frame_times)
     eye_data.insert(1, "cr_area", cr_areas)
     eye_data.insert(2, "eye_area", eye_areas)
     eye_data.insert(3, "pupil_area", pupil_areas)

allensdk/brain_observatory/behavior/rewards_processing.py

Lines changed: 3 additions & 3 deletions

@@ -1,7 +1,6 @@
 from typing import Dict
 import numpy as np
 import pandas as pd
-from collections import defaultdict
 
 
 def get_rewards(data: Dict,
@@ -31,13 +30,14 @@ def get_rewards(data: Dict,
     trial_df = pd.DataFrame(data["items"]["behavior"]["trial_log"])
     rewards_dict = {"volume": [], "timestamps": [], "autorewarded": []}
     for idx, trial in trial_df.iterrows():
-        rewards = trial["rewards"]  # as i write this there can only ever be one reward per trial
+        rewards = trial["rewards"]
+        # as i write this there can only ever be one reward per trial
         if rewards:
             rewards_dict["volume"].append(rewards[0][0])
             rewards_dict["timestamps"].append(timestamps[rewards[0][2]])
             auto_rwrd = trial["trial_params"]["auto_reward"]
             rewards_dict["autorewarded"].append(auto_rwrd)
 
-    df = pd.DataFrame(rewards_dict).set_index("timestamps", drop=True)
+    df = pd.DataFrame(rewards_dict)
 
     return df
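
The user-visible effect of this change: get_rewards now returns reward times in an ordinary 'timestamps' column rather than in the DataFrame index. A minimal before/after sketch, using made-up reward values in place of real trial_log output:

import pandas as pd

# Invented reward records standing in for real trial_log data.
rewards = pd.DataFrame({"volume": [0.007, 0.005],
                        "timestamps": [12.34, 56.78],
                        "autorewarded": [False, True]})

# Before this commit, reward times lived in the index:
#     rewards.set_index("timestamps", drop=True).index.values
# After it, they are read like any other column:
reward_times = rewards["timestamps"].values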

allensdk/brain_observatory/behavior/session_apis/data_io/behavior_nwb_api.py

Lines changed: 3 additions & 3 deletions

@@ -224,7 +224,7 @@ def get_licks(self) -> np.ndarray:
             licks = lick_module.get_data_interface('licks')
 
             return pd.DataFrame({
-                'time': licks.timestamps[:],
+                'timestamps': licks.timestamps[:],
                 'frame': licks.data[:]
             })
         else:
@@ -238,11 +238,11 @@ def get_rewards(self) -> np.ndarray:
             volume = rewards.get_data_interface('volume').data[:]
             return pd.DataFrame({
                 'volume': volume, 'timestamps': time,
-                'autorewarded': autorewarded}).set_index('timestamps')
+                'autorewarded': autorewarded})
         else:
             return pd.DataFrame({
                 'volume': [], 'timestamps': [],
-                'autorewarded': []}).set_index('timestamps')
+                'autorewarded': []})
 
     def get_metadata(self) -> dict:

allensdk/brain_observatory/behavior/session_apis/data_io/behavior_ophys_nwb_api.py

Lines changed: 2 additions & 2 deletions

@@ -233,7 +233,7 @@ def get_eye_tracking(self,
             eye_tracking_acquisition.corneal_reflection_tracking
 
         eye_tracking_dict = {
-            "time": eye_tracking.timestamps[:],
+            "timestamps": eye_tracking.timestamps[:],
             "cr_area": corneal_reflection_tracking.area_raw[:],
             "eye_area": eye_tracking.area_raw[:],
             "pupil_area": pupil_tracking.area_raw[:],
@@ -480,7 +480,7 @@ def add_eye_tracking_data_to_nwb(self, nwbfile: NWBFile,
             width=eye_tracking_df['eye_width'].values,
             height=eye_tracking_df['eye_height'].values,
             angle=eye_tracking_df['eye_phi'].values,
-            timestamps=eye_tracking_df['time'].values
+            timestamps=eye_tracking_df['timestamps'].values
         )
 
         pupil_tracking = EllipseSeries(

allensdk/brain_observatory/behavior/session_apis/data_transforms/behavior_data_transforms.py

Lines changed: 1 addition & 1 deletion

@@ -110,7 +110,7 @@ def get_licks(self) -> pd.DataFrame:
                              'range')
 
         lick_times = [stimulus_timestamps[frame] for frame in lick_frames]
-        return pd.DataFrame({"time": lick_times, "frame": lick_frames})
+        return pd.DataFrame({"timestamps": lick_times, "frame": lick_frames})
 
     @memoize
     def get_rewards(self) -> pd.DataFrame:
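
With this rename, licks from the stimulus-file path above and the NWB path carry the same column pair, so downstream code selects licks["timestamps"] regardless of source. A small illustrative sketch, with invented lick frames and a fake 60 Hz timestamp table:

import pandas as pd

# Invented lick frames and a stand-in stimulus timestamp table (one entry per frame).
stimulus_timestamps = [round(i / 60.0, 4) for i in range(120)]
lick_frames = [10, 52, 97]

lick_times = [stimulus_timestamps[frame] for frame in lick_frames]
licks = pd.DataFrame({"timestamps": lick_times, "frame": lick_frames})
# The column is now "timestamps"; code that selected licks["time"] must be updated.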

allensdk/brain_observatory/behavior/trials_processing.py

Lines changed: 7 additions & 7 deletions

@@ -519,15 +519,14 @@ def get_trials_from_data_transform(input_transform) -> pd.DataFrame:
     monitor_delay = input_transform.get_monitor_delay()
     data = input_transform._behavior_stimulus_file()
 
-    assert rewards_df.index.name == 'timestamps'
     stimuli = data["items"]["behavior"]["stimuli"]
     trial_log = data["items"]["behavior"]["trial_log"]
 
     trial_bounds = get_trial_bounds(trial_log)
 
     all_trial_data = [None] * len(trial_log)
     lick_frames = licks_df.frame.values
-    reward_times = rewards_df.index.values
+    reward_times = rewards_df['timestamps'].values
 
     for idx, trial in enumerate(trial_log):
         # match each event in the trial log to the sync timestamps
@@ -628,7 +627,8 @@ def get_time(exp_data):
 def data_to_licks(data, time):
     lick_frames = data['items']['behavior']['lick_sensors'][0]['lick_events']
     lick_times = time[lick_frames]
-    return pd.DataFrame(data={"frame": lick_frames, 'time': lick_times})
+    return pd.DataFrame(data={"timestamps": lick_times,
+                              "frame": lick_frames})
 
 
 def get_mouse_id(exp_data):
@@ -926,12 +926,12 @@ def find_licks(reward_times, licks, window=3.5):
         return []
     else:
         reward_time = one(reward_times)
-        reward_lick_mask = ((licks['time'] > reward_time) &
-                            (licks['time'] < (reward_time + window)))
+        reward_lick_mask = ((licks['timestamps'] > reward_time) &
+                            (licks['timestamps'] < (reward_time + window)))
 
         tr_licks = licks[reward_lick_mask].copy()
-        tr_licks['time'] -= reward_time
-        return tr_licks['time'].values
+        tr_licks['timestamps'] -= reward_time
+        return tr_licks['timestamps'].values
 
 
 def calculate_reward_rate(response_latency=None,
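
find_licks is otherwise unchanged by this commit; only the column it masks on is renamed. A self-contained sketch of that windowing logic under the new name (this re-implements the masking for illustration rather than importing allensdk, and the lick values are invented):

import pandas as pd

def licks_after_reward(licks: pd.DataFrame, reward_time: float,
                       window: float = 3.5):
    """Return lick times relative to one reward, within `window` seconds."""
    mask = ((licks["timestamps"] > reward_time) &
            (licks["timestamps"] < reward_time + window))
    relative = licks.loc[mask, "timestamps"] - reward_time
    return relative.values

licks = pd.DataFrame({"timestamps": [1.0, 5.2, 6.1, 12.0],
                      "frame": [60, 312, 366, 720]})
print(licks_after_reward(licks, reward_time=5.0))  # approximately [0.2 1.1]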
