diff --git a/pipeline/mtl_analysis/helper_functions.py b/pipeline/mtl_analysis/helper_functions.py index 719c10da..23e3b8e2 100644 --- a/pipeline/mtl_analysis/helper_functions.py +++ b/pipeline/mtl_analysis/helper_functions.py @@ -474,6 +474,11 @@ def water2subject(water,date): session_num = (experiment.Session() * lab.WaterRestriction & {'water_restriction_number': water, 'session_date': date}).fetch('session') return subject_id, session_num +def subject2water(subject,session_num): + water = (lab.WaterRestriction & {'subject_id': subject}).fetch('water_restriction_number') + date = (experiment.Session() * lab.WaterRestriction & {'subject_id': subject, 'session': session_num}).fetch('session_date') + return water, date + def rose_plot(ax, angles, bins=16, density=None, offset=0, lab_unit="degrees", start_zero=False, **param_dict): """ Plot polar histogram of angles on ax. ax must have been created using @@ -532,10 +537,10 @@ def plot_glmfit(unit_key, num_time=2000): fig, ax = plt.subplots(1, 1, figsize=(24, 16)) - ax.plot(t_vec, test_x[0][:num_time, 1]+6,color='k',label='jaw ' +str(weights_r[max_r2_idx[0][0],2])) - ax.plot(t_vec, test_x[0][:num_time, 4]+6,color='g',label='tongue '+str(weights_r[max_r2_idx[0][0],5])) + ax.plot(t_vec, test_x[0][:num_time, 1]+6,color='k',label='jaw DV'+str(weights_r[max_r2_idx[0][0],2])+' ML'+str(weights_r[max_r2_idx[0][0],3])+' AP'+str(weights_r[max_r2_idx[0][0],1])) # jaw y (DV) + ax.plot(t_vec, test_x[0][:num_time, 3]+6,color='g',label='tongue DV'+str(weights_r[max_r2_idx[0][0],6])+' ML'+str(weights_r[max_r2_idx[0][0],4])+' AP'+str(weights_r[max_r2_idx[0][0],5])) # tongue x (ML) ax.plot(t_vec, test_x[0][:num_time, 6]+12,color='b',label='breathing ' +str(weights_r[max_r2_idx[0][0],7])) - ax.plot(t_vec, test_x[0][:num_time, 7]+14,color='r',label='whisking '+str(weights_r[max_r2_idx[0][0],8])) + ax.plot(t_vec, test_x[0][:num_time, 7]+16,color='r',label='whisking '+str(weights_r[max_r2_idx[0][0],8])) test_y_roll=np.roll(test_y[0],taus[max_r2_idx[0][0]]) ax.plot(t_vec,test_y_roll[:num_time],color='k',label='test') ax.plot(t_vec,predict_y[0][max_r2_idx[0][0]][:num_time],color='r',label='prediction') @@ -554,15 +559,81 @@ def plot_glmfit(unit_key, num_time=2000): def plot_dir_tuning(unit_key): - dir_tuning=(oralfacial_analysis.DirectionTuning() & unit_key).fetch('direction_tuning') + dir_tuning, dir_idx=(oralfacial_analysis.DirectionTuning() & unit_key).fetch('direction_tuning','direction_index') fr_mat=[[dir_tuning[0][2],dir_tuning[0][5],dir_tuning[0][8]], [dir_tuning[0][1],dir_tuning[0][4],dir_tuning[0][7]], [dir_tuning[0][0],dir_tuning[0][3],dir_tuning[0][6]]] fig, ax = plt.subplots(1, 1, figsize=(3, 3)) neg=ax.imshow(fr_mat,vmin = 0, cmap='Reds', interpolation='none') fig.colorbar(neg,ax=ax, location='right', anchor=(0, 0.3), shrink=0.7) + ax.set_title(str(np.round(dir_idx[0],decimals=2))) ax.set_xticks([]) ax.set_yticks([]) ax.spines['left'].set_visible(False) + ax.spines['right'].set_visible(False) + ax.spines['top'].set_visible(False) + ax.spines['bottom'].set_visible(False) + + return fig + +def plot_breathing_hist(session_key): + + inspir_onset=(oralfacial_analysis.MovementTiming & session_key).fetch1('inspiration_onset') + + fig, ax = plt.subplots(1, 1, figsize=(3, 3)) + + ax.hist(1/np.diff(inspir_onset), 100,range=[0,12]) + ax.set_xlabel('Frequency (Hz)') + ax.set_ylabel('Number of breaths') + + ax.spines['right'].set_visible(False) + ax.spines['top'].set_visible(False) + + return fig + +def volcano_plot_licking(session_key): + 
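# --- Illustrative sketch (not part of the diff): how plot_breathing_hist above turns the
# --- MovementTiming 'inspiration_onset' vector into a breathing-frequency histogram.
# --- Assumes onset times are in seconds; the onsets below are synthetic stand-ins.
import numpy as np
import matplotlib.pyplot as plt

inspir_onset = np.cumsum(np.random.uniform(0.15, 0.5, size=500))  # fake onset times (s)
inst_freq = 1 / np.diff(inspir_onset)                              # Hz, one value per breath interval

fig, ax = plt.subplots(1, 1, figsize=(3, 3))
ax.hist(inst_freq, 100, range=[0, 12])        # same binning as plot_breathing_hist
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Number of breaths')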
inspir_onset,lick_onset_time,lick_offset_time,ton_onset=(oralfacial_analysis.MovementTiming & session_key).fetch1('inspiration_onset','lick_onset','lick_offset','tongue_onset') + + inspir_onset_l=[] # restrict by whisking bouts + for i,val in enumerate(lick_onset_time): + inspir_onset_l.append(inspir_onset[(inspir_onset>(lick_onset_time[i]+0.2)) & (inspir_onset<(lick_offset_time[i]-0.2))]) + inspir_onset_l=np.array(inspir_onset_l) + inspir_onset_l=np.hstack(inspir_onset_l) + + licks = [] + + ibi = np.zeros(len(inspir_onset_l)-3) + ibi2 = np.zeros(len(inspir_onset_l)-4) + + max_ibi=np.max(np.diff(inspir_onset)) + + for i,_ in enumerate(inspir_onset_l[2:-2]): + + lick=ton_onset[(ton_onset > inspir_onset_l[i]) & (ton_onset ephys.Unit + --- + r2_nolick: mediumblob + weights_nolick: mediumblob + y_nolick: longblob + predict_y_nolick: longblob + x_nolick: longblob + """ + # mtl sessions only + key_source = experiment.Session & v_tracking.TongueTracking3DBot & experiment.Breathing & v_oralfacial_analysis.WhiskerSVD & ephys.Unit & 'rig = "RRig-MTL"' + + def make(self, key): + good_units=ephys.Unit * ephys.ClusterMetric * ephys.UnitStat & key & 'presence_ratio > 0.9' & 'amplitude_cutoff < 0.15' & 'avg_firing_rate > 0.2' & 'isi_violation < 10' & 'unit_amp > 150' + unit_keys=good_units.fetch('KEY') + bin_width = 0.017 + + # from the cameras + tongue_thr = 0.95 + traces_s = tracking.Tracking.TongueTracking & key & {'tracking_device': 'Camera 3'} + traces_b = tracking.Tracking.TongueTracking & key & {'tracking_device': 'Camera 4'} + + if len(experiment.SessionTrial & (ephys.Unit.TrialSpikes & key)) != len(traces_s): + print(f'Mismatch in tracking trial and ephys trial number: {key}') + return + if len(experiment.SessionTrial & (ephys.Unit.TrialSpikes & key)) != len(traces_b): + print(f'Mismatch in tracking trial and ephys trial number: {key}') + return + + # from the cameras + tongue_thr = 0.95 + trial_key=(v_tracking.TongueTracking3DBot & key).fetch('trial', order_by='trial') + traces_s = tracking.Tracking.TongueTracking & key & {'tracking_device': 'Camera 3'} & [{'trial': tr} for tr in trial_key] + traces_b = tracking.Tracking.TongueTracking & key & {'tracking_device': 'Camera 4'} & [{'trial': tr} for tr in trial_key] + session_traces_s_l = traces_s.fetch('tongue_likelihood', order_by='trial') + session_traces_b_l = traces_b.fetch('tongue_likelihood', order_by='trial') + + session_traces_s_l = np.vstack(session_traces_s_l) + session_traces_b_l = np.vstack(session_traces_b_l) + session_traces_t_l = session_traces_b_l + session_traces_t_l[np.where((session_traces_s_l > tongue_thr) & (session_traces_b_l > tongue_thr))] = 1 + session_traces_t_l[np.where((session_traces_s_l <= tongue_thr) | (session_traces_b_l <= tongue_thr))] = 0 + session_traces_t_l = np.hstack(session_traces_t_l) + + session_traces_s_l_f = np.vstack(session_traces_s_l) + session_traces_b_l_f = np.vstack(session_traces_b_l) + session_traces_t_l_f = session_traces_b_l_f + session_traces_t_l_f[np.where((session_traces_s_l_f > tongue_thr) & (session_traces_b_l_f > tongue_thr))] = 1 + session_traces_t_l_f[np.where((session_traces_s_l_f <= tongue_thr) | (session_traces_b_l_f <= tongue_thr))] = 0 + + # from 3D calibration + traces_s = v_tracking.JawTracking3DSid & key & [{'trial': tr} for tr in trial_key] + traces_b = v_tracking.TongueTracking3DBot & key & [{'trial': tr} for tr in trial_key] + session_traces_s_y, session_traces_s_x, session_traces_s_z = traces_s.fetch('jaw_y', 'jaw_x', 'jaw_z', order_by='trial') + session_traces_b_y, 
session_traces_b_x, session_traces_b_z = traces_b.fetch('tongue_y', 'tongue_x', 'tongue_z', order_by='trial') + session_traces_s_y = stats.zscore(np.vstack(session_traces_s_y),axis=None) + session_traces_s_x = stats.zscore(np.vstack(session_traces_s_x),axis=None) + session_traces_s_z = stats.zscore(np.vstack(session_traces_s_z),axis=None) + session_traces_b_y = np.vstack(session_traces_b_y) + traces_y_mean=np.mean(session_traces_b_y[np.where(session_traces_t_l_f == 1)]) + traces_y_std=np.std(session_traces_b_y[np.where(session_traces_t_l_f == 1)]) + session_traces_b_y = (session_traces_b_y - traces_y_mean)/traces_y_std + session_traces_b_x = np.vstack(session_traces_b_x) + traces_x_mean=np.mean(session_traces_b_x[np.where(session_traces_t_l_f == 1)]) + traces_x_std=np.std(session_traces_b_x[np.where(session_traces_t_l_f == 1)]) + session_traces_b_x = (session_traces_b_x - traces_x_mean)/traces_x_std + session_traces_b_z = np.vstack(session_traces_b_z) + traces_z_mean=np.mean(session_traces_b_z[np.where(session_traces_t_l_f == 1)]) + traces_z_std=np.std(session_traces_b_z[np.where(session_traces_t_l_f == 1)]) + session_traces_b_z = (session_traces_b_z - traces_z_mean)/traces_z_std + + traces_len = np.size(session_traces_b_z, axis = 1) + num_trial = np.size(session_traces_b_z, axis = 0) + + # format the video data + session_traces_s_y = np.hstack(session_traces_s_y) + session_traces_s_x = np.hstack(session_traces_s_x) + session_traces_s_z = np.hstack(session_traces_s_z) + session_traces_b_y = np.hstack(session_traces_b_y) + session_traces_b_x = np.hstack(session_traces_b_x) + session_traces_b_z = np.hstack(session_traces_b_z) + # -- moving-average and down-sample + window_size = int(bin_width/0.0034) # sample + kernel = np.ones(window_size) / window_size + session_traces_s_x = np.convolve(session_traces_s_x, kernel, 'same') + session_traces_s_x = session_traces_s_x[window_size::window_size] + session_traces_s_y = np.convolve(session_traces_s_y, kernel, 'same') + session_traces_s_y = session_traces_s_y[window_size::window_size] + session_traces_s_z = np.convolve(session_traces_s_z, kernel, 'same') + session_traces_s_z = session_traces_s_z[window_size::window_size] + session_traces_b_x = np.convolve(session_traces_b_x, kernel, 'same') + session_traces_b_x = session_traces_b_x[window_size::window_size] + session_traces_b_y = np.convolve(session_traces_b_y, kernel, 'same') + session_traces_b_y = session_traces_b_y[window_size::window_size] + session_traces_b_z = np.convolve(session_traces_b_z, kernel, 'same') + session_traces_b_z = session_traces_b_z[window_size::window_size] + session_traces_t_l = np.convolve(session_traces_t_l, kernel, 'same') + session_traces_t_l = session_traces_t_l[window_size::window_size] + session_traces_t_l[np.where(session_traces_t_l < 1)] = 0 + session_traces_s_x = np.reshape(session_traces_s_x,(-1,1)) + session_traces_s_y = np.reshape(session_traces_s_y,(-1,1)) + session_traces_s_z = np.reshape(session_traces_s_z,(-1,1)) + session_traces_b_x = np.reshape(session_traces_b_x * session_traces_t_l, (-1,1)) + session_traces_b_y = np.reshape(session_traces_b_y * session_traces_t_l, (-1,1)) + session_traces_b_z = np.reshape(session_traces_b_z * session_traces_t_l, (-1,1)) + + # get breathing + breathing, breathing_ts = (experiment.Breathing & key & [{'trial': tr} for tr in trial_key]).fetch('breathing', 'breathing_timestamps', order_by='trial') + good_breathing = breathing + for i, d in enumerate(breathing): + good_breathing[i] = d[breathing_ts[i] < traces_len*3.4/1000] + 
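# --- Illustrative sketch (not part of the diff): the moving-average + down-sample pattern
# --- applied above to every video trace. Camera frames arrive every 0.0034 s, so
# --- window_size = bin_width / 0.0034 frames; after boxcar smoothing with np.convolve,
# --- keeping every window_size-th sample leaves one value per GLM time bin.
# --- The trace below is synthetic.
import numpy as np

bin_width = 0.017                         # GLM bin width (s), as in GLMFitNoLick.make
window_size = int(bin_width / 0.0034)     # 5 camera frames per bin
kernel = np.ones(window_size) / window_size

trace = np.random.randn(10000)                    # stand-in for a z-scored jaw/tongue trace
smoothed = np.convolve(trace, kernel, 'same')     # boxcar moving average
binned = smoothed[window_size::window_size]       # one sample per bin_width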
good_breathing = stats.zscore(np.vstack(good_breathing),axis=None) + + good_breathing = np.hstack(good_breathing) + # -- moving-average + window_size = int(round(bin_width/(breathing_ts[0][1]-breathing_ts[0][0]),0)) # sample + kernel = np.ones(window_size) / window_size + good_breathing = np.convolve(good_breathing, kernel, 'same') + # -- down-sample + good_breathing = good_breathing[window_size::window_size] + good_breathing = np.reshape(good_breathing,(-1,1)) + + # get whisker + session_traces_w = (v_oralfacial_analysis.WhiskerSVD & key).fetch('mot_svd') + if len(session_traces_w[0][:,0]) % 1471 != 0: + print('Bad videos in bottom view') + #return + else: + num_trial_w = int(len(session_traces_w[0][:,0])/1471) + session_traces_w = np.reshape(session_traces_w[0][:,0], (num_trial_w, 1471)) + + trial_idx_nat = [d.astype(str) for d in np.arange(num_trial_w)] + trial_idx_nat = sorted(range(len(trial_idx_nat)), key=lambda k: trial_idx_nat[k]) + trial_idx_nat = sorted(range(len(trial_idx_nat)), key=lambda k: trial_idx_nat[k]) + session_traces_w = session_traces_w[trial_idx_nat,:] + session_traces_w_o = stats.zscore(session_traces_w,axis=None) + session_traces_w = session_traces_w_o[trial_key-1] + + session_traces_w = np.hstack(session_traces_w) + window_size = int(bin_width/0.0034) # sample + kernel = np.ones(window_size) / window_size + session_traces_w = np.convolve(session_traces_w, kernel, 'same') + session_traces_w = session_traces_w[window_size::window_size] + session_traces_w = np.reshape(session_traces_w,(-1,1)) + + # stimulus + lick_onset_time,lick_offset_time=(v_oralfacial_analysis.MovementTiming & key).fetch1('lick_onset','lick_offset') + + all_period_idx=np.arange(len(session_traces_b_y)) + good_period_idx=[all_period_idx[(all_period_idx*bin_widthlick_offset_time[i]+0.2)]) + good_period_idx.append(all_period_idx[(all_period_idx*bin_width>lick_offset_time[-1]+0.2)]) + good_period_idx=np.array(good_period_idx) + good_period_idx=np.hstack(good_period_idx) + + session_traces_s_x=stats.zscore(session_traces_s_x[good_period_idx]) + session_traces_s_y=stats.zscore(session_traces_s_y[good_period_idx]) + session_traces_s_z=stats.zscore(session_traces_s_z[good_period_idx]) + session_traces_b_x=session_traces_b_x[good_period_idx] + traces_x_mean=np.mean(session_traces_b_x[session_traces_b_x != 0]) + traces_x_std=np.std(session_traces_b_x[session_traces_b_x != 0]) + session_traces_b_x = (session_traces_b_x - traces_x_mean)/traces_x_std + session_traces_b_y=session_traces_b_y[good_period_idx] + traces_y_mean=np.mean(session_traces_b_y[session_traces_b_y != 0]) + traces_y_std=np.std(session_traces_b_y[session_traces_b_y != 0]) + session_traces_b_y = (session_traces_b_y - traces_y_mean)/traces_y_std + session_traces_b_z=session_traces_b_z[good_period_idx] + traces_z_mean=np.mean(session_traces_b_z[session_traces_b_z != 0]) + traces_z_std=np.std(session_traces_b_z[session_traces_b_z != 0]) + session_traces_b_z = (session_traces_b_z - traces_z_mean)/traces_z_std + good_breathing=stats.zscore(good_breathing[good_period_idx]) + session_traces_w=stats.zscore(session_traces_w[good_period_idx]) + + V_design_matrix = np.concatenate((session_traces_s_x, session_traces_s_y, session_traces_s_z, session_traces_b_x, session_traces_b_y, session_traces_b_z, good_breathing, session_traces_w), axis=1) + + #set up GLM + sm_log_Link = sm.genmod.families.links.log + + taus = np.arange(-5,6) + + units_glm = [] + + for unit_key in unit_keys: # loop for each neuron + + all_spikes=(ephys.Unit.TrialSpikes & unit_key & 
[{'trial': tr} for tr in trial_key]).fetch('spike_times', order_by='trial') + + good_spikes =all_spikes # get good spikes + for i, d in enumerate(good_spikes): + good_spikes[i] = d[d < traces_len*3.4/1000]+traces_len*3.4/1000*i + good_spikes = np.hstack(good_spikes) + y, bin_edges = np.histogram(good_spikes, np.arange(0, traces_len*3.4/1000*num_trial, bin_width)) + y=y[good_period_idx] + + r2s=np.zeros(len(taus)) + weights_t=np.zeros((len(taus),9)) + predict_ys=np.zeros((len(taus),len(y))) + for i, tau in enumerate(taus): + y_roll=np.roll(y,tau) + glm_poiss = sm.GLM(y_roll, sm.add_constant(V_design_matrix), family=sm.families.Poisson(link=sm_log_Link)) + + try: + glm_result = glm_poiss.fit() + + sst_val = sum(map(lambda x: np.power(x,2),y_roll-np.mean(y_roll))) + sse_val = sum(map(lambda x: np.power(x,2),glm_result.resid_response)) + r2 = 1.0 - sse_val/sst_val + + r2s[i] = r2 + + y_roll_t_p=glm_result.predict(sm.add_constant(V_design_matrix)) + predict_ys[i,:]=y_roll_t_p + weights_t[i,:] = glm_result.params + + except: + pass + + units_glm.append({**unit_key, 'r2_nolick': r2s, 'weights_nolick': weights_t, 'y_nolick': y, 'predict_y_nolick': predict_ys, 'x_nolick': V_design_matrix}) + print(unit_key) + + self.insert(units_glm, ignore_extra_fields=True) @schema class WhiskerSVD(dj.Computed): @@ -619,6 +843,8 @@ class DirectionTuning(dj.Computed): -> ephys.Unit --- direction_tuning: mediumblob + direction_index: float + preferred_phase: float """ @@ -644,7 +870,223 @@ def make(self, key): direction_lick[dir_idx]=direction_lick[dir_idx]+len(tr_fr) direction_tun=direction_spk/direction_lick - - unit_dir.append({**unit_key, 'direction_tuning': direction_tun}) - self.insert(unit_dir, ignore_extra_fields=True) \ No newline at end of file + tuning_y=direction_tun[[7,8,5,2,1,0,3,6]] + tuning_x=np.linspace(0,7*np.pi/4,8) + tuning_y_n=tuning_y[~np.isnan(tuning_y)] + tuning_x_n=tuning_x[~np.isnan(tuning_y)] + pref_phase,dir_idx=helper_functions.compute_phase_tuning(tuning_x_n, tuning_y_n) + if np.isnan(dir_idx): + dir_idx=0 + pref_phase=0 + unit_dir.append({**unit_key, 'direction_tuning': direction_tun, 'preferred_phase': pref_phase, 'direction_index': dir_idx}) + + self.insert(unit_dir, ignore_extra_fields=True) + +@schema +class MovementTiming(dj.Computed): + definition = """ + -> experiment.Session + --- + inspiration_onset: mediumblob + tongue_onset: mediumblob + lick_onset: mediumblob + lick_offset: mediumblob + whisker_onset: mediumblob + whisk_onset: mediumblob + whisk_offset: mediumblob + """ + + key_source = experiment.Session & v_tracking.TongueTracking3DBot & experiment.Breathing & v_oralfacial_analysis.WhiskerSVD & ephys.Unit & 'rig = "RRig-MTL"' + + def make(self, key): + + bin_width = 0.0034 + + # from the cameras + tongue_thr = 0.95 + trial_key=(v_tracking.TongueTracking3DBot & key).fetch('trial', order_by='trial') + traces_s = tracking.Tracking.TongueTracking & key & {'tracking_device': 'Camera 3'} & [{'trial': tr} for tr in trial_key] + traces_b = tracking.Tracking.TongueTracking & key & {'tracking_device': 'Camera 4'} & [{'trial': tr} for tr in trial_key] + session_traces_s_l = traces_s.fetch('tongue_likelihood', order_by='trial') + session_traces_b_l = traces_b.fetch('tongue_likelihood', order_by='trial') + + session_traces_s_l = np.vstack(session_traces_s_l) + session_traces_b_l = np.vstack(session_traces_b_l) + session_traces_t_l = session_traces_b_l + session_traces_t_l[np.where((session_traces_s_l > tongue_thr) & (session_traces_b_l > tongue_thr))] = 1 + 
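# --- Illustrative sketch (not part of the diff): the time-lagged Poisson GLM fit run for
# --- each unit in GLMFitNoLick.make above. A synthetic design matrix and spike-count
# --- vector stand in for V_design_matrix and the binned, no-lick-restricted y.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = rng.standard_normal((2000, 8))             # 8 behavioral regressors (jaw, tongue, breathing, whisking)
y = rng.poisson(np.exp(0.3 * X[:, 0]))         # fake spike counts per bin

taus = np.arange(-5, 6)                        # bin shifts tested per unit
r2s = np.zeros(len(taus))
for i, tau in enumerate(taus):
    y_roll = np.roll(y, tau)                   # shift spikes relative to behavior
    fit = sm.GLM(y_roll, sm.add_constant(X),
                 family=sm.families.Poisson()).fit()   # log link is the Poisson default
    sst = np.sum((y_roll - y_roll.mean()) ** 2)
    sse = np.sum(fit.resid_response ** 2)
    r2s[i] = 1.0 - sse / sst                   # same pseudo-R2 used in make()
best_tau = taus[np.argmax(r2s)]                # lag giving the best fit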
session_traces_t_l[np.where((session_traces_s_l <= tongue_thr) | (session_traces_b_l <= tongue_thr))] = 0 + session_traces_t_l = np.hstack(session_traces_t_l) + + session_traces_s_l_f = np.vstack(session_traces_s_l) + session_traces_b_l_f = np.vstack(session_traces_b_l) + session_traces_t_l_f = session_traces_b_l_f + session_traces_t_l_f[np.where((session_traces_s_l_f > tongue_thr) & (session_traces_b_l_f > tongue_thr))] = 1 + session_traces_t_l_f[np.where((session_traces_s_l_f <= tongue_thr) | (session_traces_b_l_f <= tongue_thr))] = 0 + + # from 3D calibration + traces_s = v_tracking.JawTracking3DSid & key & [{'trial': tr} for tr in trial_key] + traces_b = v_tracking.TongueTracking3DBot & key & [{'trial': tr} for tr in trial_key] + session_traces_s_y, session_traces_s_x, session_traces_s_z = traces_s.fetch('jaw_y', 'jaw_x', 'jaw_z', order_by='trial') + session_traces_b_y, session_traces_b_x, session_traces_b_z = traces_b.fetch('tongue_y', 'tongue_x', 'tongue_z', order_by='trial') + session_traces_s_y = stats.zscore(np.vstack(session_traces_s_y),axis=None) + session_traces_s_x = stats.zscore(np.vstack(session_traces_s_x),axis=None) + session_traces_s_z = stats.zscore(np.vstack(session_traces_s_z),axis=None) + session_traces_b_y = np.vstack(session_traces_b_y) + traces_y_mean=np.mean(session_traces_b_y[np.where(session_traces_t_l_f == 1)]) + traces_y_std=np.std(session_traces_b_y[np.where(session_traces_t_l_f == 1)]) + session_traces_b_y = (session_traces_b_y - traces_y_mean)/traces_y_std + session_traces_b_x = np.vstack(session_traces_b_x) + traces_x_mean=np.mean(session_traces_b_x[np.where(session_traces_t_l_f == 1)]) + traces_x_std=np.std(session_traces_b_x[np.where(session_traces_t_l_f == 1)]) + session_traces_b_x = (session_traces_b_x - traces_x_mean)/traces_x_std + session_traces_b_z = np.vstack(session_traces_b_z) + traces_z_mean=np.mean(session_traces_b_z[np.where(session_traces_t_l_f == 1)]) + traces_z_std=np.std(session_traces_b_z[np.where(session_traces_t_l_f == 1)]) + session_traces_b_z = (session_traces_b_z - traces_z_mean)/traces_z_std + + traces_len = np.size(session_traces_b_z, axis = 1) + + # format the video data + session_traces_s_y = np.hstack(session_traces_s_y) + session_traces_s_x = np.hstack(session_traces_s_x) + session_traces_s_z = np.hstack(session_traces_s_z) + session_traces_b_y = np.hstack(session_traces_b_y) + session_traces_b_x = np.hstack(session_traces_b_x) + session_traces_b_z = np.hstack(session_traces_b_z) + # -- moving-average and down-sample + window_size = int(bin_width/0.0034) # sample + kernel = np.ones(window_size) / window_size + session_traces_s_x = np.convolve(session_traces_s_x, kernel, 'same') + session_traces_s_x = session_traces_s_x[window_size::window_size] + session_traces_s_y = np.convolve(session_traces_s_y, kernel, 'same') + session_traces_s_y = session_traces_s_y[window_size::window_size] + session_traces_s_z = np.convolve(session_traces_s_z, kernel, 'same') + session_traces_s_z = session_traces_s_z[window_size::window_size] + session_traces_b_x = np.convolve(session_traces_b_x, kernel, 'same') + session_traces_b_x = session_traces_b_x[window_size::window_size] + session_traces_b_y = np.convolve(session_traces_b_y, kernel, 'same') + session_traces_b_y = session_traces_b_y[window_size::window_size] + session_traces_b_z = np.convolve(session_traces_b_z, kernel, 'same') + session_traces_b_z = session_traces_b_z[window_size::window_size] + session_traces_t_l = np.convolve(session_traces_t_l, kernel, 'same') + session_traces_t_l = 
session_traces_t_l[window_size::window_size] + session_traces_t_l[np.where(session_traces_t_l < 1)] = 0 + session_traces_s_x = np.reshape(session_traces_s_x,(-1,1)) + session_traces_s_y = np.reshape(session_traces_s_y,(-1,1)) + session_traces_s_z = np.reshape(session_traces_s_z,(-1,1)) + session_traces_b_x = np.reshape(session_traces_b_x * session_traces_t_l, (-1,1)) + session_traces_b_y = np.reshape(session_traces_b_y * session_traces_t_l, (-1,1)) + session_traces_b_z = np.reshape(session_traces_b_z * session_traces_t_l, (-1,1)) + + # get breathing + breathing, breathing_ts = (experiment.Breathing & key & [{'trial': tr} for tr in trial_key]).fetch('breathing', 'breathing_timestamps', order_by='trial') + good_breathing = breathing + for i, d in enumerate(breathing): + good_breathing[i] = d[breathing_ts[i] < traces_len*3.4/1000] + good_breathing = stats.zscore(np.vstack(good_breathing),axis=None) + + good_breathing = np.hstack(good_breathing) + # -- moving-average + window_size = int(round(bin_width/(breathing_ts[0][1]-breathing_ts[0][0]),0)) # sample + kernel = np.ones(window_size) / window_size + good_breathing = np.convolve(good_breathing, kernel, 'same') + # -- down-sample + good_breathing = good_breathing[window_size::window_size] + good_breathing = np.reshape(good_breathing,(-1,1)) + + # get whisker + session_traces_w = (v_oralfacial_analysis.WhiskerSVD & key).fetch('mot_svd') + if len(session_traces_w[0][:,0]) % 1471 != 0: + print('Bad videos in bottom view') + #return + else: + num_trial_w = int(len(session_traces_w[0][:,0])/1471) + session_traces_w = np.reshape(session_traces_w[0][:,0], (num_trial_w, 1471)) + + trial_idx_nat = [d.astype(str) for d in np.arange(num_trial_w)] + trial_idx_nat = sorted(range(len(trial_idx_nat)), key=lambda k: trial_idx_nat[k]) + trial_idx_nat = sorted(range(len(trial_idx_nat)), key=lambda k: trial_idx_nat[k]) + session_traces_w = session_traces_w[trial_idx_nat,:] + session_traces_w_o = stats.zscore(session_traces_w,axis=None) + session_traces_w = session_traces_w_o[trial_key-1] + + session_traces_w = np.hstack(session_traces_w) + window_size = int(bin_width/0.0034) # sample + kernel = np.ones(window_size) / window_size + session_traces_w = np.convolve(session_traces_w, kernel, 'same') + session_traces_w = session_traces_w[window_size::window_size] + session_traces_w = np.reshape(session_traces_w,(-1,1)) + + # coordination of movements + amp_b, phase_b=behavior_plot.compute_insta_phase_amp(good_breathing, 1/bin_width, freq_band=(1, 15)) # breathing + phase_b = phase_b + np.pi + + threshold = 1 + cond = (phase_b < threshold) & (np.roll(phase_b,-1) >= threshold) + inspir_onset=np.argwhere(cond)[:,0]*bin_width # get onset of breath + + # licking epochs + threshold = 0.5 # tongue detection + a_cond = (session_traces_t_l < threshold) & (np.roll(session_traces_t_l,-1) >= threshold) + ton_onset=np.argwhere(a_cond)[:,0]*bin_width # get onset of breath + + ilf=1/np.diff(ton_onset) + + ton_onset=ton_onset[:-1] + f_cond=(ilf>3) & (ilf<9) # lick freq > 3 & < 9 + ton_onset_idx=np.argwhere(f_cond)[:,0] # index of tongue appearance + lick_onset_idx=[] + next_lick=np.diff(ton_onset_idx) + for i,tongue in enumerate(ton_onset_idx[:-2]): + #if (next_lick[i]==1) & (next_lick[i+1]==1): # num licks > 3 + if (next_lick[i]==1): # num licks > 3 + lick_onset_idx.append(tongue) # index of tongue + lick_onset_idx=np.array(lick_onset_idx) + lick_onset_d=np.diff(lick_onset_idx) + lick_cond_on = np.roll(lick_onset_d,1) >= 2 + lick_cond_off = lick_onset_d >= 2 + 
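# --- Illustrative sketch (not part of the diff): the rising-threshold-crossing pattern used
# --- above for inspiration, tongue, and whisking onsets: a sample is an onset when the trace
# --- is below threshold and the next sample is at or above it (np.roll wraps at the array end,
# --- so the final sample can produce a spurious crossing). Synthetic trace below.
import numpy as np

bin_width = 0.0034                                            # s, as in MovementTiming.make
trace = np.sin(2 * np.pi * 4 * np.arange(0, 5, bin_width))    # fake 4 Hz rhythm
threshold = 0.5
cond = (trace < threshold) & (np.roll(trace, -1) >= threshold)
onset_times = np.argwhere(cond)[:, 0] * bin_width             # onset times in seconds
inst_rate = 1 / np.diff(onset_times)                          # input to the frequency filters above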
lick_bout_onset=np.argwhere(lick_cond_on)[:,0] + lick_bout_offset=np.argwhere(lick_cond_off)[:,0] + if lick_bout_onset[0]!=0: + lick_bout_onset=np.concatenate((np.array([0]),lick_bout_onset)) # add first lick + lick_bout_offset=np.concatenate((lick_bout_offset,np.array([len(lick_onset_idx)-1]))) # add last lick + + lick_onset_time=ton_onset[lick_onset_idx[lick_bout_onset]] # get onset of licks + lick_offset_time=ton_onset[lick_onset_idx[lick_bout_offset]+2] + + # whisking epochs + if (np.median(session_traces_w) > (np.mean(session_traces_w)+0.1)): + session_traces_w=session_traces_w*-1 + amp_w, phase_w=behavior_plot.compute_insta_phase_amp(session_traces_w, 1/bin_width, freq_band=(3, 25)) + phase_w = phase_w + np.pi + + threshold = 1 # whisking detection + a_cond = (amp_w < threshold) & (np.roll(amp_w,-1) >= threshold) + whi_onset=np.argwhere(a_cond)[:,0]*bin_width # get onset of breath + + iwf=1/np.diff(whi_onset) + + whi_onset=whi_onset[:-1] + f_cond=(iwf>1) & (iwf<25) # whisk freq > 1 & < 25 + whisker_onset_idx=np.argwhere(f_cond)[:,0] # index of tongue appearance + whi_onset_idx=[] + next_whi=np.diff(whisker_onset_idx) + for i,whisker in enumerate(whisker_onset_idx[:-2]): + if (next_whi[i]==1) & (next_whi[i+1]==1): # num licks > 3 + #if (next_lick[i]==1): # num licks > 3 + whi_onset_idx.append(whisker) # index of tongue + whi_onset_idx=np.array(whi_onset_idx) + whi_onset_d=np.diff(whi_onset_idx) + whi_cond_on = np.roll(whi_onset_d,1) >= 2 + whi_cond_off = whi_onset_d >= 2 + whi_bout_onset=np.argwhere(whi_cond_on)[:,0] + whi_bout_offset=np.argwhere(whi_cond_off)[:,0] + if whi_bout_onset[0]!=0: + whi_bout_onset=np.concatenate((np.array([0]),whi_bout_onset)) # add first lick + whi_bout_offset=np.concatenate((whi_bout_offset,np.array([len(whi_onset_idx)-1]))) # add last lick + + whi_onset_time=whi_onset[whi_onset_idx[whi_bout_onset]] # get onset of licks + whi_offset_time=whi_onset[whi_onset_idx[whi_bout_offset]+2] + + self.insert1({**key, 'inspiration_onset': inspir_onset, 'lick_onset': lick_onset_time, 'lick_offset': lick_offset_time, 'whisk_onset': whi_onset_time, 'whisk_offset': whi_offset_time, 'tongue_onset': ton_onset, 'whisker_onset': whi_onset}) \ No newline at end of file diff --git a/pipeline/shell.py b/pipeline/shell.py index 8d9563bb..aa73590f 100644 --- a/pipeline/shell.py +++ b/pipeline/shell.py @@ -481,8 +481,10 @@ def populate_foraging_analysis(populate_settings={'reserve_jobs': True, 'display def populate_oralfacial_analysis(populate_settings={'reserve_jobs': True, 'display_progress': True}): - log.info('oralfacial_analysis.GLMFit.populate()') - oralfacial_analysis.GLMFit.populate(**dict(populate_settings, max_calls=100)) + log.info('oralfacial_analysis.GLMFitNoLick.populate()') + #log.info('oralfacial_analysis.GLMFit.populate()') + oralfacial_analysis.GLMFitNoLick.populate(**dict(populate_settings, max_calls=100)) + #oralfacial_analysis.GLMFit.populate(**dict(populate_settings, max_calls=100)) def generate_report(populate_settings={'reserve_jobs': True, 'display_progress': True}):
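A minimal usage sketch for the new populate path, assuming the oralfacial_analysis module is importable from the pipeline package as shell.py implies, and using a made-up subject restriction; connection and schema configuration are whatever the deployment already uses. MovementTiming should be populated before GLMFitNoLick, since GLMFitNoLick.make fetches its lick_onset/lick_offset to exclude licking periods from the design matrix.

# Hypothetical usage; the import path and the subject restriction are assumptions.
from pipeline import oralfacial_analysis, shell

# GLMFitNoLick.make() calls fetch1 on MovementTiming, so populate that table first.
oralfacial_analysis.MovementTiming.populate(reserve_jobs=True, display_progress=True)

# The updated shell entry point now populates GLMFitNoLick (max_calls=100 per call).
shell.populate_oralfacial_analysis()

# Inspect the lag-resolved pseudo-R2 for one (made-up) subject's units.
r2_nolick = (oralfacial_analysis.GLMFitNoLick & 'subject_id = 1234').fetch('r2_nolick')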