import numpy as np
import random as rand
# load data
# Load the reach dataset; 'r' holds the per-trial structs, 'cfr' the reach-target label (1-8) per trial.
with np.load('/datasets/hw3_dataset/ReachData.npz', allow_pickle=True) as data:
    r = data['r']
    cfr = data['cfr']
    print(data.files)
# Re-open the archive so the NpzFile handle stays usable after the with-block closes it.
data = np.load('/datasets/hw3_dataset/ReachData.npz', allow_pickle=True)
# Description of data in 'r'
# - r[i].unit[j].spikeTimes - array of spiketimes relative to trial start (in ms) for trial i, neuron j
# - r[i].timeTouchHeld - the time the reach target appears and movement planning nominally begins for trial i
# - r[i].timeGoCue - the time the animal is cued to move and planning nominally ends
# - r[i].timeTargetAcquire - the time movement ends
# 'pp' - delay period duration
# RT - unknown but length of 1127
# NumberInClass - use this to balance across
# There are a total of 1127 trials and 190 neurons
# notebook output (data.files):
# ['r', 'numUnits', 'cfr', 'dataset', 'classes', 'cInds', 'NC', 'targets', 'NumberInClass', 'RT', 'numTrials', 'pp']
# Initialize spike rate matrices, all (# trials x # neurons) = (1127 x 190).
# NOTE: spike times are in ms (see data description above), so with the *1000
# below these are rates in spikes per SECOND, not spikes/ms.
spike_rates_undiff = np.zeros((1127, 190)) # rate over the combined plan + move window
spike_rates_plan_only = np.zeros((1127, 190)) # rate over the plan epoch only
spike_rates_move_only = np.zeros((1127, 190)) # rate over the move epoch only
for i in range(1127):  # trials
    for j in range(190):  # neurons
        # A lone spike is stored as a bare float; wrap it so boolean masking works.
        spiketimes = np.array([r[i].unit[j].spikeTimes]) if isinstance(r[i].unit[j].spikeTimes, float) else r[i].unit[j].spikeTimes # spike times for current trial/neuron
        # Count spikes in (timeTouchHeld, timeTargetAcquire]: plan + move phases.
        spike_rates_undiff[i][j] = len(spiketimes[(spiketimes > r[i].timeTouchHeld) & (spiketimes <= r[i].timeTargetAcquire)])
        spike_rates_undiff[i][j] = 1000*spike_rates_undiff[i][j] / (r[i].timeTargetAcquire - r[i].timeTouchHeld)
        spike_rates_plan_only[i][j] = len(spiketimes[(spiketimes > r[i].timeTouchHeld) & (spiketimes <= r[i].timeGoCue)]) # count of spikes in plan phase
        spike_rates_plan_only[i][j] = 1000*spike_rates_plan_only[i][j] / (r[i].timeGoCue - r[i].timeTouchHeld)
        spike_rates_move_only[i][j] = len(spiketimes[(spiketimes > r[i].timeGoCue) & (spiketimes <= r[i].timeTargetAcquire)]) # count of spikes in move phase
        spike_rates_move_only[i][j] = 1000*spike_rates_move_only[i][j] / (r[i].timeTargetAcquire - r[i].timeGoCue)
# Plan and move rates side by side: (trials x 2*neurons); columns 0-189 are plan, 190-379 are move.
spike_rates_plan_move = np.concatenate((spike_rates_plan_only, spike_rates_move_only), axis=1)
from math import pi, sqrt, exp
def gauss_kernel(length, sigma=15):
    '''
    Gaussian kernel function, centered at 0, with given length and standard deviation.
    Returns a list of length 2*floor(length/2) + 1 sampled from a normal pdf.
    '''
    half = int(length / 2)
    norm_const = 1.0 / (sigma * sqrt(2 * pi))
    # sample the pdf at integer offsets -half..+half (renamed from `r`, which
    # shadowed the module-level data array of the same name)
    return [norm_const * exp(-(x ** 2) / (2 * sigma ** 2)) for x in range(-half, half + 1)]
# Help with generating spike rate over time - https://rcweb.dartmouth.edu/~mvdm/wiki/doku.php?id=analysis:nsb2016:week9
from sklearn.decomposition import PCA
from scipy.ndimage import gaussian_filter1d
from sklearn.preprocessing import StandardScaler
def pc_directions(bin_size, kernel_length, kernel_sigma):
    '''
    Computes leading principal component direction for each unit.

    Per neuron: for each of the 8 targets, average the smoothed (binned then
    Gaussian-convolved) rate traces across that target's trials, separately for
    the plan and move epochs; then PCA the 8 target-averaged traces and keep
    the first component.

    Args:
        bin_size: number of histogram bins per epoch (np.histogram `bins` count)
        kernel_length: length of the Gaussian smoothing kernel (in bins)
        kernel_sigma: standard deviation of the smoothing kernel
    Returns: dictionaries mapping neuron -> 1st principal component (for move and plan)
    '''
    plan_pc_directions = {} # dictionary mapping neuron number -> 1st principal component
    move_pc_directions = {} # dictionary mapping neuron number -> 1st principal component
    for i in range(190):
        avg_spike_trains_plan = [] # average plan spike train across trials, one entry per target (8)
        avg_spike_trains_move = [] # average move spike train across trials, one entry per target (8)
        for target in range(1, 9):
            relevant_trials = np.squeeze(np.argwhere(cfr == target)) # indices of trials which have given target
            # A lone spike is stored as a bare float; wrap it into an array.
            spiketimes = [np.array([r[trial].unit[i].spikeTimes]) if isinstance(r[trial].unit[i].spikeTimes, float)
                          else r[trial].unit[i].spikeTimes for trial in relevant_trials] # spiketimes for each trial for this neuron
            plan_start = [r[trial].timeTouchHeld for trial in relevant_trials]
            plan_end = [r[trial].timeGoCue for trial in relevant_trials]
            move_end = [r[trial].timeTargetAcquire for trial in relevant_trials]
            all_plan_conv = []
            all_move_conv = []
            for spike_train, start, middle, end in zip(spiketimes, plan_start, plan_end, move_end):
                # Bin the spikes. A fixed bin COUNT means bin widths differ per trial,
                # which keeps every trace the same length for averaging below.
                binned_plan, bin_edges_plan = np.histogram(spike_train, bins=bin_size, range=(start, middle))
                binned_move, bin_edges_move = np.histogram(spike_train, bins=bin_size, range=(middle, end))
                bin_size_plan = bin_edges_plan[1] - bin_edges_plan[0] # bin width for the plan data
                bin_size_move = bin_edges_move[1] - bin_edges_move[0] # bin width for the move data
                # Convolve with gaussian; divide by bin width to get rates instead of counts.
                plan_convolved = np.convolve(binned_plan, gauss_kernel(kernel_length, kernel_sigma), mode='same') / bin_size_plan
                move_convolved = np.convolve(binned_move, gauss_kernel(kernel_length, kernel_sigma), mode='same') / bin_size_move
                all_plan_conv.append(plan_convolved)
                all_move_conv.append(move_convolved)
            avg_spike_trains_plan.append(np.mean(all_plan_conv, axis=0))
            avg_spike_trains_move.append(np.mean(all_move_conv, axis=0))
        # PCA on the standardized (8 x bin_size) target-average matrix; save first PC.
        pca = PCA()
        sc = StandardScaler()
        avg_spike_trains_plan = sc.fit_transform(avg_spike_trains_plan)
        pca.fit(avg_spike_trains_plan)
        plan_pc_directions[i] = pca.components_[0]
        pca = PCA()
        sc = StandardScaler()
        avg_spike_trains_move = sc.fit_transform(avg_spike_trains_move)
        pca.fit(avg_spike_trains_move)
        move_pc_directions[i] = pca.components_[0]
    return move_pc_directions, plan_pc_directions
def get_pc_scores(bin_size, kernel_length, kernel_sigma, move_pc_directions, plan_pc_directions):
    '''
    Given PC directions as found above, compute the spike rate PC scores needed for downstream modeling tasks.

    Each trial's smoothed rate trace (same binning/smoothing as pc_directions)
    is projected onto that neuron's first principal component.
    Returns: (trials x neurons) score matrices for the move and plan epochs.
    '''
    spike_rates_move_pc = np.zeros((1127, 190))
    spike_rates_plan_pc = np.zeros((1127, 190))
    for i in range(1127):  # trials
        for j in range(190):  # neurons
            # A lone spike is stored as a bare float; wrap it into an array.
            spiketimes = np.array([r[i].unit[j].spikeTimes]) if isinstance(r[i].unit[j].spikeTimes, float) else r[i].unit[j].spikeTimes # spike train for this neuron/trial
            start = r[i].timeTouchHeld
            middle = r[i].timeGoCue
            end = r[i].timeTargetAcquire
            # Bin the spikes (fixed bin count per epoch, matching pc_directions).
            binned_plan, bin_edges_plan = np.histogram(spiketimes, bins=bin_size, range=(start, middle))
            binned_move, bin_edges_move = np.histogram(spiketimes, bins=bin_size, range=(middle, end))
            bin_size_plan = bin_edges_plan[1] - bin_edges_plan[0] # bin width for the plan data
            bin_size_move = bin_edges_move[1] - bin_edges_move[0] # bin width for the move data
            # Convolve with gaussian; normalize by bin width (rates instead of counts).
            plan_convolved = np.convolve(binned_plan, gauss_kernel(kernel_length, kernel_sigma), mode='same') / bin_size_plan
            move_convolved = np.convolve(binned_move, gauss_kernel(kernel_length, kernel_sigma), mode='same') / bin_size_move
            # Project each trace onto the neuron's leading PC.
            spike_rates_move_pc[i][j] = np.inner(move_convolved, move_pc_directions[j])
            spike_rates_plan_pc[i][j] = np.inner(plan_convolved, plan_pc_directions[j])
    return spike_rates_move_pc, spike_rates_plan_pc
# Compute PC directions and per-trial scores: 500 bins, kernel length 50, sigma 10.
move_pc_directions, plan_pc_directions = pc_directions(500, 50,10)
spike_rates_move_pc, spike_rates_plan_pc = get_pc_scores(500, 50, 10, move_pc_directions, plan_pc_directions)
# Feature matrices (trials x 380): plan RATES paired with move PC scores ...
spike_rates_plan_move_pc = np.concatenate((spike_rates_plan_only, spike_rates_move_pc), axis=1)
# ... and plan PC scores paired with move PC scores.
spike_rates_plan_pc_move_pc = np.concatenate((spike_rates_plan_pc, spike_rates_move_pc), axis=1)
# print(np.count_nonzero(spike_rates_move_pc==0))
import matplotlib.pyplot as plt
# Sanity check: visualize one plan-epoch principal component.
plt.plot(plan_pc_directions[0])
plt.xlabel("Time (ms)")
plt.ylabel("Gaussian Smoothed Rate (spikes/ms)")
plt.title("Example Principal Component for Neuron 1 - Plan")
plt.show()
def learn_gaussian(training_data, labels, correction):
    '''
    Fit one diagonal-covariance Gaussian per reach target (max-likelihood).

    Inputs:
        training_data: (n_trials, n_features) array of spike-rate features
        labels: length-n_trials array of class labels (1-8), as in cfr
        correction: value substituted for any zero variance so downstream
            likelihood evaluations stay finite
    Outputs: dict mapping class (1-8) to a tuple (mean vector, variance vector).
        Assumes diagonal covariance!
    '''
    gaussian_params = {} # dict mapping target number -> (mean, variances)
    for k in range(1, 9):
        rows = training_data[labels == k] # training rows for current class
        # Max-likelihood estimates. np.var(axis=0) equals the diagonal of
        # np.cov(rows.T, bias=True) but returns a writable array, so the
        # read-only-view setflags() workaround is no longer needed.
        mean = np.mean(rows, axis=0)
        cov = np.var(rows, axis=0)
        cov[cov == 0] = correction # correcting for 0 variance
        gaussian_params[k] = (mean, cov)
    return gaussian_params
from scipy.stats import multivariate_normal
from scipy.stats import norm
import math
# def gaussian_llh(data, params, penalty):
# '''
# NOT USED
# Inputs: data, tuple containing mean and variances of Gaussian
# Outputs: log-likelihood of data under the given Gaussian model
# '''
# means = params[0]
# variances = params[1]
# llh = 0
# for i in range(len(data)):
# if norm.pdf(data[i], means[i], variances[i]) != 0:
# llh += np.log(norm.pdf(data[i], means[i], variances[i]))
# else:
# llh += penalty
# return llh
def predict_target(test_data, gaussian_params):
    '''
    Inputs: test_data, and dictionary containing the gaussian params for each target
    Outputs: predicted targets
    '''
    # For each test trial, keep the maximum-likelihood target from target_prob.
    return [target_prob(trial, gaussian_params)[0] for trial in test_data]
from functools import reduce
import math
def target_prob(data, gaussian_params):
    '''
    Inputs: data (feature vector) from a single trial, gaussian params for each target
        as produced by learn_gaussian: target -> (mean vector, variance vector)
    Outputs: max likelihood target estimate, array of p(target | data) for targets 1-8
        (uniform prior over targets, so the posterior is the normalized likelihood)
    '''
    log_probs = [] # log p(data | target) for each target
    for k in range(1, 9):
        mean, variance = gaussian_params[k]
        # BUG FIX: scipy's norm.pdf takes the STANDARD DEVIATION as its scale
        # argument; learn_gaussian stores variances, so take the square root.
        temp_logs = np.log(norm.pdf(data, mean, np.sqrt(variance)))
        temp_logs[temp_logs == -np.inf] = -1000 # clamp underflowed densities
        log_probs.append(np.sum(temp_logs))
    log_probs = np.asarray(log_probs)
    max_target = 1 + int(np.argmax(log_probs)) # target with highest log p(data|target)
    # BUG FIX: the log-sum-exp shift must be the MAX LOG-LIKELIHOOD, not the
    # target index max_target; the old code produced unnormalized "posteriors".
    max_log = np.max(log_probs)
    log_denominator = max_log + np.log(np.sum(np.exp(log_probs - max_log)))
    probs = list(np.exp(log_probs - log_denominator))
    return max_target, probs
def predict_target_AP(test_data, gaussian_params):
    '''
    Uses mean of the a posteriori distribution as the target estimator
    Inputs: test_data, and dictionary containing the gaussian params for each target
    Outputs: predicted targets
    '''
    predictions = []
    for trial in test_data:
        ml_target, posterior = target_prob(trial, gaussian_params)
        # Posterior-mean estimate over targets 1..8.
        estimate = np.inner(np.arange(1, 9), posterior)
        # Fall back to the ML target if the posterior mean overflowed to inf.
        predictions.append(estimate if estimate != np.inf else ml_target)
    return predictions
def train_test_split(training_size):
    '''
    Inputs: desired number of trials per target in the training set
    Outputs: train indices and test indices, balanced by each target
    '''
    train_indices = []
    test_indices = []
    for target in range(1, 9):
        # All trial indices whose reach target matches this target.
        candidates = list(np.squeeze(np.argwhere(cfr == target)))
        chosen = rand.sample(candidates, training_size) # random training subset
        train_indices.extend(chosen)
        test_indices.extend(np.setdiff1d(candidates, chosen)) # everything else is test
    return train_indices, test_indices
# Default split: 75 training trials per target; the rest are held out for testing.
train_indices, test_indices = train_test_split(75)
def angular_error(predicted, actual):
    """
    Takes predicted and actual values and computes the angular error
    Assumptions: Assumes that each target is 45 degrees apart from the others
    Outputs: Average angular error (as defined by Yu et al.)
    """
    raw = np.abs(np.asarray(predicted) - np.asarray(actual))
    # Wrap around the ring of 8 targets, then convert target steps to degrees.
    wrapped = np.minimum(raw, 8 - raw)
    return np.mean(45 * wrapped)
import itertools
def tuning(bin_sizes, kernel_sigmas, corrections, penalties):
    """
    Grid-search over preprocessing/model hyper-parameters.

    Inputs: candidate lists for histogram bin count, smoothing-kernel sigma,
        zero-variance correction, and log-likelihood penalty. The penalty axis
        is kept for interface compatibility but is unused: target_prob clamps
        underflowed log-densities internally.
    Outputs: dict mapping (bin_size, sigma, correction, penalty) -> angular error
    """
    search_results = {}
    param_grid = [bin_sizes, kernel_sigmas, corrections, penalties]
    options = list(itertools.product(*param_grid))
    for params in options:
        bin_size, sigma, correction, penalty = params
        move_pc_directions, plan_pc_directions = pc_directions(bin_size, 50, sigma)
        spike_rates_move_pc, spike_rates_plan_pc = get_pc_scores(bin_size, 50, sigma, move_pc_directions, plan_pc_directions)
        # BUG FIX: evaluate the PC features just computed with the candidate
        # parameters; the old code scored the stale module-level
        # spike_rates_plan_pc_move_pc built once from the default parameters,
        # so the search could never see any effect of bin_size/sigma.
        features = np.concatenate((spike_rates_plan_pc, spike_rates_move_pc), axis=1)
        gaussian_params = learn_gaussian(features[train_indices], cfr[train_indices], correction)
        # BUG FIX: predict_target takes only (test_data, gaussian_params); the
        # old call passed a third penalty argument and raised a TypeError.
        predicted = predict_target(features[test_indices], gaussian_params)
        err = angular_error(predicted, cfr[test_indices])
        search_results[params] = err
        print("param_grid: ", params, "error: ", err)
    return search_results
# potential_bins = [500,750,1000,1250]
# potential_sigmas = [10,15,20]
# potential_corrections = [1e-2, 1e-5]
# potential_penalties = [-1000,-50000,-100000]
# # 500,10,0.01, -1000 - 2.73
# results = tuning(potential_bins, potential_sigmas, potential_corrections, potential_penalties)
# Decode from undifferentiated (plan+move combined) rates.
gaussian_params = learn_gaussian(spike_rates_undiff[train_indices], cfr[train_indices], 1e-5)
predicted = predict_target(spike_rates_undiff[test_indices], gaussian_params)
print(angular_error(predicted, cfr[test_indices]))
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:16: RuntimeWarning: divide by zero encountered in log
#   app.launch_new_instance()
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:28: RuntimeWarning: divide by zero encountered in log
# 22.3719165085389
# Decode from move-epoch rates only.
gaussian_params = learn_gaussian(spike_rates_move_only[train_indices], cfr[train_indices], 1e-5)
predicted = predict_target(spike_rates_move_only[test_indices], gaussian_params)
print(angular_error(predicted, cfr[test_indices]))
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:16: RuntimeWarning: divide by zero encountered in log
#   app.launch_new_instance()
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:28: RuntimeWarning: divide by zero encountered in log
# 22.0303605313093
# Decode from plan-epoch rates only.
gaussian_params = learn_gaussian(spike_rates_plan_only[train_indices], cfr[train_indices], 1e-5)
predicted = predict_target(spike_rates_plan_only[test_indices], gaussian_params)
print(angular_error(predicted, cfr[test_indices]))
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:16: RuntimeWarning: divide by zero encountered in log
#   app.launch_new_instance()
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:28: RuntimeWarning: divide by zero encountered in log
# 35.69259962049336
# Decode from concatenated plan + move rates.
gaussian_params = learn_gaussian(spike_rates_plan_move[train_indices], cfr[train_indices], 1e-5)
predicted = predict_target(spike_rates_plan_move[test_indices], gaussian_params)
print(angular_error(predicted, cfr[test_indices]))
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:16: RuntimeWarning: divide by zero encountered in log
#   app.launch_new_instance()
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:28: RuntimeWarning: divide by zero encountered in log
# 26.98292220113852
# Decode from plan rates paired with move PC scores.
gaussian_params = learn_gaussian(spike_rates_plan_move_pc[train_indices], cfr[train_indices], 1e-5)
predicted = predict_target(spike_rates_plan_move_pc[test_indices], gaussian_params)
print(angular_error(predicted, cfr[test_indices]))
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:16: RuntimeWarning: divide by zero encountered in log
#   app.launch_new_instance()
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:28: RuntimeWarning: divide by zero encountered in log
# 6.74573055028463
# Decode from plan PC scores paired with move PC scores.
gaussian_params = learn_gaussian(spike_rates_plan_pc_move_pc[train_indices], cfr[train_indices], 1e-5)
predicted = predict_target(spike_rates_plan_pc_move_pc[test_indices], gaussian_params)
print(angular_error(predicted, cfr[test_indices]))
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:16: RuntimeWarning: divide by zero encountered in log
#   app.launch_new_instance()
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:28: RuntimeWarning: divide by zero encountered in log
# 5.208728652751423
import matplotlib.pyplot as plt
# num_trials = 10
# num_units = 40 # number of neurons to use (out of 190)
# training_size = 90 # number of trials in training set
# train_indices, test_indices = train_test_split(training_size)
# unit_indices = np.random.choice(190, num_units, replace=False)
# spike_rates_plan_move_pc[np.ix_(train_indices, unit_indices)].shape
from scipy.stats import sem
num_trials = 1000 # Monte-Carlo repetitions of the split/subsample
num_units = 140 # number of neurons to use (out of 190)
training_size = 65 # number of trials per target in the training set
# Per-repetition angular errors for each feature set.
move_error = []
plan_error = []
undiff_error = []
plan_move_error = []
plan_move_pc_error = []
planpc_movepc_error = []
# Fig 4: cross-validated decode error for the six feature sets, re-drawing the
# train/test split and the random neuron subset on every repetition.
for i in range(num_trials):
    train_indices, test_indices = train_test_split(training_size)
    unit_indices = np.random.choice(190, num_units, replace=False)
    # unit_indices2 = np.random.choice(380, 190, replace=False)
    # Same neuron subset in both halves of the 380-column concatenated features.
    unit_indices2 = np.concatenate((unit_indices, unit_indices + 190))
    # Move Only
    params_move_only = learn_gaussian(spike_rates_move_only[np.ix_(train_indices, unit_indices)], cfr[train_indices], 1e-5)
    predicted = predict_target(spike_rates_move_only[np.ix_(test_indices, unit_indices)], params_move_only)
    move_error.append(angular_error(predicted, cfr[test_indices]))
    # Plan Only
    params_plan_only = learn_gaussian(spike_rates_plan_only[np.ix_(train_indices, unit_indices)], cfr[train_indices], 1e-5)
    predicted = predict_target(spike_rates_plan_only[np.ix_(test_indices, unit_indices)], params_plan_only)
    plan_error.append(angular_error(predicted, cfr[test_indices]))
    # Undifferentiated
    params_undiff = learn_gaussian(spike_rates_undiff[np.ix_(train_indices, unit_indices)], cfr[train_indices], 1e-5)
    predicted = predict_target(spike_rates_undiff[np.ix_(test_indices, unit_indices)], params_undiff)
    undiff_error.append(angular_error(predicted, cfr[test_indices]))
    # Plan and Move
    params_plan_move = learn_gaussian(spike_rates_plan_move[np.ix_(train_indices, unit_indices2)], cfr[train_indices], 1e-5)
    predicted = predict_target(spike_rates_plan_move[np.ix_(test_indices, unit_indices2)], params_plan_move)
    plan_move_error.append(angular_error(predicted, cfr[test_indices]))
    # Plan + Move PC
    gaussian_params = learn_gaussian(spike_rates_plan_move_pc[np.ix_(train_indices, unit_indices2)], cfr[train_indices], 1e-5)
    predicted = predict_target(spike_rates_plan_move_pc[np.ix_(test_indices, unit_indices2)], gaussian_params)
    plan_move_pc_error.append(angular_error(predicted, cfr[test_indices]))
    # Plan PC Move PC
    params_planpc_movepc = learn_gaussian(spike_rates_plan_pc_move_pc[np.ix_(train_indices, unit_indices2)], cfr[train_indices], 1e-5)
    predicted = predict_target(spike_rates_plan_pc_move_pc[np.ix_(test_indices, unit_indices2)], params_planpc_movepc)
    planpc_movepc_error.append(angular_error(predicted, cfr[test_indices]))
    # print(angular_error(predicted, cfr[test_indices]))
# gaussian_params = learn_gaussian(spike_rates_plan_pc_move_pc[train_indices], cfr[train_indices], 1e-5)
# predicted = predict_target(spike_rates_plan_pc_move_pc[test_indices], gaussian_params)
# print(angular_error(predicted, cfr[test_indices]))
# Mean and standard deviation of the angular error across repetitions, per feature set.
move_error_mean = np.mean(move_error)
plan_error_mean = np.mean(plan_error)
undiff_error_mean = np.mean(undiff_error)
plan_move_error_mean = np.mean(plan_move_error)
plan_move_pc_error_mean = np.mean(plan_move_pc_error)
planpc_movepc_error_mean = np.mean(planpc_movepc_error)
move_error_std = np.std(move_error)
plan_error_std = np.std(plan_error)
undiff_error_std = np.std(undiff_error)
plan_move_error_std = np.std(plan_move_error)
plan_move_pc_error_std = np.std(plan_move_pc_error)
planpc_movepc_error_std = np.std(planpc_movepc_error)
categories = ['Move Rate', 'Plan Rate', 'Undiff. Rate', 'Plan Rate/Move Rate', 'Plan Rate/Move PC', 'Plan PC/Move PC']
x_pos = np.arange(len(categories))
errors = [move_error_mean, plan_error_mean, undiff_error_mean, plan_move_error_mean, plan_move_pc_error_mean, planpc_movepc_error_mean]
stds = [move_error_std, plan_error_std, undiff_error_std, plan_move_error_std, plan_move_pc_error_std, planpc_movepc_error_std]
# Build the plot: bar chart of mean error with std error bars.
fig, ax = plt.subplots()
ax.bar(x_pos, errors, yerr=stds, align='center', alpha=0.6, ecolor='black', capsize=10)
ax.set_ylabel('Angular Error (in degrees)')
ax.set_xticks(x_pos)
ax.set_xticklabels(categories)
ax.set_title('Comparison of cross-validated decode performance')
ax.yaxis.grid(True)
plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right')
# Save the figure and show
plt.tight_layout()
plt.savefig('Fig4_bar_plot.png')
plt.show()
print(move_error_mean, plan_error_mean, undiff_error_mean, plan_move_error_mean, plan_move_pc_error_mean, planpc_movepc_error_mean)
print(move_error_std, plan_error_std, undiff_error_std, plan_move_error_std, plan_move_pc_error_std, planpc_movepc_error_std)
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:15: RuntimeWarning: divide by zero encountered in log
#   from ipykernel import kernelapp as app
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:27: RuntimeWarning: divide by zero encountered in log
# 32.950453047775945 35.99206754530478 26.00658978583196 29.240436573311367 7.727397034596375 9.739497528830313
# 3.7003843925349647 2.986886103196578 2.6926228234200424 2.419587827601291 3.0989507557659692 4.452023218026437
move_error_mean
# Fig 5a: angular error as a function of the number of units, at fixed training size.
num_trials = 100
units = range(10, 210, 20) # number of neurons to use (out of 190)
training_size = 65 # number of trials in training set
undiff_means = []
undiff_stds = []
plan_move_means = []
plan_move_stds = []
plan_move_pc_means = []
plan_move_pc_stds = []
for num_units in units:
    undiff_error = []
    plan_move_error = []
    plan_move_pc_error = []
    for i in range(num_trials):
        train_indices, test_indices = train_test_split(training_size)
        unit_indices = np.random.choice(190, num_units, replace=False)
        # Same neuron subset in both halves of the concatenated features.
        unit_indices2 = np.concatenate((unit_indices, unit_indices + 190))
        # Undifferentiated
        params_undiff = learn_gaussian(spike_rates_undiff[np.ix_(train_indices, unit_indices)], cfr[train_indices], 1e-5)
        predicted = predict_target(spike_rates_undiff[np.ix_(test_indices, unit_indices)], params_undiff)
        undiff_error.append(angular_error(predicted, cfr[test_indices]))
        # Plan and Move
        params_plan_move = learn_gaussian(spike_rates_plan_move[np.ix_(train_indices, unit_indices2)], cfr[train_indices], 1e-5)
        predicted = predict_target(spike_rates_plan_move[np.ix_(test_indices, unit_indices2)], params_plan_move)
        plan_move_error.append(angular_error(predicted, cfr[test_indices]))
        # Plan + Move PC
        gaussian_params = learn_gaussian(spike_rates_plan_move_pc[np.ix_(train_indices, unit_indices2)], cfr[train_indices], 1e-5)
        predicted = predict_target(spike_rates_plan_move_pc[np.ix_(test_indices, unit_indices2)], gaussian_params)
        plan_move_pc_error.append(angular_error(predicted, cfr[test_indices]))
    undiff_means.append(np.mean(undiff_error))
    plan_move_means.append(np.mean(plan_move_error))
    plan_move_pc_means.append(np.mean(plan_move_pc_error))
    # Standard error of the mean for the error bars.
    undiff_stds.append(sem(undiff_error))
    plan_move_stds.append(sem(plan_move_error))
    plan_move_pc_stds.append(sem(plan_move_pc_error))
# print(plan_move_means)
plt.errorbar(units, undiff_means, yerr=undiff_stds, color='blue')
plt.errorbar(units, plan_move_means, yerr=plan_move_stds, color='red')
plt.errorbar(units, plan_move_pc_means, yerr=plan_move_pc_stds, color='green')
plt.legend(["Undiff. Rate", "Plan Rate/Move Rate", "Plan Rate/Move PC"])
plt.xlabel("Number of Units")
plt.ylabel("Angular Error (degrees)")
plt.title("Angular Error vs. Unit Count")
plt.savefig('Fig5a_err_vs_units.png')
plt.show()
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:15: RuntimeWarning: divide by zero encountered in log
#   from ipykernel import kernelapp as app
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:27: RuntimeWarning: divide by zero encountered in log
# Fig 5b: angular error as a function of training-set size, at fixed unit count.
num_trials = 100
training_sizes = [5,10, 20, 30, 40, 50, 60, 70] # training-set sizes to sweep (trials per target)
num_units = 140 # number of neurons to use (out of 190)
undiff_means = []
undiff_stds = []
plan_move_means = []
plan_move_stds = []
plan_move_pc_means = []
plan_move_pc_stds = []
for training_size in training_sizes:
    undiff_error = []
    plan_move_error = []
    plan_move_pc_error = []
    for i in range(num_trials):
        train_indices, test_indices = train_test_split(training_size)
        unit_indices = np.random.choice(190, num_units, replace=False)
        # Same neuron subset in both halves of the concatenated features.
        unit_indices2 = np.concatenate((unit_indices, unit_indices + 190))
        # Undifferentiated
        params_undiff = learn_gaussian(spike_rates_undiff[np.ix_(train_indices, unit_indices)], cfr[train_indices], 1e-5)
        predicted = predict_target(spike_rates_undiff[np.ix_(test_indices, unit_indices)], params_undiff)
        undiff_error.append(angular_error(predicted, cfr[test_indices]))
        # Plan and Move
        params_plan_move = learn_gaussian(spike_rates_plan_move[np.ix_(train_indices, unit_indices2)], cfr[train_indices], 1e-5)
        predicted = predict_target(spike_rates_plan_move[np.ix_(test_indices, unit_indices2)], params_plan_move)
        plan_move_error.append(angular_error(predicted, cfr[test_indices]))
        # Plan + Move PC
        gaussian_params = learn_gaussian(spike_rates_plan_move_pc[np.ix_(train_indices, unit_indices2)], cfr[train_indices], 1e-5)
        predicted = predict_target(spike_rates_plan_move_pc[np.ix_(test_indices, unit_indices2)], gaussian_params)
        plan_move_pc_error.append(angular_error(predicted, cfr[test_indices]))
    undiff_means.append(np.mean(undiff_error))
    plan_move_means.append(np.mean(plan_move_error))
    plan_move_pc_means.append(np.mean(plan_move_pc_error))
    # Standard error of the mean for the error bars.
    undiff_stds.append(sem(undiff_error))
    plan_move_stds.append(sem(plan_move_error))
    plan_move_pc_stds.append(sem(plan_move_pc_error))
# print(plan_move_means)
plt.errorbar(training_sizes, undiff_means, yerr=undiff_stds, color='blue')
plt.errorbar(training_sizes, plan_move_means, yerr=plan_move_stds, color='red')
plt.errorbar(training_sizes, plan_move_pc_means, yerr=plan_move_pc_stds, color='green')
plt.legend(["Undiff. Rate", "Plan Rate/Move Rate", "Plan Rate/Move PC"])
plt.xlabel("Training Size")
plt.ylabel("Angular Error (degrees)")
plt.ylim(0, 50)
plt.title("Angular Error vs. Training Size")
plt.savefig('Fig5b_err_vs_train.png')
plt.show()
# notebook output:
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:15: RuntimeWarning: divide by zero encountered in log
#   from ipykernel import kernelapp as app
# /opt/venv/lib/python3.7/site-packages/ipykernel_launcher.py:27: RuntimeWarning: divide by zero encountered in log