Is it possible to implement populate
such that it utilizes insert
instead of insert1
?
For large data sets, I am getting a server time out error as using insert1
takes hours to populate a table. Here is what I have for:
def make(self, key):
keys = Session.fetch('KEY') # get the primary key(s) from session .fetch('KEYS') for multiple
filename = 'data/AJ0{mouse_id}_{session_id}'.format(**key) # get the filename of the session you are interested in
mat = spio.loadmat(filename, squeeze_me=True,struct_as_record=False) #load the data in .mat format
data = mat[list(mat)[-1]] # unpack the dictionaries to select the specific data
activity_arr = data.deResp
n_trials, n_neuron = activity_arr.shape
for neuro_id in range(0, n_neuron):
for trial_id in range(0, n_trials):
key['neuro_id'] = neuro_id
key['activity'] = activity_arr[trial_id, neuro_id]
self.insert1(Key, skip_duplicates=True)
What I would like is something like this
for neuro_id in range(0, n_neuro):
key['neuro_id'] = np.asarray([neuro_id]*n_trials)
key['activity'] = activity_arr[0:n_trials,neuro_id]
self.insert(key, skip_duplicates=True)
Sort of inserting a list of dictionaries and being able to call the populate method. Any suggestions?
In the past, I implemented the make function below, but with this one I am not able to call populate on it.
def make(self):
activity_arr = data.deResp
n_trials, n_neuron = activity_arr.shape
for neuro_id in range(0, n_neuron):
trial_ids = np.arange(0,n_trials)
mouse_id = np.asarray([self.mouse_id]*n_trials)
neuro_ids = np.asarray([neuro_id]*n_trials)
sess = np.asarray([self.session_id]*n_trials)
acts=activity_arr[0:n_trials,neuro_id]
arr=np.vstack((mouse_id,sess,trial_ids,neuro_ids,acts)).T
self.insert(list(arr), skip_duplicates=True)