I work on computer vision in Google Colab. I have an array of video titles, and in a loop I iterate over that array. At each iteration I read a video using cv2 and close the window before the iteration finishes. After processing several videos, my RAM in Google Colab is full. What should I do to avoid overflowing my RAM? Here is my processing function:
import cv2
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from tensorflow.keras.utils import to_categorical

# make a dataset for only one person
def make_dataset(df, type_, seq_len=6):
    # indices of the 13 keypoints to keep
    uses_keypoints = [0, 11, 12, 13, 14, 15, 16, 23, 24, 25, 26, 27, 28]
    x = tf.Variable(tf.zeros(shape=(0, seq_len, 13, 3)), dtype='float32')
    y = []
    for k in tqdm(range(df.shape[0])):
        cap = cv2.VideoCapture(df.iloc[k].file_name)
        detector = poseDetector()
        keypoints = tf.Variable(tf.zeros(shape=(0, 13, 3)), dtype='float32')
        # sample every 6th frame of the video
        for i in range(0, int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 6):
            cap.set(cv2.CAP_PROP_POS_FRAMES, i)
            success, img = cap.read()
            if not success:
                continue
            lmList = detector.findPose(img)
            if lmList:
                lmList = np.array(lmList)
                lmList = lmList[uses_keypoints]
                lmList = tf.cast(lmList, dtype='float32')
                lmList = tf.expand_dims(lmList, 0)
                keypoints = tf.concat([keypoints, lmList], axis=0)
        cap.release()
        cv2.destroyAllWindows()
        # overlapping windows of seq_len consecutive sampled frames
        sequences = [keypoints[i - seq_len : i] for i in range(seq_len, keypoints.shape[0])]
        label = df.iloc[k].person_id
        for sequence in sequences:
            x = tf.concat([x, tf.expand_dims(sequence, 0)], axis=0)
            y.append(label)
    y = to_categorical(y)
    path_to_load = '/some_path'
    save_dataset(x, y, path_to_load)
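My suspicion is that the tf.concat calls are the problem: each call allocates a brand-new tensor and copies everything accumulated so far, so peak memory grows roughly quadratically with the number of windows, and the old intermediate tensors pile up until Python gets around to freeing them. Would a pattern like the following help? This is only a sketch under the assumption that my poseDetector and save_dataset helpers stay unchanged; make_dataset_lowmem and x_parts are just names I made up here. It accumulates NumPy arrays in plain Python lists, stacks them into one tensor at the very end, and frees each video's buffers explicitly:

    import gc

    import cv2
    import numpy as np
    import tensorflow as tf
    from tqdm import tqdm
    from tensorflow.keras.utils import to_categorical

    def make_dataset_lowmem(df, seq_len=6):
        uses_keypoints = [0, 11, 12, 13, 14, 15, 16, 23, 24, 25, 26, 27, 28]
        x_parts, y = [], []           # grow cheap Python lists, not tensors
        detector = poseDetector()     # one detector reused across all videos
        for k in tqdm(range(df.shape[0])):
            cap = cv2.VideoCapture(df.iloc[k].file_name)
            frames = []
            for i in range(0, int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 6):
                cap.set(cv2.CAP_PROP_POS_FRAMES, i)
                success, img = cap.read()
                if not success:
                    continue
                lmList = detector.findPose(img)
                if lmList:
                    frames.append(np.asarray(lmList, dtype=np.float32)[uses_keypoints])
            cap.release()
            label = df.iloc[k].person_id
            for i in range(seq_len, len(frames)):
                x_parts.append(np.stack(frames[i - seq_len : i]))  # one (seq_len, 13, 3) window
                y.append(label)
            del frames                # drop this video's keypoints before the next one
            gc.collect()              # ask Python to release that memory right away
        x = tf.convert_to_tensor(np.stack(x_parts))  # single copy into a tensor at the end
        y = to_categorical(y)
        save_dataset(x, y, '/some_path')

Appending to a Python list keeps exactly one copy of each window, whereas tf.concat re-copies the whole accumulated tensor on every call; the del/gc.collect() between videos may be overkill, but it seems like cheap insurance on Colab. I also left out cv2.destroyAllWindows(), since Colab is headless and no GUI windows are ever created.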