I'm working on a demo that will require me to capture video from a camera and display it in real time (after some processing). I followed the template from OpenCV's docs, but implemented the capturing (and processing) in a separate thread. As per this blog post, separating the capturing and display threads can give a significant performance boost to the display's FPS.
The code below is what I currently have.
import cv2
import sys
from threading import Thread
import time
class ThreadedProcessor(Thread):
    """Base class for worker threads with a context-manager lifecycle.

    ``__enter__`` calls :meth:`init` (which starts the thread) and
    ``__exit__`` calls :meth:`cleanup`.  Subclasses override both around
    ``super()`` calls, and should poll :attr:`running` in their ``run``
    loop so that :meth:`cleanup` can stop them cooperatively.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cooperative stop flag: set by init(), cleared by cleanup().
        self._running = False

    @property
    def running(self):
        """True while the thread is expected to keep working."""
        return self._running

    def __enter__(self):
        self.init()
        return self

    def __exit__(self, *args):
        self.cleanup()

    def init(self):
        """Start the worker thread."""
        self._running = True
        self.start()

    def cleanup(self):
        """Signal the thread to stop and wait briefly for it to finish.

        The original implementation called ``sys.exit()`` here, which only
        raises SystemExit in the *calling* thread and never stops the
        worker -- a non-daemon worker then keeps the process alive, which
        is why the program refused to terminate.
        """
        self._running = False
        if self.is_alive():
            self.join(timeout=5)
class BasicVideoProcessor(ThreadedProcessor):
    """Capture frames from a camera on a background thread.

    Parameters:
        camera: index (or path/URL) passed to ``cv2.VideoCapture``.
        size: requested frame size; stored only.
            # NOTE(review): `size` was accepted but never applied in the
            # original either -- confirm whether frames should be resized.
        init_timeout: seconds to wait for the stream to open before
            raising ``ValueError``.  (The original accepted this argument
            but silently dropped it.)
    """

    def __init__(self, camera=0, size=None, init_timeout=10):
        super().__init__()
        self.camera = camera
        self.size = size
        self.init_timeout = init_timeout
        self.capture = None
        self._frame = None  # latest grabbed frame, or None

    def init(self):
        """Open the capture device, wait for it, then start the thread.

        Waiting here (in the caller's thread) instead of inside run()
        means a failed stream raises in the caller, where it can be seen,
        rather than being swallowed by the worker thread.
        """
        self.capture = cv2.VideoCapture(self.camera)
        self._wait_for_stream()
        self._running = True  # stop flag polled by run()
        super().init()

    def cleanup(self):
        # Stop the capture loop and join *before* releasing the device,
        # so run() never reads from a released VideoCapture (the original
        # released first, racing against the still-running read loop).
        self._running = False
        if self.is_alive():
            self.join(timeout=5)
        if self.capture is not None:
            self.capture.release()
        super().cleanup()

    def run(self):
        # Keep grabbing until asked to stop or the stream closes.
        while self._running and self.capture.isOpened():
            grabbed, frame = self.capture.read()
            self._frame = frame if grabbed else None

    def _wait_for_stream(self):
        """Poll until the stream opens or *init_timeout* seconds elapse.

        Raises:
            ValueError: if the stream does not open in time.
        """
        deadline = time.time() + self.init_timeout
        while time.time() < deadline:
            if self.capture.isOpened():
                return
            # The original busy-looped 10 iterations with no delay, so its
            # "timeout" lasted microseconds; sleep between polls instead.
            time.sleep(0.1)
        raise ValueError("Timeout")

    def get_frame(self):
        """Return the most recently captured frame (may be None)."""
        return self._frame
def display(src, fps=30, title="Display"):
    """Show frames from *src* (a frame provider with ``get_frame``) in a
    cv2 window on the calling thread, pacing the loop at roughly *fps*.

    Runs until 'q' is pressed or the source thread dies.  This runs on
    the main thread, which is what most cv2/HighGUI backends require.
    """
    time_per_frame = 1.0 / fps
    # Wait for the capture thread to come up before drawing anything.
    while not src.is_alive():
        time.sleep(1)
    next_frame_time = time.time()
    while src.is_alive():
        frame = src.get_frame()
        if frame is not None:
            cv2.imshow(title, frame)
        next_frame_time += time_per_frame
        # waitKey takes MILLISECONDS; the original divided the remaining
        # seconds by 1000, which truncates to waitKey(0) == block forever.
        remaining_ms = int((next_frame_time - time.time()) * 1000)
        # Always pump the GUI event loop for at least 1 ms -- without a
        # waitKey call, imshow never actually paints the window.
        if cv2_wait_for_q(max(remaining_ms, 1)):
            break
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
def cv2_wait_for_q(ms=0):
    """Pump cv2's GUI event loop for up to *ms* milliseconds.

    Returns True when the 'q' key was pressed during the wait.
    """
    pressed = cv2.waitKey(ms)
    return (pressed & 0xFF) == ord('q')
def main():
    """Capture from the default camera and display until 'q' is pressed."""
    capture = BasicVideoProcessor()
    with capture:
        display(capture)


if __name__ == '__main__':
    main()
This code works well enough, and at the moment (without any image processing) is easily able to maintain a 60FPS display, which is good enough given my monitor's refresh rate and my camera's capturing FPS.
However, this code only creates a separate thread for the video capturing. At some point I may need to have several displays active at the same time (e.g. if I wanted to run this on a VR headset, I would need two simultaneous displays), which with the current code would all run in the same thread.
This is why my original code replaced the above `display` and `main` functions with:
class Display(ThreadedProcessor):
    """Show frames from a source processor in a cv2 window on its own thread.

    WARNING: most cv2/HighGUI backends (Qt, Cocoa, ...) require that
    ``cv2.imshow`` / ``cv2.waitKey`` run on the MAIN thread; calling them
    from this worker thread is what produces
    "QObject::startTimer: Timers cannot be started from another thread".
    Keep the GUI on the main thread and move only frame *production*
    into worker threads.
    """

    def __init__(self, source, fps=30, title="Display"):
        super().__init__()
        self.src = source
        self.fps = fps
        self.time_per_frame = 1.0 / fps
        self.title = title

    def cleanup(self):
        # Tear down windows before the base class stops/joins the thread,
        # matching the original ordering.
        cv2.destroyAllWindows()
        super().cleanup()

    def run(self):
        # Wait for the source thread to come up before drawing.
        while not self.src.is_alive():
            time.sleep(1)
        next_frame_time = time.time()
        while self.src.is_alive():
            frame = self.src.get_frame()
            if frame is not None:
                cv2.imshow(self.title, frame)
            next_frame_time += self.time_per_frame
            # waitKey takes MILLISECONDS; the original divided the
            # remaining seconds by 1000, truncating to waitKey(0) == block.
            remaining_ms = int((next_frame_time - time.time()) * 1000)
            # Pump the event loop at least 1 ms so imshow actually paints.
            if cv2_wait_for_q(max(remaining_ms, 1)):
                break
def main():
    """Run capture and display workers; quit from the main thread on 'q'."""
    capture = BasicVideoProcessor()
    with capture, Display(capture):
        # Keep the main thread pumping the key check until 'q' arrives.
        while not cv2_wait_for_q(1):
            pass
This, however, resulted in the following error:
QObject::startTimer: Timers cannot be started from another thread
The code (unlike the version above) also refuses to terminate on either a Q press (which might be unrelated, and caused by the questionable implementation of the `main` function) or Ctrl+C. If I remove the two display lines (marked with `#`), the code runs without errors and does terminate on Ctrl+C, but (obviously) doesn't display anything.
I've tried searching for the above error and found a few answers explaining that PyQt (which `cv2.imshow` uses as a GUI backend) doesn't allow GUI method calls on any thread but the main one.
Is there a way to use a backend that will allow me to implement a threaded display or a way to bypass this issue in PyQt?