0

I have demo.py and videostream.py files that need to use the VideoCapture function from cv2 at the same time. The first file is used to determine whether the person is real or just an image in front of the camera. The second one is used to handle the video stream on the website.

My issue is how can I use both without it showing the camera index error.

The demo.py file:

def liveness():
    """Run a webcam liveness check: require blinks plus a confident face
    detection, then classify the face ROI with the liveness model.

    Returns:
        The predicted class label from ``le.classes_`` (e.g. real/fake)
        once a face is detected with confidence > 0.5 after more than
        two blinks have been counted.

    Relies on module-level ``cv2``, ``imutils``, ``np``, ``detector``,
    ``predictor``, ``net``, ``model``, ``le``, ``img_to_array`` and
    ``eye_aspect_ratio`` being defined elsewhere in the file.
    """
    # dlib 68-point landmark index ranges (only the eye ranges are used).
    RIGHT_EYE_POINTS = list(range(36, 42))
    LEFT_EYE_POINTS = list(range(42, 48))

    # Eye-aspect-ratio threshold and how many consecutive below-threshold
    # frames count as one blink.
    EYE_AR_THRESH = 0.30
    EYE_AR_CONSEC_FRAMES = 2

    # Per-eye blink state.
    COUNTER_LEFT = 0
    TOTAL_LEFT = 0
    COUNTER_RIGHT = 0
    TOTAL_RIGHT = 0
    # Fix: the original reused ``x`` for both rect.left() and the blink
    # total, and it was undefined when no face had been seen yet.
    total_blinks = 0

    video_capture = cv2.VideoCapture(0)
    print(video_capture.isOpened())

    # Loop over frames until the liveness decision can be returned.
    while True:
        ret, frame = video_capture.read()
        if frame is None:
            # Camera dropped — reopen it and try again.
            video_capture.release()
            video_capture = cv2.VideoCapture(0)
            continue
        if ret:
            # Resize FIRST so the detector rects and the landmarks are
            # computed in the same coordinate space (the original resized
            # after detection, misaligning rects and landmarks).
            frame = imutils.resize(frame, width=600)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rects = detector(gray, 0)
            for rect in rects:
                landmarks = np.matrix(
                    [[p.x, p.y] for p in predictor(frame, rect).parts()]
                )
                left_eye = landmarks[LEFT_EYE_POINTS]
                right_eye = landmarks[RIGHT_EYE_POINTS]
                ear_left = eye_aspect_ratio(left_eye)
                ear_right = eye_aspect_ratio(right_eye)

                # Count a blink whenever the EAR dips below the threshold
                # for at least EYE_AR_CONSEC_FRAMES frames and recovers.
                if ear_left < EYE_AR_THRESH:
                    COUNTER_LEFT += 1
                else:
                    if COUNTER_LEFT >= EYE_AR_CONSEC_FRAMES:
                        TOTAL_LEFT += 1
                        print("Left eye winked")
                        COUNTER_LEFT = 0

                if ear_right < EYE_AR_THRESH:
                    COUNTER_RIGHT += 1
                else:
                    if COUNTER_RIGHT >= EYE_AR_CONSEC_FRAMES:
                        TOTAL_RIGHT += 1
                        print("Right eye winked")
                        COUNTER_RIGHT = 0

                total_blinks = TOTAL_LEFT + TOTAL_RIGHT

        # Run the DNN face detector on the current frame.
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(
            cv2.resize(frame, (300, 300)), 1.0,
            (300, 300), (104.0, 177.0, 123.0)
        )
        net.setInput(blob)
        detections = net.forward()

        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            # Require BOTH a confident face detection and enough blinks
            # before classifying (blink gate defeats printed photos).
            if confidence > 0.5 and total_blinks > 2:
                # Scale the bounding box back to frame coordinates and
                # clamp it inside the image.
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                startX = max(0, startX)
                startY = max(0, startY)
                endX = min(w, endX)
                endY = min(h, endY)

                # Extract the face ROI and preprocess it exactly like the
                # model's training data (32x32, scaled to [0, 1]).
                face = frame[startY:endY, startX:endX]
                face = cv2.resize(face, (32, 32))
                face = face.astype("float") / 255.0
                face = img_to_array(face)
                face = np.expand_dims(face, axis=0)

                # Classify the ROI and return the winning label.
                preds = model.predict(face)[0]
                j = np.argmax(preds)
                label = le.classes_[j]
                # Fix: release the camera before returning so other code
                # (e.g. the web stream) can open index 0.
                video_capture.release()
                return label

The videostream.py file:

class Stream(Thread):
    """Background thread that owns the single camera handle and serves it
    to websocket clients, the doorbell/visit flow, and photo capture.

    Attributes (mutable single-element lists so other threads can toggle
    them in place):
        flag: [bool] — when True, frames are JPEG-encoded and pushed to
            every registered websocket client.
        capture_flag: [bool] — when True, the next frame is handed to
            ``collect.make_photo`` for registration, then reset.
        clients: list of websocket handlers with a ``write_message`` API.
    """

    def __init__(self):
        self.flag = [False]
        self.capture_flag = [False]
        self.clients = []
        Thread.__init__(self, name=Stream.__name__)

    def run(self):
        """Main camera loop: read frames forever, stream/capture on demand."""
        self.camera = cv2.VideoCapture(0)
        prev_input = 1  # start high so a held button at boot doesn't fire

        # Fix: the release() after the loop was unreachable (the loop never
        # breaks). try/finally guarantees the camera is released even if
        # the thread dies on an exception, freeing device index 0.
        try:
            while True:
                rval, frame = self.camera.read()
                if frame is None:
                    # Camera dropped — reopen it and keep going.
                    self.camera.release()
                    self.camera = cv2.VideoCapture(0)
                    continue

                # Push the frame to connected web clients as base64 JPEG.
                if self.flag[0]:
                    # Fix: original had typo ``rvel``; result was unused.
                    ok, jpeg = cv2.imencode('.jpg', frame)
                    encode_string = base64.b64encode(jpeg)
                    for client in self.clients:
                        client.write_message(encode_string)

                # Rising edge on GPIO 17 = a visitor pushed the button.
                but = GPIO.input(17)
                if not prev_input and but:
                    # Quarter-size frame keeps dlib fast enough.
                    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
                    visit.visit(small_frame)
                    for client in self.clients:
                        client.write_message("log")
                prev_input = but
                time.sleep(0.05)

                # "Make photos" was clicked in the browser: register a face.
                if self.capture_flag[0]:
                    enough_image = collect.make_photo(frame)
                    if enough_image == "success":
                        print("Success to register")
                    else:
                        print("Fail to register")
                    for client in self.clients:
                        client.write_message(enough_image)
                    self.capture_flag[0] = False
        finally:
            self.camera.release()

Sorry if the code is too much.

1 Answer

0

Not sure if this is possible or efficient. It might be easier to have everything in one file, or to somehow stream data from one file to another. Here's one way to do so: How to pass video stream from one python to another?

35308
  • 575
  • 5
  • 17