I am trying to make a game with pygame and face recognition. To do that I need two windows open: one for the face recognition and one for the game. But when I import face_recog.py, the game window doesn't show up until the face_recog window is closed. What should I do?
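If I understand it correctly, the problem is that face_recog.py runs completely at import time, because its while loop sits at module level, so the import never returns:

```python
# game.py (what I think happens)
import pygame
import face_recog   # executes face_recog.py top to bottom, including its
                    # "while cap.isOpened():" loop, so it blocks right here
                    # until the cv2 window is closed with 'q'

# nothing below this point runs until then, so the pygame window never shows
```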
I also tried importing it inside the while loop of the game.py file, roughly like this (not my exact code, just where the import went):
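```python
while True:
    import face_recog        # moved the import into the game loop
    time = self.clock.tick(60)
    FRAME += 1
    # ... rest of the game loop as in the full code below ...
```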
game.py:

```python
import pygame
import face_recog
from background import *

FRAME = 0


class Game:
    def __init__(self):
        self.width = 900
        self.height = 600
        self.screen = pygame.display.set_mode((self.width, self.height))
        self.clock = pygame.time.Clock()
        self.fire_rect = [530, 40]

    def main(self):
        global FRAME
        # create the sprite groups
        self.all_sprites = pygame.sprite.Group()
        self.platforms = pygame.sprite.Group()
        self.player_group = pygame.sprite.Group()
        pygame.init()
        # add sprites to the groups
        self.player1 = Player((self.width / 2, self.height / 2), self)
        self.all_sprites.add(self.player1)
        self.player_group.add(self.player1)
        # load the background walls
        for plat in PlatformList:
            p = Platform(*plat)
            self.all_sprites.add(p)
            self.platforms.add(p)
        # initialization
        trap1 = trap(self)
        background_ = background(self.width, self.height)
        item_ = item(self)
        self.shot_ = shot(self.screen, self)
        item_.item_display(self.screen)  # items can disappear, so this stays outside the while loop
        while True:
            # settings
            time = self.clock.tick(60)
            FRAME += 1
            self.screen.fill((255, 193, 158))
            # draw the background
            background_.background(self.screen)
            # item_.item_display(self.screen)
            item_.item_eat(self.screen)
            trap1.trap_draw(self.screen, self.fire_rect)
            self.shot_.shooting()
            self.event()
            self.all_sprites.update()
            self.all_sprites.draw(self.screen)
            pygame.display.flip()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    exit(0)
```
face_recog.py:

```python
import sys
import os
import dlib
import glob
from skimage import io
import numpy as np
import cv2
from scipy.spatial import distance as dist  # to compute the distance between the lips
import math
import pygame
from game import *

cap = cv2.VideoCapture(0)  # video input
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (1280, 720))

predictor_path = 'shape_predictor_81_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)  # landmark predictor

(mStart, mEnd) = (48, 54)  # start and end landmark indices of the mouth
MOUTH_AR_THRESH = 0.1

while cap.isOpened():
    ret, frame = cap.read()  # read a frame from the camera
    frame = cv2.flip(frame, 1)
    dets = detector(frame, 0)  # rects
    for k, d in enumerate(dets):
        shape = predictor(frame, d)
        landmarks = np.matrix([[p.x, p.y] for p in shape.parts()])
        for num in range(shape.num_parts):
            cv2.circle(frame, (shape.parts()[num].x, shape.parts()[num].y), 3, (0, 255, 0), -1)
        A = dist.euclidean((shape.parts()[61].x, shape.parts()[61].y), (shape.parts()[67].x, shape.parts()[67].y))
        B = dist.euclidean((shape.parts()[63].x, shape.parts()[63].y), (shape.parts()[65].x, shape.parts()[65].y))
        C = dist.euclidean((shape.parts()[48].x, shape.parts()[48].y), (shape.parts()[54].x, shape.parts()[54].y))
        mar = (A + B) / (2.0 * C)
        mar = round(mar, 5)
        if mar > MOUTH_AR_THRESH:
            cv2.putText(frame, "MOUTH IS OPEN!", (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.imshow('frame', frame)  # window title
    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # wait for a key press; quit when 'q' is pressed
        print("q pressed")
        break

cap.release()
out.release()
cv2.destroyAllWindows()
```