I have this code which connects Node.js to a Python script. The script contains ML models (with a TensorFlow backend and so on) and ultimately produces a string output. I send an image URL from Node.js to Python via child_process.spawn, and it gives back the recognised expression as a string. Basically I am doing facial recognition, coded in Python but called through Node.js, and I send the string back in the response as JSON data (REST API).
The problem I am facing is that every time I call spawn, the whole Python script runs from the top, and it takes very long because all the modules have to load before it finally gives the output.
Here is the Python code:
from gtts import gTTS
language = 'en'
#myobj = gTTS(text='Do you know the person? Yes or No', lang=language, slow=True)
#myobj.save("question1.mp3")
#myobj = gTTS(text='What is his or her name', lang=language, slow=True)
#myobj.save("question2.mp3")
import csv
import pandas as pd
import numpy as np
#with open('database.csv','w') as f:
# writer=csv.writer(f)
# writer.writerow(['Chinmay',embedded])
face_embeddings=np.array(pd.read_csv('database.csv',header=None))
face_names=np.array(pd.read_csv('database_names.csv',header=None))
from cv2 import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from align import AlignDlib
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.autograd import Variable
from model import create_model
import transforms as transforms
from skimage import io
from skimage.transform import resize
from models import *
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
from util.model import CNNModel, generate_caption_beam_search
from config import config
from pickle import load
import sys
cut_size = 44
transform_test = transforms.Compose([
    transforms.TenCrop(cut_size),
    transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
])
class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
final_text=''
nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')
def rgb2gray(rgb):
    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])

def load_image(path):
    img = cv2.imread(path, 1)
    # OpenCV loads images with color channels
    # in BGR order. So we need to reverse them
    return img[...,::-1]
def extract_features(filename, model, model_type):
    if model_type == 'inceptionv3':
        from keras.applications.inception_v3 import preprocess_input
        target_size = (299, 299)
    elif model_type == 'vgg16':
        from keras.applications.vgg16 import preprocess_input
        target_size = (224, 224)
    # Load and resize the image
    image = load_img(filename, target_size=target_size)
    # Convert the image pixels to a numpy array
    image = img_to_array(image)
    # Reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # Prepare the image for the CNN model
    image = preprocess_input(image)
    # Pass the image through the model to get encoded features
    features = model.predict(image, verbose=0)
    return features
def getrecogstr(imgurl):
    global final_text  # final_text is assigned below, so it must be declared global
    # Path of Image
    #image_file=imgurl
    image_file = sys.argv[1]
    # Initialize the OpenFace face alignment utility
    alignment = AlignDlib('models/landmarks.dat')
    # Load an image
    jc_orig = load_image(image_file)
    # Detect faces and return their bounding boxes
    bb = alignment.getAllFaceBoundingBoxes(jc_orig)
    net = VGG('VGG19')
    checkpoint = torch.load(os.path.join('FER2013_VGG19', 'PrivateTest_model.t7'), map_location='cpu')
    net.load_state_dict(checkpoint['net'])
    # Load the tokenizer
    tokenizer_path = config['tokenizer_path']
    tokenizer = load(open(tokenizer_path, 'rb'))
    # Max sequence length (from training)
    max_length = config['max_length']
    caption_model = load_model('model.hdf5')
    image_model = CNNModel(config['model_type'])
    for i in bb:
        # Transform image using specified face landmark indices and crop image to 96x96
        jc_aligned = alignment.align(96, jc_orig, i, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
        location = (i.height() + i.width()) / (jc_orig.shape[0] + jc_orig.shape[1])
        # Find the emotion of the cropped face
        gray = rgb2gray(jc_aligned)
        gray = resize(gray, (48, 48), mode='symmetric').astype(np.uint8)
        img = gray[:, :, np.newaxis]
        img = np.concatenate((img, img, img), axis=2)
        img = Image.fromarray(img)
        inputs = transform_test(img)
        #net.cuda()
        net.eval()
        ncrops, c, h, w = np.shape(inputs)
        inputs = inputs.view(-1, c, h, w)
        #inputs = inputs.cuda()
        inputs = Variable(inputs, volatile=True)
        outputs = net(inputs)
        outputs_avg = outputs.view(ncrops, -1).mean(0)  # avg over crops
        score = F.softmax(outputs_avg)
        _, predicted = torch.max(outputs_avg.data, 0)
        # Find the name of the person in the image
        jc_aligned = (jc_aligned / 255.).astype(np.float32)
        embeddings = nn4_small2_pretrained.predict(np.expand_dims(jc_aligned, axis=0))[0]
        print("@@")
        print(embeddings)
        matched_embeddings = 1000
        for j in range(len(face_embeddings)):
            temp = np.sum(np.square(embeddings - face_embeddings[j]))
            if temp <= 0.56 and temp < matched_embeddings:
                matched_embeddings = np.sum(np.square(embeddings - face_embeddings[j]))
                face_index = j
            print(temp)
            print('above')
        if matched_embeddings != 1000:
            face_name = face_names[face_index][0]
            print("@@known")
        else:
            face_name = 'Unknown'
            print("@@unknown")
            #print("Unknown Person detected. Do you know this person yes or no ?")
            #Play welcome1.mp3
            #Play welcome2.mp3 if input is yes
        final_text += face_name + ' expression is ' + class_names[int(predicted.cpu().numpy())] + "."
        print("@@" + final_text)
    sys.stdout.flush()

getrecogstr(sys.argv[1])
Here is the Node code:
const express = require('express');
const app = express();
const bodyParser = require('body-parser');
const port = 1000;
const spawn = require("child_process").spawn;
app.use(bodyParser.json()); // application/json
app.use((req, res, next) => {
    res.setHeader('Access-Control-Allow-Origin', '*');
    res.setHeader('Access-Control-Allow-Methods', 'OPTIONS, GET, POST, PUT, PATCH, DELETE');
    res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');
    next();
});
app.get('/test', (req, res, next) => {
    const imgurl = req.query.imgurl;
    var process = spawn('python', ["./final.py",
        imgurl,
    ]);
    process.stdout.on('data', function (data) {
        const recog_str = data.toString().split('@@')[3];
        console.log(recog_str);
        res.json(recog_str);
    });
});
app.listen(port, () => {
    console.log("Ok");
});
I just want to skip loading all the modules on every request. I know the imports have to run once so the models are in memory, but doing that on every call takes far too long. Can I keep the Python script running the whole time, send it arguments from Node.js in the middle of that run, and call a function that returns the string?
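What I have in mind is restructuring final.py so the heavy imports and model loading run once at startup, and the script then blocks on stdin waiting for image paths, one per line. A rough, untested sketch of that shape (recognise() is just a placeholder for the real logic in getrecogstr):

import sys

# All the heavy imports and model loading would go here, executed once:
# torch, keras, the embedding CSVs, the weight files, and so on.

def recognise(image_path):
    # Placeholder for the real pipeline inside getrecogstr()
    return 'someone expression is Happy.'

def main():
    # Block on stdin forever; Node would write one image path per line
    for line in sys.stdin:
        image_path = line.strip()
        if not image_path:
            continue
        result = recognise(image_path)
        # Prefix the answer so Node can tell it apart from other prints
        print('@@' + result)
        sys.stdout.flush()  # make sure Node sees the line immediately

if __name__ == '__main__':
    main()

On the Node side I would then call spawn only once when the server starts, keep the child process object around, write imgurl + '\n' to its stdin inside the route handler, and resolve the pending response when the matching '@@' line arrives on stdout. Is that a sensible way to do this, or is there a more standard approach?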