I am struggling to find a solution for this:
I'm trying to build an image streaming system where I can grab every frame and pass it through a neural network, but so far I haven't managed to get valid base64 image strings out of my functions below. The code works perfectly if I just display the decoded image from the stream directly; it breaks when I pass the image through my functions that convert it to base64, read it back in memory, and hand it to cv2 to display.
The server-side functions responsible for encoding to and decoding from base64 are shown below.
Converting the image object from the stream into base64 bytes and then into a single string (this part works as intended):
def convertImgBase64(image):
    try:
        # encode the raw bytes to base64, then decode to a UTF-8 string
        imgString = base64.b64encode(image).decode('utf-8')
        print('converted successfully')
        return imgString
    except Exception as err:  # os.error would not catch encoding errors
        print(f"Error: '{err}'")
The base64 decoder that should turn the string back into a cv2-compatible frame (this is where the error begins):
def readb64(base64_string):
    storage = '/home/caio/Desktop/img/'
    try:
        sbuf = BytesIO()
        sbuf.write(base64.b64decode(str(base64_string)))
        pimg = im.open(sbuf)
        # also dump the buffer to a file so I can inspect it on disk
        out = open('arq.jpeg', 'wb')
        out.write(sbuf.read())
        out.close()
        print('read the b64 string')
        return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    except Exception as err:
        print(f"Error: '{err}'")
This is the current server I am building; before going any further I need to get the frame capture working correctly.
from io import BytesIO, StringIO
import numpy as np
import cv2
from imutils.video import FPS
import imagezmq
import base64
import darknet
import os
from PIL import Image as im
from numpy import asarray
from time import sleep
# imagezmq receiver for the frames sent by the client
image_hub = imagezmq.ImageHub()
def convertImgBase64(image):
    try:
        imgString = base64.b64encode(image).decode('utf-8')
        return imgString
    except Exception as err:
        print(f"Error: '{err}'")
def readb64(base64_string):
    try:
        sbuf = BytesIO()
        sbuf.write(base64.b64decode(str(base64_string)))
        pimg = im.open(sbuf)
        return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    except Exception as err:
        print(f"Error: '{err}'")
def capture_img():
    while True:
        camera, jpg_buffer = image_hub.recv_jpg()
        buffer = np.frombuffer(jpg_buffer, dtype='uint8')
        imagedecoder = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
        img = im.fromarray(imagedecoder)
        try:
            string = convertImgBase64(imagedecoder)
            cvimg = readb64(string)
            # cv2.imshow(camera, cvimg)  # this is the line where it's not working!
        except Exception as err:
            print(f"Error: '{err}'")
        cv2.imshow(camera, imagedecoder)
        cv2.waitKey(1)  # cv2 won't display anything without this
        image_hub.send_reply(b'OK')  # imagezmq requires an acknowledgement from the server
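For completeness, the snippet above never actually calls capture_img; a minimal entry point at the bottom of the script would be:

if __name__ == '__main__':
    capture_img()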
The client code (running on a Raspberry Pi) is given below:
import sys
import socket
import time
import cv2
from imutils.video import VideoStream
import imagezmq
import argparse
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--server-ip", required=True,
                help="ip address of the server to which the client will connect")
args = vars(ap.parse_args())
# initialize the ImageSender object with the socket address of the
# server
sender = imagezmq.ImageSender(connect_to="tcp://{}:5555".format(
    args["server_ip"]))
# use either of the formats below to specify the address of the display computer
# sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.190:5555')
rpi_name = socket.gethostname() # send RPi hostname with each image
vs = VideoStream(usePiCamera=True, resolution=(800, 600)).start()
time.sleep(2.0) # allow camera sensor to warm up
jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default
while True:  # send images as a stream until Ctrl-C
    image = vs.read()
    ret_code, jpg_buffer = cv2.imencode(
        ".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
    sender.send_jpg(rpi_name, jpg_buffer)
I have been trying the solutions from here and here.
If you know a better way to pass an image object that I can process inside the YOLO/darknet neural network, that would be awesome!
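For context, what I eventually want to do with each frame looks roughly like this. I am assuming the darknet.py bindings from the AlexeyAB repository here (load_network, make_image, detect_image), so the file paths and exact signatures are placeholders that may differ in other darknet versions:

import cv2
import darknet

# hypothetical config/weight paths
network, class_names, colors = darknet.load_network(
    'cfg/yolov4.cfg', 'cfg/coco.data', 'yolov4.weights', batch_size=1)

def detect(frame_bgr):
    # darknet expects an RGB image at the network's input resolution
    w, h = darknet.network_width(network), darknet.network_height(network)
    rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (w, h), interpolation=cv2.INTER_LINEAR)
    dn_img = darknet.make_image(w, h, 3)
    darknet.copy_image_from_bytes(dn_img, resized.tobytes())
    detections = darknet.detect_image(network, class_names, dn_img)
    darknet.free_image(dn_img)
    return detections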
Thanks!