I have a program built with Flask and Python. It is part of a larger application that recognizes license plates while doing other things at the same time. I want to access a variable that is assigned inside a while loop, and that loop is inside a function. The boxes variable near the bottom of the code is the one I mean, but no matter what I try I cannot reach it; the result is always: NameError: name 'boxes' is not defined. I am really confused about how to solve this. In a situation like this, how can I access my variable from outside the function? Even after declaring the variable as global, it is still not accessible. This is one of the ways I tried.
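To show the core of the problem without the camera and TensorFlow parts, here is a stripped-down sketch of the same pattern (the names are placeholders, not my real code); it fails with the same NameError:

def streaming():
    while True:
        global boxes
        boxes = [1, 2, 3]  # assigned inside the loop, inside the function
        yield boxes

print(boxes)  # NameError: name 'boxes' is not defined

My actual code is below.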
This is Flask.py
from flask import Flask, render_template, Response
from camera import CameraStream
import cv2
import os
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.builders import model_builder
from object_detection.utils import config_util
import tensorflow as tf
from keras.models import load_model
from keras.preprocessing.image import img_to_array
import numpy as np
import functools
import imutils
import math
from datetime import datetime
import mysql.connector as mysql
import time
app = Flask(__name__)
cap = CameraStream().start()
@app.route('/')
def index():
    """Video streaming home page."""
    return render_template('index.html')
##################################################################################
#### Load Check points, config files, labelmap and Detect License plate Object ###
##################################################################################
CONFIG_PATH = './pipeline.config'
CHECKPOINT_PATH = './training'
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(CONFIG_PATH)
detection_model = model_builder.build(model_config=configs['model'], is_training=False)
# Restore checkpoint
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(CHECKPOINT_PATH, 'ckpt-52')).expect_partial()
@tf.function
def detect_fn(image):
    image, shapes = detection_model.preprocess(image)
    prediction_dict = detection_model.predict(image, shapes)
    detections = detection_model.postprocess(prediction_dict, shapes)
    return detections
category_index = label_map_util.create_category_index_from_labelmap('label_map.pbtxt')
def streaming():
    while cap:
        frame = cap.read()
        # Check whether a frame is actually being captured
        if frame is None:
            print("disconnected!")
        # You can adjust the streaming window size here
        frame = cv2.resize(frame, (0, 0), fx=0.4, fy=0.4)
        image_np = np.array(frame)
        input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
        detections = detect_fn(input_tensor)
        num_detections = int(detections.pop('num_detections'))
        detections = {key: value[0, :num_detections].numpy()
                      for key, value in detections.items()}
        detections['num_detections'] = num_detections
        # detection_classes should be ints.
        detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
        label_id_offset = 1
        image_np_with_detections = image_np.copy()
        viz_utils.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            detections['detection_boxes'],
            detections['detection_classes'] + label_id_offset,
            detections['detection_scores'],
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=5,
            min_score_thresh=.8,
            agnostic_mode=False)
        detection_threshold = 0.7
        image = image_np_with_detections
        # Keep only the scores, boxes and classes above the threshold
        scores = list(filter(lambda x: x > detection_threshold, detections['detection_scores']))
        global boxes
        boxes = detections['detection_boxes'][:len(scores)]
        classes = detections['detection_classes'][:len(scores)]
        frame = cv2.imencode('.jpg', image_np_with_detections)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')  # concatenate frames one by one and stream the result
print(boxes)
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(streaming(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    app.run(host='127.0.0.1', threaded=True)
This is camera.py
from threading import Thread, Lock
import cv2
class CameraStream(object):
    def __init__(self, src="rtsp://admin:admin@192.168.1.2:554/1/1"):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.started = False
        self.read_lock = Lock()

    def start(self):
        if self.started:
            print("already started!!")
            return None
        self.started = True
        # Grab frames continuously on a background thread
        self.thread = Thread(target=self.update, args=())
        self.thread.start()
        return self

    def update(self):
        while self.started:
            (grabbed, frame) = self.stream.read()
            self.read_lock.acquire()
            self.grabbed, self.frame = grabbed, frame
            self.read_lock.release()

    def read(self):
        # Return a copy of the latest frame under the lock
        self.read_lock.acquire()
        frame = self.frame.copy()
        self.read_lock.release()
        return frame

    def stop(self):
        self.started = False
        self.thread.join()

    def __exit__(self, exc_type, exc_value, traceback):
        self.stream.release()