
I want to write Python code that extracts photos as frames from an RTSP camera (live streaming). I also want these photos stored with a date and timestamp, which I think I have done. My only challenge is that I want a photo to be saved to my local computer automatically every minute, ending after 24 hours.

How do I go about this?

This is my current code:

 imagesFolder = "C:/Users/<user>/documents"
cap = cv2.VideoCapture("rtsp://username:password@cameraIP/axis-media/media.amp")
frameRate = cap.get(5) #frame rate
count = 0

while cap.isOpened():
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()

    if (ret != True):
        break
    if (frameId % math.floor(frameRate) == 0):
        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))  + ".jpg"
        cv2.imwrite(filename, frame)

    cap.release()
    print ("Done!")

cv2.destroyAllWindows()
user13214870

3 Answers


You may simply wait 60 seconds between frame captures, and break the loop after 24*60 cycles.

I tried testing the code using a public RTSP stream, but I got black frames, so I couldn't fully verify it.

Here is the code:

import cv2
import time
from datetime import datetime
import getpass

#imagesFolder = "C:/Users/<user>/documents"

# https://stackoverflow.com/questions/842059/is-there-a-portable-way-to-get-the-current-username-in-python
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

#cap = cv2.VideoCapture("rtsp://username:password@cameraIP/axis-media/media.amp")

# Use public RTSP Streaming for testing, but I am getting black frames!
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate
count = 0


while cap.isOpened():
    start_time = time.time()

    frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
    ret, frame = cap.read()

    if not ret:
        break

    filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))  + ".jpg"
    cv2.imwrite(filename, frame)

    # Show frame for testing
    cv2.imshow('frame', frame)
    cv2.waitKey(1)

    count += 1

    # Break the loop after 24*60 iterations (one frame per minute for 24 hours)
    if count > 24*60:
        break

    elapsed_time = time.time() - start_time

    # Wait for 60 seconds (subtract elapsed_time to stay accurate); clamp at zero to avoid a negative sleep value.
    time.sleep(max(0, 60 - elapsed_time))


cap.release()
print ("Done!")

cv2.destroyAllWindows()

Update:

The code sample above is not working - the first frame is repeated every minute.

Suggested solution:

  • Grab all the video frames, and save one frame every minute.
    The one-minute time delta will then be accurate to within 0.2 seconds for 5 Hz video (one frame period).
  • Use a separate timer for measuring the 24 hours.

Here is the updated code (reading from public RTSP):

import cv2
import time
from datetime import datetime
import getpass

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://username:password@cameraIP/axis-media/media.amp")

# Use public RTSP Streaming for testing:
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")

#cap = cv2.VideoCapture("test2.mp4")
frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 59  # Subtract 59 seconds so the first frame is grabbed after about one second (instead of waiting a full minute for the first frame).

while cap.isOpened():
    frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
    ret, frame = cap.read()

    if not ret:
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))  + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))  + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time) # Sleeping is a bad idea - we need to grab all the frames.


cap.release()
print ("Done!")

cv2.destroyAllWindows()

Now the images from the public RTSP look OK:

[sample frames captured from the public RTSP stream]


Update:

You may try capturing the video stream using FFmpeg (instead of OpenCV).

Read the following blog: Read and Write Video Frames in Python Using FFMPEG

In case you are using Windows OS, download the latest stable 64-bit static version from here (currently 4.2.2).
Extract the zip file, and place ffmpeg.exe in the same folder as your Python script.

Here is the code (capturing using FFmpeg as sub-process and stdout as a PIPE):

import cv2
import time
from datetime import datetime
import getpass
import numpy as np
import subprocess as sp

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://username:password@cameraIP/axis-media/media.amp")

# Use public RTSP Streaming for testing:
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
cap = cv2.VideoCapture(in_stream)

#cap = cv2.VideoCapture("test2.mp4")
frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate

# Get resolution of input video
width  = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Release VideoCapture - it was used just for getting video resolution
cap.release()

#in_stream = "rtsp://xxx.xxx.xxx.xxx:xxx/Streaming/Channels/101?transportmode=multicast",

#Use public RTSP Streaming for testing
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"


# http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/
FFMPEG_BIN = "ffmpeg" # on Linux ans Mac OS (also works on Windows when ffmpeg.exe is in the path)
#FFMPEG_BIN = "ffmpeg.exe" # on Windows

command = [ FFMPEG_BIN,
            '-i', in_stream,
            '-f', 'image2pipe',
            '-pix_fmt', 'bgr24',
            '-vcodec', 'rawvideo', '-an', '-']

# Open sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)


cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Subtract 30 seconds so the first frame is grabbed after about 30 seconds (instead of waiting a full minute for the first frame).



while True:
    # read width*height*3 bytes from stdout (= 1 frame)
    raw_frame = pipe.stdout.read(width*height*3)

    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        # Transform the byte read into a numpy array, and reshape it to video frame dimensions
        # np.fromstring is deprecated for binary data; np.frombuffer is the drop-in replacement.
        frame = np.frombuffer(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))  + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))  + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time) # Sleeping is a bad idea - we need to grab all the frames.


print ("Done!")

pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
Rotem
  • Thanks @Rotem, this looks good to go. However, it seems to be producing the same first single frame every minute instead of capturing the real live feed/frame every minute. – user13214870 Apr 06 '20 at 15:02
  • Thanks @Rotem, this looks good to go. However, the code seems to be repeating the photo from the first frame every minute instead of capturing new photos every minute. And after about 1 hour I get this ERROR ---- [h264 @ 0e8e8180] error while decoding MB 42 24, bytestream -5 – user13214870 Apr 06 '20 at 16:10
  • It looks like you need to grab all the video frames, and save a frame every minute. I updated my post. Now it's also working with a public RTSP video stream. – Rotem Apr 06 '20 at 16:14
  • In case of an error - when `ret = False` you may need to execute `cap.release()` and `cap = cv2.VideoCapture` (reopen the connection), instead of breaking the loop; a sketch of this idea appears after these comments. – Rotem Apr 06 '20 at 16:33
  • The updated code sample looks good though. I tried using the same for the surveillance camera which I'm working on (using the camera IP), but I get only one photo from the frame this time, and after about 40 seconds I get this error - [h264 @ 0f1f1a80] error while decoding MB 46 36, bytestream -21......... What do you think the challenge is here? @Rotem – user13214870 Apr 06 '20 at 19:01
  • Last try... you may try capturing the video using FFmpeg (as sub-process) instead of using OpenCV. I updated my post. – Rotem Apr 06 '20 at 20:02
  • Thanks @Rotem for your time on this challenge. I had tried on this but also received these continuous errors - `[rtsp @ 0000000000706940] RTP: dropping old packet received too late [rtsp @ 0000000000706940] max delay reached. need to consume packet [rtsp @ 0000000000706940] RTP: missed 178 packets` I've also tried using BlockingScheduler from apscheduler.schedulers.blocking as an alternative to schedule the work being executed, but to no avail. Regardless, I'd share the code of what I tried. – user13214870 Apr 07 '20 at 22:15
  • Try switching from UDP to TCP: add `'-rtsp_transport', 'tcp',` after `FFMPEG_BIN,`. There is a chance that the camera supports the TCP protocol. – Rotem Apr 07 '20 at 22:34
  • You mean this? `FFMPEG_BIN = "ffmpeg.exe" # on Windows` `command = [ FFMPEG_BIN, '-rtsp_transport', 'tcp', '-i', in_stream, '-f', 'image2pipe', '-pix_fmt', 'bgr24', '-vcodec', 'rawvideo', '-an', '-']` I think it does support the TCP protocol because I added that code to the other chunk of code and it seems to capture the first photo from the frame, but it hasn't captured any other since then. lol. – user13214870 Apr 07 '20 at 22:49
  • I think it's the same challenge with the previous proposed solution. The first frame comes up and it picks up a snapshot from it which is then repeated on intervals instead of getting new frames and snapshots. – user13214870 Apr 07 '20 at 23:28
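Building on the reconnection suggestion in the comments above, here is a minimal illustrative sketch (not from the original answer, and untested) of reopening the OpenCV connection when `ret` is `False` instead of breaking the loop; the stream URL is a placeholder:

import cv2
import time

in_stream = "rtsp://username:password@cameraIP/axis-media/media.amp"  # placeholder
cap = cv2.VideoCapture(in_stream)

start_time_24h = time.time()

while time.time() - start_time_24h < 24*60*60:
    ret, frame = cap.read()

    if not ret:
        # Reopen the connection instead of breaking out of the loop,
        # as suggested in the comments.
        cap.release()
        time.sleep(1)  # short, arbitrary pause before reconnecting
        cap = cv2.VideoCapture(in_stream)
        continue

    # ... the one-minute saving logic from the answer goes here ...

cap.release()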

I tried using the apscheduler API, but to no avail. Maybe someone can look at this differently and make it work (a restructured sketch follows the code below).

import cv2
import math
import getpass
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler


imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
cap = cv2.VideoCapture("rtsp://username:password@CameraIP/axis-media/media.amp")
frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate
count = 0


def some_job():
    while cap.isOpened():
        frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
        ret, frame = cap.read()
        if not ret:
            break
        if frameId % math.floor(frameRate) == 0:
            filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
            cv2.imwrite(filename, frame)
            # Note: the scheduler is created and started here, inside the loop,
            # which is part of why this attempt does not work as intended.
            scheduler = BlockingScheduler()
            scheduler.add_job(some_job, 'interval', seconds=60, start_date='2020-04-07 16:23:00', end_date='2020-04-08 16:23:00')
            scheduler.start()
    cap.release()
print ("Done!")
# Closes all the frames
cv2.destroyAllWindows()
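For reference, here is a minimal sketch (untested against a live camera, with placeholder credentials) of how the BlockingScheduler could be wired so that the scheduler is created once, outside the capture loop. Note that, as discussed in the comments above, reading only one frame per minute from a live RTSP stream may still return stale buffered frames:

import cv2
import getpass
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
cap = cv2.VideoCapture("rtsp://username:password@CameraIP/axis-media/media.amp")  # placeholder URL

def save_one_frame():
    # Grab a single frame and save it with a timestamped file name.
    ret, frame = cap.read()
    if ret:
        filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        cv2.imwrite(filename, frame)

# Register the job once, outside any loop, and let the scheduler handle
# both the 60-second interval and the 24-hour window via start_date/end_date.
scheduler = BlockingScheduler()
scheduler.add_job(save_one_frame, 'interval', seconds=60,
                  start_date='2020-04-07 16:23:00', end_date='2020-04-08 16:23:00')
scheduler.start()  # blocks until the process is interrupted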
user13214870

This works somewhat better than the other solutions. The only remaining challenge is that the photos stop saving after the first 3 minutes (the first 3 photos are grabbed within seconds but saved one per minute). What is still needed is for it to keep saving every minute, up to 24 hours, before it stops.

import cv2
import time
import getpass
import numpy as np
import subprocess as sp
from datetime import datetime

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

# RTSP Streaming:
in_stream = "rtsp://username:password@cameraIP/axis-media/media.amp"
cap = cv2.VideoCapture(in_stream)

frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate

# Get resolution of input video
width  = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Release VideoCapture - it was used just for getting video resolution
cap.release()

in_stream = "rtsp://username:password@cameraIP/axis-media/media.amp"

FFMPEG_BIN = "ffmpeg.exe" # on Windows

# Suspecting camera supports TCP protocol hence added: '-rtsp_transport', 'tcp'
command = [ FFMPEG_BIN,
            '-rtsp_transport', 'tcp',
            '-i', in_stream,
            '-f', 'image2pipe',
            '-pix_fmt', 'bgr24',
            '-vcodec', 'rawvideo', '-an', '-']

# Open sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)


cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Subtract 30 seconds so the first frame is grabbed after about 30 seconds (instead of waiting a full minute for the first frame).



while True:
    # read width*height*3 bytes from stdout (= 1 frame)
    raw_frame = pipe.stdout.read(width*height*3)

    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        # Transform the byte read into a numpy array, and reshape it to video frame dimensions
        # np.fromstring is deprecated for binary data; np.frombuffer is the drop-in replacement.
        frame = np.frombuffer(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))  + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))  + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time) # Sleeping is a bad idea - we need to grab all the frames.


print ("Done!")

pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
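One possible direction for the remaining problem, applying the reconnection idea from the comments on the first answer to FFmpeg: restart the sub-process when a short read occurs, instead of breaking the loop. This is an untested sketch; the `open_ffmpeg_pipe` helper name and the stream URL are assumptions:

import subprocess as sp

in_stream = "rtsp://username:password@cameraIP/axis-media/media.amp"  # placeholder

def open_ffmpeg_pipe(stream_url):
    # Hypothetical helper: (re)start FFmpeg and return a new sub-process
    # whose stdout is a PIPE of raw BGR frames, as in the code above.
    command = ["ffmpeg.exe",
               '-rtsp_transport', 'tcp',
               '-i', stream_url,
               '-f', 'image2pipe',
               '-pix_fmt', 'bgr24',
               '-vcodec', 'rawvideo', '-an', '-']
    return sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

# In the main loop, a short read would then trigger a restart instead of a break:
#
#     if len(raw_frame) != (width*height*3):
#         pipe.kill()
#         pipe = open_ffmpeg_pipe(in_stream)  # reconnect and keep reading
#         continue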
user13214870