

My pipeline grabs frames from an MP4 file and delivers them to a callback registered with g_signal_connect:

gst-launch-1.0 filesrc location=test.mp4 ! qtdemux !  h264parse ! v4l2h264dec ! capsfilter caps=video/x-raw,format=I420 ! appsink name=sink

Here is the C++ code I use to measure the frames received:

#include <gst/gst.h>
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>
#include <iostream>
#include <unistd.h>
#include <chrono>

#include "opencv2/opencv.hpp"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

#define ZWIDTH 320
#define ZHEIGHT 240
#define TOTALFRAMESIZE ZWIDTH*ZHEIGHT*3

GstElement *pipeline;
GstBus *bus;
GstMessage *msg;
char* dataStore;
char* actualAdd;
int storeSize = 0;
static int framecount = 0;

using namespace cv;
using namespace std;

int duration = 0;
auto t1 = std::chrono::high_resolution_clock::now();
auto t2 = std::chrono::high_resolution_clock::now();
auto t3 = std::chrono::high_resolution_clock::now();

/* Naive NV21 -> packed RGB conversion using integer-approximated BT.601 coefficients */
void NV21_T_RGB(int width , int height , char *yuyv , char *rgb)
{
    const int nv_start = width * height ;
    uint32_t  i, j, index = 0, rgb_index = 0;
    uint8_t y, u, v;
    int r, g, b, nv_index = 0;
 
    for(i = 0; i < height; i++){
        for(j = 0; j < width; j ++){
            //nv_index = (rgb_index / 2 - width / 2 * ((i + 1) / 2)) * 2;
            nv_index = i / 2  * width + j - j % 2;
 
            y = yuyv[rgb_index];
            u = yuyv[nv_start + nv_index ];
            v = yuyv[nv_start + nv_index + 1];
 
            r = y + (140 * (v-128))/100;  //r
            g = y - (34 * (u-128))/100 - (71 * (v-128))/100; //g
            b = y + (177 * (u-128))/100; //b
 
            if(r > 255)   r = 255;
            if(g > 255)   g = 255;
            if(b > 255)   b = 255;
            if(r < 0)     r = 0;
            if(g < 0)     g = 0;
            if(b < 0)     b = 0;
 
            index = rgb_index % width + (height - i - 1) * width;

            rgb[i * width * 3 + 3 * j + 0] = r;
            rgb[i * width * 3 + 3 * j + 1] = g;
            rgb[i * width * 3 + 3 * j + 2] = b;
 
            rgb_index++;
        }
    }
}

/*Call back */
static GstFlowReturn
have_frame (GstElement * appsink, gpointer app)
{
  GstBuffer *buffer;
  GstSample *sample;

  /* pull the decoded sample from appsink */
  g_signal_emit_by_name (appsink, "pull-sample", &sample);
  
  if (sample) 
  {
    GstMapInfo map;
    gint64 position;
    GstClock *clock;
    GstClockTime base_time, running_time;

    buffer = gst_sample_get_buffer (sample);
    gst_element_query_position (appsink, GST_FORMAT_TIME, &position);

    clock = gst_element_get_clock (appsink);
    base_time = gst_element_get_base_time (appsink);

    running_time = gst_clock_get_time (clock) - base_time;

    gst_object_unref (clock);

     if (gst_buffer_map(buffer, &map, GST_MAP_READ)) 
     {
          printf("Count %d\n",framecount);
          g_print("Appsink: Buffer Received: Content = %u\n", map.size);
          storeSize = storeSize + map.size;
          g_print("Appsink: Total Buffer Received: Content = %u\n", storeSize);
          memcpy(dataStore,(char*)map.data,map.size);
          if(storeSize == TOTALFRAMESIZE)
          {
               if(framecount == 0)
               {
                    t2 = std::chrono::high_resolution_clock::now();
                    int diff = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
                    duration = duration + diff;
                    printf("Time for Frame %d---%d ms ==============> \n",framecount,diff);
                    t3 = std::chrono::high_resolution_clock::now();
               }
               else
               {
                    auto t4 = std::chrono::high_resolution_clock::now();
                    int diff = std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count();
                    duration = duration + diff;
                    printf("Time for Frame %d---%d ms ==============> \n",framecount,diff);
                    t3 = std::chrono::high_resolution_clock::now();
               }
               framecount++;
               printf("All data received \n");
               char* rgb = new char[TOTALFRAMESIZE];
               NV21_T_RGB(ZWIDTH,ZHEIGHT,dataStore,rgb);
               Mat pData(Size(ZWIDTH, ZHEIGHT), CV_8UC3, (char*)rgb, Mat::AUTO_STEP);  //if its a 320x240 size image
               string path = "./Result/"+to_string(framecount)+"_XYZ.jpg";
               imwrite(path,pData);
               delete[] rgb; /* free the temporary RGB buffer */
               storeSize = 0;
               dataStore = actualAdd;
          }
          else
          {
               dataStore = dataStore + map.size;
          }
          gst_buffer_unmap (buffer, &map);
     }

    gst_sample_unref (sample);
  }
  return GST_FLOW_OK;
}

int main(int argc, char *argv[]) 
{
     t1 = std::chrono::high_resolution_clock::now();
     dataStore = new char[TOTALFRAMESIZE];
     memset(dataStore,0,TOTALFRAMESIZE);
     actualAdd = dataStore;
     /* Initialize GStreamer */
     gst_init (&argc, &argv);

     /*Create Pipeline */
     pipeline = gst_parse_launch
     ("filesrc location=../short.mp4 !"
     "qtdemux !"
     "h264parse !"
     "v4l2h264dec !"
     "capsfilter caps=\"video/x-raw, format=NV21\" !"
     "appsink name=appsink",
     NULL);

     GstElement *sink = gst_bin_get_by_name(GST_BIN(pipeline), "appsink");
     if (!sink) 
     {
          printf("sink is NULL\n");
          exit(1);
     }
     
     g_object_set (G_OBJECT (sink), "emit-signals", TRUE, NULL);
     g_signal_connect (sink, "new-sample", G_CALLBACK (have_frame),NULL);
     
     /* Start playing */
     gst_element_set_state (pipeline, GST_STATE_PLAYING);
     /* Wait until error or EOS */
     bus = gst_element_get_bus (pipeline);
     msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,static_cast<GstMessageType>( GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
     
     /* Parse message */
     if (msg != NULL) 
     {
          GError *err;
          gchar *debug_info;

          switch (GST_MESSAGE_TYPE (msg)) {
          case GST_MESSAGE_ERROR:
               gst_message_parse_error (msg, &err, &debug_info);
               g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
               g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
               g_clear_error (&err);
               g_free (debug_info);
               break;
          case GST_MESSAGE_EOS:
               g_print ("End-Of-Stream reached.\n");
               printf("Final Time take to write %d frames in final duration %d ms ==============> \n",framecount,duration);
               printf("Avg time for receiving %d frames %d ms\n",framecount,duration/framecount);
               break;
          default:
               /* We should not reach here because we only asked for ERRORs and EOS */
               g_printerr ("Unexpected message received.\n");
               break;
          }
          gst_message_unref (msg);
     }

     /* Free resources */
     if (msg != NULL)
          gst_message_unref (msg);
     gst_object_unref (bus);
     gst_element_set_state (pipeline, GST_STATE_NULL);
     gst_object_unref (pipeline);
     
     return 0;
}


Command to compile:

g++ -Wall short.cpp -I ./ -o short $(pkg-config --cflags --libs gstreamer-app-1.0) $(pkg-config opencv --cflags --libs)

short.mp4 video: https://streamable.com/7bsung

Problem:
short.mp4 plays at 15 FPS, but appsink receives frames at only 7.5 FPS.
What changes can I make to the pipeline so that appsink receives frames at 15 FPS?

  • You should perhaps show us your code; it might be an implementation bug. How did you calculate those 7.5 FPS? – nayana Feb 12 '21 at 22:46
  • What is the platform being used? This looks like copying the GPU buffer back to host memory, which is expensive. – Florian Zwoch Feb 13 '21 at 09:49
  • Hi @nayana, I have shared my implementation, kindly guide me, thanks. For the calculation I measured the timing of the have_frame function using the C++ chrono API. The shared video has a frame size of 320x240, and I receive the whole frame in 2 callbacks of size 320x240x1.5 each. –  Feb 15 '21 at 04:25
  • First, why don't you use videoconvert to get the RGB from the GStreamer pipeline? What happens if you add a queue before appsink (this should separate the threads)? What happens if you remove the video conversion and just read and drop the received frames? – nayana Feb 15 '21 at 15:28
  • Hi @nayana. "Why don't you use videoconvert to get the RGB from the GStreamer pipeline?" --- As per my design I need raw NV21 frames and will convert to RGB later on. "What happens if you add a queue before appsink (this should separate threads)?" --- Nothing happens. "What happens if you remove the video conversion and just read and drop the received frames?" --- I tried removing the video conversion code and still receive the callbacks in 2 chunks of 320x240x1.5 each. –  Feb 17 '21 at 04:57
  • I don't understand the last part. So if you remove the NV21_T_RGB call you still get 7.5 FPS? – nayana Feb 17 '21 at 09:51
  • Yes @nayana, I have removed NV21_T_RGB completely and still get 7.5 FPS. –  Feb 18 '21 at 05:07
  • Hm, that's weird; maybe the other commenter here could check it out? No time to dive into the code, but you added value to your question by adding the reproducible code. Thanks for your effort. – nayana Feb 21 '21 at 20:12

1 Answer


I was able to resolve the above issue.
The calculation of the 2 chunks was correct (320x240x1.5).
Details: the callback returns the image in NV21 format.
"The RGB model has to use 3 bytes (24 bits) to record a color, but in the YUV model we get half the size if we use the yuv420p (NV21) format."
So one NV21 frame is only 320x240x1.5 = 115200 bytes (not the 320x240x3 = 230400 bytes my code was accumulating), which means every appsink callback already delivers a complete frame. I saved each received (320x240x1.5) buffer directly with OpenCV and it worked :D

cv::Mat mat_src = cv::Mat(ZHEIGHT*1.5, ZWIDTH, CV_8UC1, (char*)map.data);
cv::Mat mat_dst = cv::Mat(ZHEIGHT, ZWIDTH, CV_8UC3);
string path = "./Result/"+to_string(framecount)+"_XYZ.jpg";
cv::cvtColor(mat_src, mat_dst, cv::COLOR_YUV2BGR_NV21);
imwrite(path,mat_dst);
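
For completeness, here is a minimal sketch of how the new-sample callback looks with this fix, assuming each appsink sample carries exactly one full NV21 frame of ZWIDTH x ZHEIGHT x 1.5 bytes (framecount, ZWIDTH and ZHEIGHT are the globals from the question; the rest is illustrative, not a definitive implementation):

/* Corrected callback sketch: one appsink sample == one complete NV21 frame */
static GstFlowReturn
have_frame (GstElement * appsink, gpointer user_data)
{
  GstSample *sample = NULL;

  g_signal_emit_by_name (appsink, "pull-sample", &sample);
  if (!sample)
    return GST_FLOW_OK;

  GstBuffer *buffer = gst_sample_get_buffer (sample);
  GstMapInfo map;

  if (gst_buffer_map (buffer, &map, GST_MAP_READ))
  {
    /* Wrap the mapped NV21 data (height * 1.5 rows of one-byte samples)
     * and convert it to BGR in a single call. */
    cv::Mat mat_src (ZHEIGHT * 3 / 2, ZWIDTH, CV_8UC1, (char *) map.data);
    cv::Mat mat_dst (ZHEIGHT, ZWIDTH, CV_8UC3);
    cv::cvtColor (mat_src, mat_dst, cv::COLOR_YUV2BGR_NV21);

    std::string path = "./Result/" + std::to_string (framecount) + "_XYZ.jpg";
    cv::imwrite (path, mat_dst);

    framecount++; /* one callback is now counted as one frame */
    gst_buffer_unmap (buffer, &map);
  }

  gst_sample_unref (sample);
  return GST_FLOW_OK;
}

With this counting, the callbacks arrive at the clip's native 15 FPS; the earlier 7.5 FPS figure came from treating two NV21 buffers as a single RGB-sized frame.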