I am refactoring a GStreamer element that captures from two CSI cameras using NVIDIA's Argus library. The refactor moves from the old nvbuf_utils API to NvUtils, following https://developer.nvidia.com/sites/default/files/akamai/embedded/nvbuf_utils_to_nvutils_migration_guide.pdf .

The element consumes two or more CSI cameras, syncs the frames, and pushes them out on its src pad. With the original element, the two pipelines below output, respectively, a video showing a single camera and a video showing the two cameras tiled side by side.

Single Pipeline:

gst-launch-1.0 -v nvarguscameras sensors='0 1' silent=false sync-threshold=16700000 ! "video/x-raw(memory:NVMM),format=(string)NV12,width=(int)1920,height=(int)1080,framerate=(fraction)30/1" ! nvvideoconvert ! nvv4l2h264enc ! h264parse ! filesink location=single.mp4 -e

Double Pipeline:

gst-launch-1.0 -v nvarguscameras master-sensor=0 sensors='0 1' sync-threshold=16700000 silent=false ! nvvideoconvert ! "video/x-raw(memory:NVMM),format=(string)NV12,width=(int)1920,height=(int)1080,framerate=(fraction)30/1" ! nvmultistreamtiler width=3840 height=1080 rows=1 columns=2 ! nvvideoconvert ! 'video/x-raw(memory:NVMM), format=(string)I420' ! nvv4l2h264enc bitrate=800 ! h264parse ! qtmux ! filesink location="test.mp4" -e

In the original implementation, the code used NvBufferTransform to perform some modification on the frame. This time, however, I just want to pass the frames through.

For reference, frameInfo is a struct that holds the width, height, and fd of a buffer produced by another thread, sketched below.
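
A minimal sketch of that struct (the field names match the snippets below; the exact types are my assumption):

      // per-frame record filled by the Argus capture thread (sketch)
      typedef struct {
        gint    fd;        // dmabuf fd of the captured buffer
        guint   width;     // frame width in pixels
        guint   height;    // frame height in pixels
        guint64 frameNum;  // Argus capture frame number
        guint64 frameTime; // capture timestamp, used as the buffer PTS
      } NvArgusFrameInfo;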

Original implementation:

      // copy buffer surface 
      GST_DEBUG_OBJECT (src, "consumer prepare buffer surfaces"); 
      GstMapInfo outmap = GST_MAP_INFO_INIT;
      gst_buffer_map (buffer, &outmap, GST_MAP_WRITE);
      NvBufSurface* surf = (NvBufSurface*) outmap.data;
      assert(surf->batchSize == src->sensors_num);
      
      assert(frames_buffer->len <= src->sensors_num); 
      surf->numFilled = 0; 
      for (int i = 0; i < frames_buffer->len; i++) {
        GST_DEBUG_OBJECT (src, "consumer fill buffer surface %d", i); 
        NvArgusFrameInfo *frameInfo = g_array_index(frames_buffer, NvArgusFrameInfo*, i);
        gint retn = NvBufferTransform (frameInfo->fd, (gint)surf->surfaceList[i].bufferDesc, &src->transform_params);
        if (retn != 0) {
          GST_ERROR_OBJECT(src, "consumer NvBufferTransform Failed");
          break;
        }
        surf->numFilled++; 
        // attach frame meta 
        NvDsFrameMeta *frame_meta = nvds_acquire_frame_meta_from_pool(batch_meta);

        frame_meta->pad_index = i;
        frame_meta->batch_id = i;
        frame_meta->frame_num = frameInfo->frameNum;
        frame_meta->buf_pts = frameInfo->frameTime;
        frame_meta->ntp_timestamp = 0;
        frame_meta->source_id = src->sensors[i];
        frame_meta->num_surfaces_per_frame = 1; 
        frame_meta->source_frame_width = frameInfo->width;
        frame_meta->source_frame_height = frameInfo->height;
        nvds_add_frame_meta_to_batch(batch_meta, frame_meta);
      }
      gst_buffer_unmap (buffer, &outmap);
      if (surf->numFilled != frames_buffer->len || 
          batch_meta->num_frames_in_batch != frames_buffer->len) {
        GST_ERROR_OBJECT(src, "consumer failed fill nvmm buffer");
        break;
      }   
    }
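
For completeness, src->transform_params was initialized once at element start with the old nvbuf_utils API, roughly like this (a sketch; the exact flag and filter values are assumptions rather than the element's actual settings):

      // one-time setup of the nvbuf_utils transform parameters (sketch)
      memset (&src->transform_params, 0, sizeof (src->transform_params));
      src->transform_params.transform_flag = NVBUFFER_TRANSFORM_FILTER;
      src->transform_params.transform_filter = NvBufferTransform_Filter_Smart;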

Updated Implementation:

      // copy buffer surface 
      GST_DEBUG_OBJECT (src, "consumer prepare buffer surfaces"); 
      GstMapInfo outmap = GST_MAP_INFO_INIT;
      if (!gst_buffer_map (buffer, &outmap, GST_MAP_WRITE)) {
        GST_ERROR_OBJECT (src, "consumer failed to map gst buffer");
        break;
      }

      NvBufSurface* surf = (NvBufSurface*) outmap.data;

      assert(surf->batchSize == src->sensors_num);
      assert(frames_buffer->len <= src->sensors_num); 
      surf->numFilled = 0;

      for (int i = 0; i < frames_buffer->len; i++) {
        NvArgusFrameInfo *frameInfo = g_array_index(frames_buffer, NvArgusFrameInfo*, i);
        // pass the captured dmabuf fd straight through instead of transforming
        surf->surfaceList[i].bufferDesc = frameInfo->fd;

        surf->numFilled++;

        NvDsFrameMeta *frame_meta = nvds_acquire_frame_meta_from_pool(batch_meta);

        frame_meta->pad_index = i;
        frame_meta->batch_id = i;
        frame_meta->frame_num = frameInfo->frameNum;
        frame_meta->buf_pts = frameInfo->frameTime;
        frame_meta->ntp_timestamp = 0;
        frame_meta->source_id = src->sensors[i];
        frame_meta->num_surfaces_per_frame = 1; 
        frame_meta->source_frame_width = frameInfo->width;
        frame_meta->source_frame_height = frameInfo->height;
        nvds_add_frame_meta_to_batch(batch_meta, frame_meta);
      }

      gst_buffer_unmap (buffer, &outmap);
      if (surf->numFilled != frames_buffer->len || 
          batch_meta->num_frames_in_batch != frames_buffer->len) {
        GST_ERROR_OBJECT(src, "consumer failed fill nvmm buffer");
        break;
      }   
    }

The single pipeline works fine and I get video from one camera, but when I try the second pipeline I just get a green video. Any suggestions?

I tried using NvBufSurfTransform instead of just passing the fd to the NvBufSurface, but this yields the same result.
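
Roughly what that attempt looked like inside the same loop (a sketch, not the exact code; the single-entry wrapper around surfaceList[i] is my own workaround so the whole-batch transform only writes into slot i):

      // sketch of the NvBufSurfTransform attempt (yields the same green output)
      NvBufSurface *in_surf = NULL;
      if (NvBufSurfaceFromFd (frameInfo->fd, (void **) &in_surf) != 0) {
        GST_ERROR_OBJECT (src, "consumer NvBufSurfaceFromFd failed");
        break;
      }
      // wrap slot i of the batched destination as a one-frame surface
      NvBufSurface out_surf = *surf;
      out_surf.batchSize = 1;
      out_surf.numFilled = 1;
      out_surf.surfaceList = &surf->surfaceList[i];

      NvBufSurfTransformParams transform_params = {0};
      transform_params.transform_flag = NVBUFSURF_TRANSFORM_FILTER;
      transform_params.transform_filter = NvBufSurfTransformInter_Default;
      if (NvBufSurfTransform (in_surf, &out_surf, &transform_params) !=
          NvBufSurfTransform_Error_None) {
        GST_ERROR_OBJECT (src, "consumer NvBufSurfTransform failed");
        break;
      }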

Both frames are available in my updated code: I can hard-code the index in the for loop to choose which camera shows up in the single-video pipeline.
