I am streaming from a webcam using OpenCV. Each captured frame (of type cv::Mat) is transported via a custom signal to slots in a node-based QGraphicsScene. Since I have a single type of node that actually needs to display the cv::Mat, while the rest are just for processing the data, I have created a viewer widget that converts the cv::Mat data to QImage data and then draws it on its surface in paintEvent().
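For context, the frames arrive through Qt's signal/slot mechanism. A minimal sketch of the wiring (QCVN_Capture and its frameReady(cv::Mat) signal are hypothetical stand-ins for my actual capture class):

QCVN_Capture *capture = new QCVN_Capture;        // hypothetical: emits frameReady(cv::Mat)
QCVN_Node_Viewer *viewer = new QCVN_Node_Viewer;

// cv::Mat has to be registered with the meta-type system so it can be
// carried through (queued) signal/slot connections across threads
qRegisterMetaType<cv::Mat>("cv::Mat");

QObject::connect(capture, SIGNAL(frameReady(cv::Mat)),
                 viewer, SLOT(receiveFrame(cv::Mat)));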
Here is the code for the widget itself:
qcvn_node_viewer.hpp
#ifndef QCVN_NODE_VIEWER_HPP
#define QCVN_NODE_VIEWER_HPP

#include <QWidget>
#include <QImage>
#include <QSize>
#include <QPaintEvent>

#include <opencv2/core.hpp>

class QCVN_Node_Viewer : public QWidget
{
    Q_OBJECT
    QImage output;
public:
    explicit QCVN_Node_Viewer(QWidget *parent = 0);
    ~QCVN_Node_Viewer();

    QSize sizeHint() const;
    QSize minimumSizeHint() const;

    // Source: http://stackoverflow.com/a/17137998/1559401
    static QImage Mat2QImage(cv::Mat const& frame);
    static cv::Mat QImage2Mat(QImage const& src);
protected:
    void paintEvent(QPaintEvent *event);
signals:
    // Nodes connect to the sendFrame(cv::Mat) signal
    void sendFrame(cv::Mat);
public slots:
    void receiveFrame(cv::Mat frame);
};

#endif // QCVN_NODE_VIEWER_HPP
qcvn_node_viewer.cpp
#include "qcvn_node_viewer.hpp"
#include <opencv2/imgproc.hpp>
#include <QPainter>
QCVN_Node_Viewer::QCVN_Node_Viewer(QWidget *parent) :
QWidget(parent)
{
qRegisterMetaType<cv::Mat>("cv::Mat");
}
QCVN_Node_Viewer::~QCVN_Node_Viewer()
{
}
QSize QCVN_Node_Viewer::sizeHint() const
{
return output.size();
}
QSize QCVN_Node_Viewer::minimumSizeHint() const
{
return output.size();
}
QImage QCVN_Node_Viewer::Mat2QImage(cv::Mat const& frame)
{
cv::Mat temp;
cvtColor(frame, temp,CV_BGR2RGB);
QImage dest((const uchar *) temp.data, temp.cols, temp.rows, temp.step, QImage::Format_RGB888);
dest.bits(); // enforce deep copy
return dest;
}
cv::Mat QCVN_Node_Viewer::QImage2Mat(QImage const& frame)
{
cv::Mat tmp(frame.height(),frame.width(),CV_8UC3,(uchar*)frame.bits(),frame.bytesPerLine());
cv::Mat result; // deep copy just in case (my lack of knowledge with open cv)
cvtColor(tmp, result,CV_BGR2RGB);
return result;
}
void QCVN_Node_Viewer::paintEvent(QPaintEvent *event)
{
QPainter painter(this);
painter.drawImage(QPoint(0,0), output);
painter.end();
}
void QCVN_Node_Viewer::receiveFrame(cv::Mat frame)
{
if(frame.empty()) return;
// Pass along the received frame
emit sendFrame(frame);
// Generate
// http://stackoverflow.com/questions/17127762/cvmat-to-qimage-and-back
// Applications crashes sometimes! Conversion seems to be incorrect
output = Mat2QImage(frame);
output.bits();
setFixedSize(output.width(), output.height());
repaint();
}
The QWidget is embedded in a QGraphicsScene using a QGraphicsProxyWidget, along with a QGraphicsRectItem that I use for selecting and moving the node around in the scene.
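The embedding itself looks roughly like this (a minimal sketch, assuming a plain QGraphicsScene; the rectangle geometry is arbitrary):

#include <QGraphicsScene>
#include <QGraphicsRectItem>
#include <QGraphicsProxyWidget>

QGraphicsScene scene;

// Selectable/movable rectangle representing the node
QGraphicsRectItem *rect = scene.addRect(0, 0, 320, 240);
rect->setFlags(QGraphicsItem::ItemIsSelectable | QGraphicsItem::ItemIsMovable);

// Embed the viewer widget and parent it to the rectangle so that
// moving the rectangle moves the embedded widget along with it
QCVN_Node_Viewer *viewer = new QCVN_Node_Viewer;
QGraphicsProxyWidget *proxy = scene.addWidget(viewer);
proxy->setParentItem(rect);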
Using QPainter is considered more efficient than using a QLabel and setting its QPixmap to the captured and converted frame, and as far as I know an OpenGL viewport inside a QGraphicsScene is not an option. The problem: it seems that processing the paint events in the scene also handles the local ones inside the QGraphicsItems and QGraphicsProxyWidgets. The frame that the viewer paints on its surface changes only when the scene's paint event handler is called (hovering over some of my nodes, moving them around, etc.).
Is there a way to fix this? I'm about to go for the QLabel+QPixmap option (even though I'm not sure whether it will suffer from the same or a different problem), but first I'd like to make sure there is nothing I can do to keep my current way of doing things.
EDIT: Okay, so I have changed my viewer to use a QLabel instead of the widget's paint event, and it works like a charm. The question is still open though, since I'd like to know how to use local paint events.
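For reference, the QLabel-based version boils down to something like this (a sketch; label is assumed to be a QLabel* member of the class, and the conversion helpers stay the same):

#include <QLabel>
#include <QVBoxLayout>
#include <QPixmap>

QCVN_Node_Viewer::QCVN_Node_Viewer(QWidget *parent) :
    QWidget(parent)
{
    qRegisterMetaType<cv::Mat>("cv::Mat");

    // A QLabel child fills the widget instead of a paintEvent() override
    label = new QLabel(this);
    QVBoxLayout *layout = new QVBoxLayout(this);
    layout->setContentsMargins(0, 0, 0, 0);
    layout->addWidget(label);
}

void QCVN_Node_Viewer::receiveFrame(cv::Mat frame)
{
    if (frame.empty()) return;

    // Pass along the received frame as before
    emit sendFrame(frame);

    output = Mat2QImage(frame);
    label->setPixmap(QPixmap::fromImage(output));
    setFixedSize(output.width(), output.height());
}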