I didn't end up solving the problem the way I originally intended, but it is working now. If someone runs into the same problem, maybe my solution can provide some ideas. If you want to display a video in Qt, or if you have problems with the OpenCV libraries, maybe this helps.
Following are a few code snippets. They are not heavily commented, but I hope the concept is clear:
First I have a MainWindow with a label that I promoted to my CustomLabel type. The CustomLabel is my container for displaying the video and reacting to my mouse input.
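For context, here is a rough sketch of what the CustomLabel declaration could look like. The member and slot names are taken from the snippets below; the exact types, the state enum and the helper declarations are my assumptions, not the original header:

// customlabel.h -- sketch assembled from the members used below, not the original header
#pragma once

#include <QLabel>
#include <QImage>
#include <QTimer>
#include <QPainter>
#include <QPaintEvent>
#include <QPoint>
#include <vector>
#include <opencv2/opencv.hpp>

enum TrackingState { STATE_NO_STREAM, STATE_IDLE, STATE_DRAWING, STATE_TRACKING, STATE_LOST_POLE };

class CustomLabel : public QLabel
{
    Q_OBJECT
public:
    explicit CustomLabel(QWidget* parent = 0);

protected:
    void paintEvent(QPaintEvent* e);              // paints the current frame (or black)
    void drawVideoFrame(QPainter& painter);       // helper used by paintEvent

public slots:
    void onOpenClick();                           // open a video file
    void onWebcamBtnOpen();                       // open the webcam
    void onCloseVideoStream();                    // stop capture and timer
    void onTick();                                // grab and process one frame per timer tick

private:
    QWidget* getMainWindow();                     // finds the MainWindow whose signals are connected
    void AcquireNewPoints();                      // tracking helpers, not shown in this answer
    cv::Mat CalculateCenter(cv::Mat& frame, std::vector<cv::Point2f>& pts);
    void DrawPoints(cv::Mat& img, std::vector<cv::Point2f>& pts);

    QImage* currentImage;                         // RGB buffer that paintEvent draws
    QTimer* myTimer;
    cv::VideoCapture* cap;
    cv::Mat currentFrame, currentCopy, currentGrayFrame, previousGrayFrame;
    std::vector<cv::Point2f> previousPts, currentPts;
    std::vector<uchar> featuresFound;
    std::vector<float> err;
    cv::TermCriteria termcrit;
    cv::Point calculatedCenter, oldCenter, focusPt;
    QPoint currentMousePos;
    TrackingState currentState;
    int tickrate_ms, vid_fps, video_width, video_height, NOF_corners;
    double xScale, yScale;
    bool showPoints, initGrayFrame;
};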
CustomLabel::CustomLabel(QWidget* parent) : QLabel(parent), currentImage(NULL),
    tickrate_ms(33), vid_fps(0), video_width(0), video_height(0), myTimer(NULL), cap(NULL)
{
    // init variables
    showPoints = true;
    calculatedCenter = cv::Point(0,0);
    oldCenter = cv::Point(0,0);
    currentState = STATE_NO_STREAM;
    NOF_corners = 30; // default init value
    termcrit = cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 30, 0.01);
    // enable mouse tracking
    this->setMouseTracking(true);
    // connect signals with slots
    QObject::connect(getMainWindow(), SIGNAL(sendFileOpen()), this, SLOT(onOpenClick()));
    QObject::connect(getMainWindow(), SIGNAL(sendWebcamOpen()), this, SLOT(onWebcamBtnOpen()));
    QObject::connect(getMainWindow(), SIGNAL(closeVideoStreamSignal()), this, SLOT(onCloseVideoStream()));
}
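The slots that open the stream and start the timer are not shown here. As a rough sketch of what onOpenClick() could look like in this setup (the QFileDialog call, the QImage format and the exact timer wiring are my assumptions; only the member names come from the snippets):

// Sketch only -- needs <QFileDialog> and <QTimer>
void CustomLabel::onOpenClick()
{
    QString fileName = QFileDialog::getOpenFileName(this, tr("Open Video"));
    if (fileName.isEmpty())
        return;

    cap = new cv::VideoCapture(fileName.toStdString());
    if (!cap->isOpened())
        return;

    // query stream properties (OpenCV 2.x property names, matching the CV_* constants used below)
    vid_fps      = (int)cap->get(CV_CAP_PROP_FPS);
    video_width  = (int)cap->get(CV_CAP_PROP_FRAME_WIDTH);
    video_height = (int)cap->get(CV_CAP_PROP_FRAME_HEIGHT);
    tickrate_ms  = (vid_fps > 0) ? 1000 / vid_fps : 33;

    // the buffer that paintEvent() draws; it must match the video resolution,
    // otherwise the memcpy in onTick() writes past the end of the image
    currentImage = new QImage(video_width, video_height, QImage::Format_RGB888);

    currentState  = STATE_IDLE;
    initGrayFrame = true;

    // drive onTick() at roughly the video frame rate
    myTimer = new QTimer(this);
    connect(myTimer, SIGNAL(timeout()), this, SLOT(onTick()));
    myTimer->start(tickrate_ms);
}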
You have to override the paintEvent method:
void CustomLabel::paintEvent(QPaintEvent *e){
    QPainter painter(this);
    // When no image is loaded, paint the window black
    if (!currentImage){
        painter.fillRect(QRectF(QPoint(0, 0), QSize(width(), height())), Qt::black);
        QWidget::paintEvent(e);
        return;
    }
    // Draw a frame from the video
    drawVideoFrame(painter);
    QWidget::paintEvent(e);
}
The method called from paintEvent:
void CustomLabel::drawVideoFrame(QPainter &painter){
    // scale the frame to the full label size; the source rect is the whole QImage
    painter.drawImage(QRectF(QPoint(0, 0), QSize(width(), height())), *currentImage,
                      QRectF(QPoint(0, 0), currentImage->size()));
}
On every tick of my timer, onTick() is called:
void CustomLabel::onTick() {
    /* This method is called every couple of milliseconds.
     * It reads from OpenCV's capture interface and saves the frame as a QImage.
     * The state machine is implemented here; every tick is handled.
     */
    if(cap->isOpened()){
        switch(currentState) {
        case STATE_IDLE:
            if (!cap->read(currentFrame)){
                qDebug() << "cvWindow::_tick !!! Failed to read frame from the capture interface in STATE_IDLE";
            }
            break;
        case STATE_DRAWING:
            if (!cap->read(currentFrame)){
                qDebug() << "cvWindow::_tick !!! Failed to read frame from the capture interface in STATE_DRAWING";
            }
            currentFrame.copyTo(currentCopy);
            // draw the selection circle around focusPt, scaled from label to video coordinates
            cv::circle(currentCopy, cv::Point(focusPt.x*xScale, focusPt.y*yScale),
                       sqrt((focusPt.x - currentMousePos.x())*(focusPt.x - currentMousePos.x())*xScale*xScale
                            + (focusPt.y - currentMousePos.y())*(focusPt.y - currentMousePos.y())*yScale*yScale),
                       cv::Scalar(0, 0, 255), 2, 8, 0);
            //qDebug() << "focus pt x " << focusPt.x << "y " << focusPt.y;
            break;
        case STATE_TRACKING:
            if (!cap->read(currentFrame)){
                qDebug() << "cvWindow::_tick !!! Failed to read frame from the capture interface in STATE_TRACKING";
            }
            cv::cvtColor(currentFrame, currentFrame, CV_BGR2GRAY, 0);
            if(initGrayFrame){
                // first tracking tick: only store the reference frame and wait for the next one
                currentGrayFrame.copyTo(previousGrayFrame);
                initGrayFrame = false;
                return;
            }
            cv::calcOpticalFlowPyrLK(previousGrayFrame, currentFrame, previousPts, currentPts,
                                     featuresFound, err, cv::Size(21, 21), 3, termcrit, 0, 1e-4);
            AcquireNewPoints();
            currentCopy = CalculateCenter(currentFrame, currentPts);
            if(showPoints){
                DrawPoints(currentCopy, currentPts);
            }
            break;
        case STATE_LOST_POLE:
            currentState = STATE_IDLE;
            initGrayFrame = true;
            cv::cvtColor(currentFrame, currentFrame, CV_GRAY2BGR);
            break;
        default:
            break;
        }

        // if not tracking, draw currentFrame
        // OpenCV uses BGR order, convert it to RGB
        // the memcpy assumes currentImage has the same resolution as the frame and tightly packed rows
        if(currentState == STATE_IDLE) {
            cv::cvtColor(currentFrame, currentFrame, CV_BGR2RGB);
            memcpy(currentImage->scanLine(0), (unsigned char*)currentFrame.data,
                   currentImage->width() * currentImage->height() * currentFrame.channels());
        } else {
            cv::cvtColor(currentCopy, currentCopy, CV_BGR2RGB);
            memcpy(currentImage->scanLine(0), (unsigned char*)currentCopy.data,
                   currentImage->width() * currentImage->height() * currentCopy.channels());
            previousGrayFrame = currentFrame;
            previousPts = currentPts;
        }
    }
    // Trigger a paint event to redraw the window
    update();
}
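As a side note, instead of the memcpy into currentImage you can also wrap the cv::Mat data in a QImage directly. This is not what my code above does, just an alternative sketch (matToQImage is a hypothetical helper; the Mat must already be in RGB order):

// Alternative to the memcpy -- needs <QImage> and <opencv2/opencv.hpp>
// The returned image owns its pixels because of the copy(), so the Mat may be reused.
static QImage matToQImage(const cv::Mat &rgbFrame)
{
    // assumes rgbFrame is CV_8UC3 and already converted to RGB order
    QImage view((const uchar*)rgbFrame.data, rgbFrame.cols, rgbFrame.rows,
                (int)rgbFrame.step, QImage::Format_RGB888);
    return view.copy();   // deep copy so it outlives the Mat
}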
Don't mind the yScale and xScale factors; they are only there for the OpenCV drawing functions, because the CustomLabel size is not the same as the video resolution.
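For completeness, they are just the ratio between the video resolution and the current label size. A minimal sketch (updateScaleFactors() is a hypothetical helper, not part of the code above):

void CustomLabel::updateScaleFactors()
{
    // hypothetical helper: maps label (mouse) coordinates to video-pixel coordinates,
    // e.g. called after opening a stream and from resizeEvent()
    if (width() > 0 && height() > 0 && video_width > 0 && video_height > 0) {
        xScale = (double)video_width  / (double)width();
        yScale = (double)video_height / (double)height();
    }
}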