Here https://stackoverflow.com/a/49817506/1277317 is an example of how to use a convolutional network in OpenCV, but the example is in Python. How do I do the same in C++? Specifically, how do I do this in C++:
net = cv.dnn.readNetFromTensorflow('model.pb')
net.setInput(inp.transpose(0, 3, 1, 2))
cv_out = net.forward()
?
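Based on the OpenCV documentation, my understanding of a rough C++ translation is the sketch below. The blank 60x162 single-channel input is just a placeholder, and I am assuming that blobFromImage already produces the NCHW layout that the transpose(0, 3, 1, 2) call achieves in Python, so no explicit transpose is needed:

#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // Equivalent of cv.dnn.readNetFromTensorflow('model.pb')
    Net net = readNetFromTensorflow("model.pb");

    // Placeholder single-channel float image (60 rows x 162 columns).
    Mat img = Mat::zeros(60, 162, CV_32F);

    // blobFromImage returns a 4D NCHW blob (here 1x1x60x162).
    Mat blob = blobFromImage(img);

    net.setInput(blob);           // equivalent of net.setInput(...)
    Mat out = net.forward();      // equivalent of cv_out = net.forward()

    return 0;
}

Is this the right way to do it?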
And how do I create a Mat for the setInput function for an image of size 60x162x1? I use float data, just like in the Python example. Right now I have the following code, and it gives incorrect results (an alternative I am considering is sketched after the code):
Net net = readNet("e://xor.pb");

float x0[60][162];
for (int i = 0; i < 60; i++)
{
    for (int j = 0; j < 162; j++)
    {
        x0[i][j] = 0;
    }
}
x0[5][59] = 0.5;
x0[5][60] = 1;
x0[5][61] = 1;
x0[5][62] = 0.5;

Mat aaa = cv::Mat(60, 162, CV_32F, x0);
Mat inputBlob = dnn::blobFromImage(aaa, 1.0, Size(60, 162));

net.setInput(inputBlob, "conv2d_input");
Mat prob = net.forward("activation_2/Softmax");
for (int i = 0; i < prob.cols; i++)
{
    qDebug() << i << prob.at<float>(0, i);
}
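For what it is worth, the variant I am considering instead is below. Since cv::Size takes (width, height), I suspect my Size(60, 162) above makes blobFromImage resize the 60x162 image, so here I leave the Size argument out so the original dimensions are kept, and I fill the values with Mat::at instead of a raw array. I am not sure this is the correct approach; the layer names are the same ones from my model as above:

Mat aaa = Mat::zeros(60, 162, CV_32F);   // 60 rows, 162 columns, single channel
aaa.at<float>(5, 59) = 0.5f;
aaa.at<float>(5, 60) = 1.0f;
aaa.at<float>(5, 61) = 1.0f;
aaa.at<float>(5, 62) = 0.5f;

// No Size argument: the blob keeps the 60x162 shape and becomes 1x1x60x162 (NCHW).
Mat inputBlob = dnn::blobFromImage(aaa);
net.setInput(inputBlob, "conv2d_input");
Mat prob = net.forward("activation_2/Softmax");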