I am trying to process scanned Image using OpenCV in iOS (Swift) but I am not getting clear Image after scanning it
Reference of below code is taken from here :- Scanned Document - Text & Background clarity not good using OpenCV + iOS
Below are the images that shows result comparison between the quality of image before and after scanning it. And the result is same, no enhanced quality in After processing image.
Image before processing
Image After Processing
Here is my code for scanning image.
+ (cv::Mat)cvMatFromUIImage3:(cv::Mat)image
{
// NSString *foo = path;
// std::string bar = std::string([image UTF8String]);
cv::Mat input = image;
// cv::imread(image,IMREAD_UNCHANGED);
int maxdim = input.cols; //std::max(input.rows,input.cols);
const int dim = 1024;
if ( maxdim > dim )
{
double scale = (double)dim/(double)maxdim;
cv::Mat t;
cv::resize( input, t, cv::Size(), scale,scale );
input = t;
}
if ( input.type()!=CV_8UC4 )
CV_Error(CV_HAL_ERROR_UNKNOWN,"!bgr");
cv::Mat result;
input.copyTo( result ); // result is just for drawing the text rectangles
// as previously...
cv::Mat median;
// remove highlight pixels e.g., those from debayer-artefacts and noise
cv::medianBlur(input,median,5);
cv::Mat localmax;
// find local maximum
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT,cv::Size(15,15) );
cv::morphologyEx( median,localmax,cv::MORPH_CLOSE,kernel,cv::Point(-1,-1),1,cv::BORDER_REFLECT101 );
std::vector< cv::Rect > bb;
// detectLetters by @William, modified to internally do the grayscale conversion if necessary
// https://stackoverflow.com/questions/23506105/extracting-text-opencv?rq=1
std::vector<cv::Rect> letterBBoxes1=detectLetters(input);
// detectLetters( input, bb );
// compose a simple Gaussian model for text background (still assumed white)
cv::Mat mask( input.size(),CV_8UC1,cv::Scalar( 0 ) );
if ( bb.empty() )
return image; // TODO; none found
for ( size_t i=0;i<bb.size(); ++i )
{
cv::rectangle( result, bb[i], cv::Scalar(0,0,255),2,8 ); // visualize only
cv::rectangle( mask, bb[i], cv::Scalar( 1 ), -1 ); // create a mask for cv::meanStdDev
}
cv::Mat mean,dev;
cv::meanStdDev( localmax, mean, dev, mask );
if ( mean.type()!=CV_64FC1 || dev.type()!=CV_64FC1 || mean.size()!=cv::Size(1,3) || dev.size()!=cv::Size(1,3) )
CV_Error(CV_HAL_ERROR_UNKNOWN, "should never happen");
double minimum[3];
double maximum[3];
// simply truncate the localmax according to our simple Gaussian model (+/- one standard deviation)
for ( unsigned int u=0;u<3;++u )
{
minimum[u] = mean.at<double>(u ) - dev.at<double>( u );
maximum[u] = mean.at<double>(u ) + dev.at<double>( u );
}
for ( int y=0;y<mask.rows;++y){
for ( int x=0;x<mask.cols;++x){
cv::Vec3b & col = localmax.at<cv::Vec3b>(y,x);
for ( unsigned int u=0;u<3;++u )
{
if ( col[u]>maximum[u] )
col[u]=maximum[u];
else if ( col[u]<minimum[u] )
col[u]=minimum[u];
}
}
}
// do the per pixel gain then
cv::Mat dst;
input.copyTo( dst );
for ( int y=0;y<input.rows;++y){
for ( int x=0;x<input.cols;++x){
const cv::Vec3b & v1=input.at<cv::Vec3b>(y,x);
const cv::Vec3b & v2=localmax.at<cv::Vec3b>(y,x);
cv::Vec3b & v3=dst.at<cv::Vec3b>(y,x);
for ( int i=0;i<3;++i )
{
double gain = 255.0/(double)v2[i];
v3[i] = cv::saturate_cast<unsigned char>( gain * v1[i] );
}
}
}
return dst;
}