I am sending CvVideoCamera's output to a UIImageView, then calculating the contours and drawing the largest one onto an overlaying UIImageView. I'm using a rectangular ROI (region of interest) to crop the image so that the calculated contours fall entirely within the cropped area. How can I do the same for an arbitrary shape drawn with gestures on an iPad?

Using OpenCV 3.1

@interface FCCameraViewController() <CvVideoCameraDelegate>

@property (nonatomic, strong) FCVideoCamera *camera;
@property (nonatomic, assign) UIImageOrientation imageOrientation;

@end

@implementation FCCameraViewController

- (void)setStarted:(BOOL)started {
    _started = started;
    if (started) {
        [self.camera start];
    } else {
        [self.camera stop];
    }
}

- (void)viewWillLayoutSubviews
{
    [super viewWillLayoutSubviews];

    self.imageView.frame = self.view.bounds;

    NSArray *layers = [[self.imageView layer] sublayers];
    for (CALayer *layer in layers) {
        layer.frame = self.imageView.bounds;
    }
}

- (void)viewDidLoad
{
    [super viewDidLoad];

    // Camera
    self.camera = [[FCVideoCamera alloc] initWithParentView: self.imageView];
    self.camera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionBack;
    self.camera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset1280x720;
    self.camera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait;
    self.camera.defaultFPS = 30;
    self.camera.useAVCaptureVideoPreviewLayer = NO;
    self.camera.grayscaleMode = NO;
    self.camera.delegate = self;
    self.isAutoEdgeMode = NO;
    self.imageOrientation = UIImageOrientationUp;
}

- (void)setIsAutoEdgeMode:(BOOL)isAutoEdgeMode
{
    _isAutoEdgeMode = isAutoEdgeMode;
    if (!self.started && self.imageView.image != nil) {

        cv::Mat mat = [self CVMatFromUIImage:self.imageView.image];
        self.imageOrientation = self.imageView.image.imageOrientation;

        [self processImage:mat];
    }
}

#pragma mark - CvVideoCameraDelegate

cv::Mat original, src_gray, drawing_mat, cropped_area;
int thresh = 192; // previously 100
int max_thresh = 255;
cv::RNG rng(12345);

- (void)processImage:(cv::Mat &)image
{
    if (!self.isAutoEdgeMode) {
        return;
    }

    original = image.clone();

    cropped_area = [self crop:image];

    cv::cvtColor(cropped_area, src_gray, cv::COLOR_BGR2GRAY);
    cv::blur(src_gray, src_gray, cv::Size(3,3));

    cv::Mat threshold_output;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;

    /// Detect edges using Threshold
    cv::threshold( src_gray, threshold_output, thresh, 255, cv::THRESH_BINARY );

    /// Find contours
    cv::findContours(threshold_output, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

    // approximate the convex hulls
    std::vector<std::vector<cv::Point>> hull(contours.size());
    for( size_t i = 0; i < contours.size(); i++ ) {
        cv::convexHull(cv::Mat(contours[i]), hull[i], false);
    }

    // Approximate contours to polygons
    std::vector<std::vector<cv::Point> > contours_poly( contours.size() );

    std::vector<cv::Point> *largestContourPoly = nullptr;
    std::vector<cv::Point> *secondLargest = nullptr;
    double largestArea = 0;

    for (size_t i = 0; i < contours.size(); i++) {
        double contourArea = cv::contourArea(contours[i]);

        // Approximate a polygon from the contour
        cv::approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 1, true);

        // Random color for the debug drawing, using the RNG declared above
        cv::Scalar color(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));

        // DEBUG HELPER: Draw all polygons
        cv::polylines(cropped_area, contours_poly[i], true, color, FCLineWidth);

        // DEBUG HELPER: Draw convex hull
        cv::drawContours(cropped_area, hull, (int)i, RED, 2, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );

        // Save the largest polygon
        if (contourArea > largestArea) {
            largestArea = contourArea;
            secondLargest = largestContourPoly;
            largestContourPoly = &contours_poly[i];
        }
    }

    // Draw the largest polygon on the non-gray scale
    if (largestContourPoly != nullptr) {
        cv::polylines(cropped_area, *largestContourPoly, true, GREEN, FCLineWidth);

        // Show on ImageView if using a still image
        if (!self.started) {
            // use with imageOrientation in mind
            UIImage *convertedImage = [self UIImageFromCVMat:cropped_area];
            self.imageView.image = convertedImage;
        }

        // DEBUG HELPER: Inspect the annotated crop as a UIImage
        UIImage *testCrop = [self UIImageFromCVMat:cropped_area];

        // Add back to the original image at the ROI's offset
        cropped_area.copyTo(image(cv::Rect(300, 300, cropped_area.cols, cropped_area.rows)));

        // Show drawing on other image view
        cv::Mat matContour(image.size(), CV_8UC4);
        matContour = cv::Scalar(255, 255, 255, 255); // make the background white
        cv::polylines(matContour, *largestContourPoly, true, GREEN, FCLineWidth);

        // TODO: offset the matContour drawing by the cropped_area's offset (see the sketch after this method)

        UIImage *autoEdge = [self UIImageFromCVMat:matContour];
        self.autoEdgeImage = [autoEdge replaceColor:[UIColor colorWithRed:1.0f green:1.0f blue:1.0f alpha:1.0] inImage:autoEdge withTolerance:10];
    }
}
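
Regarding the TODO above: one way to realign the overlay is to shift each polygon point by the crop rectangle's origin before drawing into the full-size mat. A minimal sketch (the helper name is mine; the cv::Point(300, 300) offset assumes the same ROI used in crop: below):

// Sketch: shift a polygon by the ROI origin so it lands in the right place
// on the full-size matContour. The (300, 300) offset mirrors the ROI in crop:.
static std::vector<cv::Point> FCOffsetPolygon(const std::vector<cv::Point> &polygon, cv::Point offset)
{
    std::vector<cv::Point> shifted;
    shifted.reserve(polygon.size());
    for (const cv::Point &p : polygon) {
        shifted.push_back(p + offset);
    }
    return shifted;
}

// Usage, replacing the polylines call on matContour:
// cv::polylines(matContour, FCOffsetPolygon(*largestContourPoly, cv::Point(300, 300)), true, GREEN, FCLineWidth);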



#pragma mark - Helpers

- (cv::Mat)crop:(cv::Mat)uncropped {
    // Setup a rectangle to define your region of interest
    cv::Rect myROI(300, 300, 300, 300);

    // Crop the full image to that image contained by the rectangle myROI
    // Note that this doesn't copy the data
    cv::Mat croppedRef = uncropped(myROI);

    // Copy the data into new matrix
    cv::Mat cropped;
    croppedRef.copyTo(cropped);
    // DEBUG HELPER: Inspect the cropped region as a UIImage
    UIImage *croppedImage = [self UIImageFromCVMat:cropped];
    return cropped;
}

-(cv::Mat)CVMatFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;

    if  (image.imageOrientation == UIImageOrientationUp
         || image.imageOrientation == UIImageOrientationDown) {
        cols = image.size.height;
        rows = image.size.width;
    }

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels

    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,                 // Pointer to backing data
                                                    cols,                      // Width of bitmap
                                                    rows,                     // Height of bitmap
                                                    8,                          // Bits per component
                                                    cvMat.step[0],              // Bytes per row
                                                    colorSpace,                 // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags

    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);

    return cvMat;
}

- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
    CGColorSpaceRef colorSpace;

    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }

    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);

    // Creating CGImage from cv::Mat
    CGImageRef imageRef = CGImageCreate(cvMat.cols,                                 //width
                                        cvMat.rows,                                 //height
                                        8,                                          //bits per component
                                        8 * cvMat.elemSize(),                       //bits per pixel
                                        cvMat.step[0],                            //bytesPerRow
                                        colorSpace,                                 //colorspace
                                        kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
                                        provider,                                   //CGDataProviderRef
                                        NULL,                                       //decode
                                        false,                                      //should interpolate
                                        kCGRenderingIntentDefault                   //intent
                                        );


    // Getting UIImage from CGImage
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef scale:1 orientation:self.imageOrientation];


    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);

    return finalImage;
}

@end
  • Just an idea - I'd suggest that you check if a pixel belongs to the region defined by the user. If it does, you keep the pixel value; otherwise you play with the alpha channel or set the pixel to black. The image itself can only be rectangular, so you can crop based on the min and max values of the region, and inside it do as I suggested above. – Arnas Ivanavičius Oct 26 '16 at 08:42
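
Building on that comment: the min/max crop it suggests can be computed with cv::boundingRect over the gesture's points. A rough sketch, where cropToGestureBounds:points: and gesturePoints are hypothetical names, assuming the points are NSValue-wrapped CGPoints:

// Sketch of the comment's idea: crop to the bounding rectangle of the
// user-drawn region. gesturePoints is hypothetical.
- (cv::Mat)cropToGestureBounds:(cv::Mat)src points:(NSArray<NSValue *> *)gesturePoints {
    std::vector<cv::Point> pts;
    for (NSValue *value in gesturePoints) {
        CGPoint p = value.CGPointValue;
        pts.push_back(cv::Point((int)p.x, (int)p.y));
    }

    // Bounding box of the drawn shape, clamped to the image bounds
    cv::Rect bounds = cv::boundingRect(pts) & cv::Rect(0, 0, src.cols, src.rows);
    return src(bounds).clone();
}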

1 Answer


I ended up converting the UIBezierPath to an array of CGPoints. Using these points, I call cv::drawContours to fill the drawn area and create a mask. At the end, I return the region of interest and now use it for computing the largest contour area in my original post.
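
For completeness, here is roughly how the CGPoint array can be pulled out of the UIBezierPath with CGPathApply (a sketch; the applier function name and bezierPath are placeholders):

// Applier that collects each path element's end point into an NSMutableArray
static void FCCollectPathPoints(void *info, const CGPathElement *element) {
    NSMutableArray *points = (__bridge NSMutableArray *)info;
    switch (element->type) {
        case kCGPathElementMoveToPoint:
        case kCGPathElementAddLineToPoint:
            [points addObject:[NSValue valueWithCGPoint:element->points[0]]];
            break;
        case kCGPathElementAddQuadCurveToPoint:
            [points addObject:[NSValue valueWithCGPoint:element->points[1]]];
            break;
        case kCGPathElementAddCurveToPoint:
            [points addObject:[NSValue valueWithCGPoint:element->points[2]]];
            break;
        default:
            break;
    }
}

// Usage: populate self.points from the gesture's path
NSMutableArray *points = [NSMutableArray array];
CGPathApply(bezierPath.CGPath, (__bridge void *)points, FCCollectPathPoints);
self.points = points;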

- (cv::Mat)cropArbitrary:(cv::Mat)uncropped {

    cv::Mat dst(uncropped.rows, uncropped.cols, uncropped.type(), cv::Scalar(0));

    cv::Rect rectROI(0, 0, uncropped.cols, uncropped.rows);
    cv::Mat mask(uncropped.rows, uncropped.cols, CV_8UC1, cv::Scalar(0));

    std::vector< std::vector<cv::Point> >  co_ordinates;
    co_ordinates.push_back(std::vector<cv::Point>());

    // Convert the gesture's CGPoints into OpenCV points
    for (NSUInteger i = 0; i < self.points.count; i++) {
        CGPoint point = [self.points[i] CGPointValue];
        co_ordinates[0].push_back(cv::Point(point.x, point.y));
    }

    cv::drawContours(mask, co_ordinates, 0, cv::Scalar(255), cv::FILLED, 8);

    cv::Mat srcROI = uncropped(rectROI);
    cv::Mat dstROI = dst(rectROI);
    cv::Mat dst1;
    cv::Mat dst2;

    // Keep only the source pixels inside the drawn shape
    srcROI.copyTo(dst1, mask);

    // Everything outside the shape comes from the (black) destination
    cv::bitwise_not(mask, mask);
    dstROI.copyTo(dst2, mask);

    // dst holds the full composition; dst1 alone is the masked region of interest
    dstROI.setTo(0);
    dstROI = dst1 + dst2;
    return dst1;
}
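
With that helper in place, the rectangular crop in processImage: can be swapped out (a sketch; the gesture wiring that fills self.points is assumed):

    cropped_area = [self cropArbitrary:image];

Note that cropArbitrary: returns an image the same size as its input, with everything outside the drawn shape black, so the hard-coded cv::Rect(300, 300, ...) copy-back in processImage: would need to change; copying a full-size mat at that offset would run out of bounds.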