
This code compiles without errors, but no face is ever detected in the camera. I want faces to be detected in real time in the camera preview, surrounded by a red square, but I think I have not placed the code correctly. Should something go in viewDidLoad, or somewhere else?

import UIKit
import CoreImage

class ViewController: UIViewController, UIAlertViewDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

@IBOutlet var imageView: UIImageView!
@IBAction func Moodify(_ sender: UIButton) {


    func detect() {

        guard let personciImage = CIImage(image: imageView.image!) else {
            return
        }

        let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
        let faces = faceDetector?.features(in: personciImage)


        // For converting the Core Image Coordinates to UIView Coordinates
        let ciImageSize = personciImage.extent.size
        var transform = CGAffineTransform(scaleX: 1, y: -1)
        transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

        for face in faces as! [CIFaceFeature] {

            print("Found bounds are \(face.bounds)")

            // Apply the transform to convert the coordinates
            var faceViewBounds = face.bounds.applying(transform)

            // Calculate the actual position and size of the rectangle in the image view
            let viewSize = imageView.bounds.size
            let scale = min(viewSize.width / ciImageSize.width,
                            viewSize.height / ciImageSize.height)
            let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
            let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            let faceBox = UIView(frame: faceViewBounds)
            //let faceBox = UIView(frame: face.bounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.red.cgColor
            faceBox.backgroundColor = UIColor.clear
            imageView.addSubview(faceBox)

            if face.hasLeftEyePosition {
                print("Left eye bounds are \(face.leftEyePosition)")
            }

            if face.hasRightEyePosition {
                print("Right eye bounds are \(face.rightEyePosition)")
            }
        }
    }

    let picker = UIImagePickerController()
    picker.delegate = self
    picker.allowsEditing = true
    picker.sourceType = .camera
    picker.cameraDevice = .front
    self.present(picker, animated: true, completion: { _ in })

    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [AnyHashable: Any]) {
        let chosenImage = info[UIImagePickerControllerEditedImage]
        self.imageView!.image = chosenImage as? UIImage
        picker.dismiss(animated: true, completion: { _ in })
    }

     // picker.dismiss(animated: true, completion: { _ in })
    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        picker.dismiss(animated: true, completion: { _ in })
    }
}

override func viewDidLoad() {

    let alert = UIAlertController(title: "Ooops!!!", message: "Camera is not connected", preferredStyle: UIAlertControllerStyle.alert)
    alert.addAction(UIAlertAction(title: "Connect", style: UIAlertActionStyle.default, handler: nil))
    self.present(alert, animated: true, completion: nil)

    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.

}

override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    // Dispose of any resources that can be recreated.
}
}
Xcodian Solangi
  • Could you please share the tutorial you took this code from? – Tung Fam Dec 20 '16 at 09:58
  • @TungFam here is the link: http://www.appcoda.com/face-detection-core-image/ – Xcodian Solangi Dec 20 '16 at 10:09
  • I'm not sure you asked this the right way, but you stated that "no face is ever detected in the camera". According to the tutorial, it should show the face on a still image, not in the camera in real time. – Tung Fam Dec 20 '16 at 10:38
  • @TungFam But I have done some editing for a real-time camera; you can run this code. It only starts the camera but does not do any detection. – Xcodian Solangi Dec 20 '16 at 10:55
  • Could you please tell me what you expect to trigger your `-detect` method if it is __nested__ in your event handler's body? And why do you put and try to show an alert controller in your `-viewDidLoad` method's body? etc... – holex Jan 17 '17 at 09:39
  • I don't understand it very well; I mixed up code from several tutorials. Now the button brings up the camera, but the camera is not detecting anything, and I want it to detect in real time. – Xcodian Solangi Jan 17 '17 at 09:49
  • @Solangi, maybe you should not read any (=poor) 3rd party tutorials, but get the concept and idea __[directly from Apple](https://developer.apple.com/library/content/documentation/GraphicsImaging/Conceptual/CoreImaging/ci_detect_faces/ci_detect_faces.html)__. – holex Jan 17 '17 at 12:05
  • Thanks for your suggestion @holex, but I am working in Swift 3 and that documentation is in Objective-C, so I am following Swift 3 tutorials. – Xcodian Solangi Jan 17 '17 at 12:07
  • Do you have any idea about OpenCV? – User511 Jan 23 '17 at 12:22
  • Yes! It's a library, and I have also used its wrapper EmguCV in a C# project. – Xcodian Solangi Jan 23 '17 at 12:24

2 Answers


You most probably just need to trigger the function the way it is described in the tutorial:

We will invoke the detect method in viewDidLoad. So insert the following line of code in the method:

override func viewDidLoad() {
    super.viewDidLoad()

    detect()
}

Compile and run the app.

EDIT: This is the solution when detect is an instance method of the class, but in your case you use an IBAction, whose body is structured differently. You should delete the nested function's declaration line func detect() { and its matching closing bracket, the

}

just before

let picker =

and this part has to go inside its own function:

let picker = UIImagePickerController()
picker.delegate = self
picker.allowsEditing = true
picker.sourceType = .camera
picker.cameraDevice = .front
self.present(picker, animated: true, completion: { _ in })

For your case you can probably omit this part as well; a sketch of the rearranged class follows.
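To make the restructuring concrete, here is a minimal sketch of how the class could be laid out, with detect() promoted to an instance method and the picker delegate methods moved to class scope. This is one possible arrangement, not the asker's exact code; the body of detect() is the same as in the question and is elided here:

import UIKit
import CoreImage

class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet var imageView: UIImageView!

    // detect() is now an instance method, so the rest of the class can call it.
    func detect() {
        // ... same face-detection body as in the question ...
    }

    // The IBAction only presents the picker.
    @IBAction func Moodify(_ sender: UIButton) {
        let picker = UIImagePickerController()
        picker.delegate = self
        picker.allowsEditing = true
        picker.sourceType = .camera
        picker.cameraDevice = .front
        present(picker, animated: true, completion: nil)
    }

    // Delegate methods live at class scope, not nested inside the IBAction.
    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
        if let chosenImage = info[UIImagePickerControllerEditedImage] as? UIImage {
            imageView.image = chosenImage
            detect() // run detection once the image is set
        }
        picker.dismiss(animated: true, completion: nil)
    }

    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        picker.dismiss(animated: true, completion: nil)
    }
}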

Vanya
  • @Solangi What version of Swift are you using? In older versions you needed to use self.detect() – Vanya Jan 17 '17 at 10:23
  • I am using Swift 3 – Xcodian Solangi Jan 17 '17 at 10:23
  • @Solangi Looks like a mystery. I have been doing iOS development for 7 years, but I have never run into such a situation. I mean, you made a subclass, the subclass has a method, and that method is not recognised when called from within the subclass. – Vanya Jan 17 '17 at 13:55
  • So what should I do? Would you suggest something? – Xcodian Solangi Jan 17 '17 at 15:02
  • Thanks for your effort, but I am still getting the error "Use of unresolved identifier 'detect'". Would you please update my question with the code you wrote in your answer? – Xcodian Solangi Jan 18 '17 at 07:35

After looking through your code, I see that you never call detect() after you take a snap. I tried fixing it as described below; however, detect() will return zero faces found, as I describe in Face Detection with Camera.

lazy var picker: UIImagePickerController = {
    let picker = UIImagePickerController()
    picker.delegate = self
    picker.allowsEditing = true
    picker.sourceType = .camera
    picker.cameraDevice = .front
    return picker
}()

@IBOutlet var imageView: UIImageView!
override func viewDidLoad() {
    super.viewDidLoad()
    imageView.contentMode = .scaleAspectFit
}

@IBAction func TakePhoto(_ sender: Any) {
    self.present(picker, animated: true, completion: nil)
}

// MARK: - UIImagePickerControllerDelegate
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
    if let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
        self.imageView!.image = chosenImage
        // Got the image from camera, the imageView.image is not nil, so it's time for facial detection
        detect()
        picker.dismiss(animated: true, completion: nil)
    }
}


func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
    picker.dismiss(animated: true, completion: nil)
}

// MARK: - Face Detection

func detect() {

    guard let personciImage = CIImage(image: imageView.image!) else {
        return
    }

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personciImage)


    // For converting the Core Image Coordinates to UIView Coordinates
    let ciImageSize = personciImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)
    print("faces.count = \(faces?.count)")

    for face in faces as! [CIFaceFeature] {

        print("Found bounds are \(face.bounds)")

        // Apply the transform to convert the coordinates
        var faceViewBounds = face.bounds.applying(transform)

        // Calculate the actual position and size of the rectangle in the image view
        let viewSize = imageView.bounds.size
        let scale = min(viewSize.width / ciImageSize.width,
                        viewSize.height / ciImageSize.height)
        let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
        let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

        faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
        faceViewBounds.origin.x += offsetX
        faceViewBounds.origin.y += offsetY

        let faceBox = UIView(frame: faceViewBounds)
        //let faceBox = UIView(frame: face.bounds)
        faceBox.layer.borderWidth = 3
        faceBox.layer.borderColor = UIColor.red.cgColor
        faceBox.backgroundColor = UIColor.clear
        imageView.addSubview(faceBox)

        if face.hasLeftEyePosition {
            print("Left eye bounds are \(face.leftEyePosition)")
        }

        if face.hasRightEyePosition {
            print("Right eye bounds are \(face.rightEyePosition)")
        }
    }
}
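The snapshot approach above still only detects faces in a captured still. Since the question asks for real-time detection with a red square, here is a minimal sketch of one way to get live face rectangles with AVFoundation's metadata output instead of Core Image snapshots. This is an assumption about the desired setup, not part of the original answer; RealtimeFaceViewController and faceBoxes are placeholder names, and on iOS 10 the app also needs an NSCameraUsageDescription entry in Info.plist:

import UIKit
import AVFoundation

class RealtimeFaceViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    let session = AVCaptureSession()
    var previewLayer: AVCaptureVideoPreviewLayer!
    var faceBoxes = [UIView]()

    override func viewDidLoad() {
        super.viewDidLoad()

        // Front camera input (iOS 10 API)
        guard let device = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera,
                                                         mediaType: AVMediaTypeVideo,
                                                         position: .front),
            let input = try? AVCaptureDeviceInput(device: device),
            session.canAddInput(input) else { return }
        session.addInput(input)

        // Ask AVFoundation to report face metadata on every frame
        let output = AVCaptureMetadataOutput()
        guard session.canAddOutput(output) else { return }
        session.addOutput(output)
        output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        output.metadataObjectTypes = [AVMetadataObjectTypeFace]

        // Live camera preview
        previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.frame = view.bounds
        previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
        view.layer.addSublayer(previewLayer)

        session.startRunning()
    }

    // Swift 3 delegate signature; called whenever faces are detected
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [Any]!, from connection: AVCaptureConnection!) {
        // Remove the boxes drawn for the previous frame
        faceBoxes.forEach { $0.removeFromSuperview() }
        faceBoxes.removeAll()

        for object in metadataObjects {
            guard let face = object as? AVMetadataFaceObject,
                let converted = previewLayer.transformedMetadataObject(for: face) else { continue }

            // Red square around each detected face, in preview-layer coordinates
            let box = UIView(frame: converted.bounds)
            box.layer.borderWidth = 3
            box.layer.borderColor = UIColor.red.cgColor
            box.backgroundColor = UIColor.clear
            view.addSubview(box)
            faceBoxes.append(box)
        }
    }
}

With this approach, AVCaptureMetadataOutput runs the face detector on every frame, and transformedMetadataObject(for:) converts the face bounds into preview-layer coordinates, which sidesteps the manual Core Image coordinate transform used in detect().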
Willjay
  • It is not detecting yet! I also tried calling detect() in viewDidLoad, but it gives a fatal error unwrapping an optional. Would you update my question's code when you update your answer? – Xcodian Solangi Jan 18 '17 at 09:33
  • It would crash when you call `detect()` in `viewDidLoad()` because the imageView.image is not set at the beginning – Willjay Jan 18 '17 at 09:34
  • What's your button (Moodify) for? In my case, it is just a button to bring up the picker for a snapshot. After I take a picture, the delegate `imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any])` will run and then `detect()` – Willjay Jan 18 '17 at 09:45
  • Yes Moodify is my button – Xcodian Solangi Jan 18 '17 at 09:50
  • I put it [here](https://github.com/Weijay/FaceDetection/tree/master/CameraWithPickerView/tmp) – Willjay Jan 18 '17 at 10:07
  • Detection is not working in my view controller, and you should not put other people's material on GitHub under your own copyright. – Xcodian Solangi Jan 18 '17 at 10:47