6

I am implementing the example given in google-vision face tracker. MyFaceDetector class:

public class MyFaceDetector extends Detector<Face> {
    private Detector<Face> mDelegate;

    MyFaceDetector(Detector<Face> delegate) {
        mDelegate = delegate;
    }

    public SparseArray<Face> detect(Frame frame) {
        return mDelegate.detect(frame);
    }

    public boolean isOperational() {
        return mDelegate.isOperational();
    }

    public boolean setFocus(int id) {
        return mDelegate.setFocus(id);
    }

}

FaceTrackerActivity class:

/**
 * Builds the face-detection pipeline: wraps the stock FaceDetector in
 * MyFaceDetector (so frames can be intercepted), attaches the tracker
 * processor, and creates the front-camera source feeding it.
 */
private void createCameraSource() {

    imageView = (ImageView) findViewById(R.id.face);

    // Wrap the stock detector so detect(Frame) can be intercepted.
    FaceDetector faceDetector = new FaceDetector.Builder(this).build();
    myFaceDetector = new MyFaceDetector(faceDetector);
    myFaceDetector.setProcessor(new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory())
            .build());

    // Warn before wiring the camera: until the native libraries are
    // downloaded, detect() will silently return no faces.
    if (!myFaceDetector.isOperational()) {
        Log.w(TAG, "Face detector dependencies are not yet available.");
    }

    mCameraSource = new CameraSource.Builder(this, myFaceDetector)
            .setRequestedPreviewSize(640, 480)
            .setFacing(CameraSource.CAMERA_FACING_FRONT)
            // NOTE(review): 60 fps is only a request; CameraSource falls back
            // to the closest supported range (often 30) on most devices.
            .setRequestedFps(60.0f)
            .build();
}

I need to crop the detected face and display it in an ImageView. I am not able to build my own custom Frame here, and frame.getBitmap() always returns null inside detect(Frame frame). How can I achieve this?

Andro
  • 952
  • 9
  • 19
  • Look here https://stackoverflow.com/questions/32299947/mobile-vision-api-concatenate-new-detector-object-to-continue-frame-processing/32314136#32314136 – George Aug 19 '17 at 16:59

2 Answers

2

frame.getBitmap() will only return a value if the frame was originally created from a bitmap. CameraSource supplies image information as ByteBuffers rather than bitmaps, so that is the image information that is available.

frame.getGrayscaleImageData() will return the image data.

frame.getMetadata() will return metadata such as the image dimensions and the image format.

pm0733464
  • 2,862
  • 14
  • 16
0

This goes in CameraSource.java

// Build a vision Frame from the pending camera buffer. The frame is created
// from NV21 byte data, which is why frame.getBitmap() returns null downstream.
Frame outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
                  mPreviewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();

int w = outputFrame.getMetadata().getWidth();
int h = outputFrame.getMetadata().getHeight();
SparseArray<Face> detectedFaces = mDetector.detect(outputFrame);
// Fallback image shown when no face is found or the crop fails to decode.
Bitmap bitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);

if (detectedFaces.size() > 0) {
    // For camera frames the pixel data is exposed as a ByteBuffer, not a Bitmap.
    ByteBuffer byteBufferRaw = outputFrame.getGrayscaleImageData();
    // ByteBuffer.array() throws UnsupportedOperationException on buffers that
    // are not array-backed; copy the bytes out in that case.
    byte[] byteBuffer;
    if (byteBufferRaw.hasArray()) {
        byteBuffer = byteBufferRaw.array();
    } else {
        byteBuffer = new byte[byteBufferRaw.remaining()];
        byteBufferRaw.duplicate().get(byteBuffer);
    }
    YuvImage yuvimage = new YuvImage(byteBuffer, ImageFormat.NV21, w, h, null);

    Face face = detectedFaces.valueAt(0);
    // Clamp the face box to the frame: positions can be negative and the box
    // can extend past the edges for partially visible faces, and an
    // out-of-bounds Rect makes compressToJpeg fail.
    int left = Math.max(0, (int) face.getPosition().x);
    int top = Math.max(0, (int) face.getPosition().y);
    int right = Math.min(w, left + (int) face.getWidth());
    int bottom = Math.min(h, top + (int) face.getHeight());

    // Crop via JPEG round-trip: YuvImage can compress a sub-rect directly.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    yuvimage.compressToJpeg(new Rect(left, top, right, bottom), 80, baos);
    byte[] jpegArray = baos.toByteArray();
    // decodeByteArray may return null on a malformed payload; keep the
    // blank fallback bitmap rather than handing null to the ImageView.
    Bitmap cropped = BitmapFactory.decodeByteArray(jpegArray, 0, jpegArray.length);
    if (cropped != null) {
        bitmap = cropped;
    }
}
((FaceTrackerActivity) mContext).setBitmapToImageView(bitmap);
Victor Gomes
  • 463
  • 6
  • 15
Andro
  • 952
  • 9
  • 19