0

We are working with the Kinect to track faces for a school project. We have set up Visual Studio 2012, and all the test programs are working correctly. However, when we try to run the code below, the application fails to start with the following error:

"The application was unable to start correctly (0xc000007b). Click OK to close the application."

The good thing is that the project finally builds. The bad thing is that the compiler doesn't report any errors, so all we have to go on is this vague runtime message.

We are completely lost and we hope that someone can help us or point us into the right direction. Thanks in advance for helping us.

The code:

#include "stdafx.h"

#include <cstring>   // memcpy
#include <iostream>

#include <Windows.h>

#include <NuiApi.h>
#include <NuiSensor.h>
#include <FaceTrackLib.h>

using namespace std;

// Kinect stream handles returned by NuiImageStreamOpen.
HANDLE rgbStream;
HANDLE depthStream;

// The single Kinect sensor this program talks to; set by initKinect().
INuiSensor* sensor;

// Color stream resolution; must match NUI_IMAGE_RESOLUTION_640x480 used below.
#define width 640
#define height 480

bool initKinect() {
    // Get a working kinect sensor
    int numSensors;
    if (NuiGetSensorCount(&numSensors) < 0 || numSensors < 1) return false;
    if (NuiCreateSensorByIndex(0, &sensor) < 0) return false;
    // Initialize sensor
    sensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH | NUI_INITIALIZE_FLAG_USES_COLOR);
    sensor->NuiImageStreamOpen(
        NUI_IMAGE_TYPE_COLOR,            // Depth camera or rgb camera?
        NUI_IMAGE_RESOLUTION_640x480,    // Image resolution
        0,      // Image stream flags, e.g. near mode
        2,      // Number of frames to buffer
        NULL,   // Event handle
        &rgbStream);
    // --------------- END CHANGED CODE -----------------
    return true;
}
// Buffer pointers filled in by getKinectDataD() and consumed by the
// IFTImage::Attach calls in main().
BYTE* dataEnd;     // color pixel data
USHORT* dataEndD;  // depth pixel data
void getKinectDataD(){
    NUI_IMAGE_FRAME imageFrame;
    NUI_LOCKED_RECT LockedRect;

    if (sensor->NuiImageStreamGetNextFrame(rgbStream, 0, &imageFrame) < 0) return;
    INuiFrameTexture* texture = imageFrame.pFrameTexture;
    texture->LockRect(0, &LockedRect, NULL, 0);

    const USHORT* curr = (const USHORT*)LockedRect.pBits;
    const USHORT* dataEnding = curr + (width*height);

    if (LockedRect.Pitch != 0)
    {
        const BYTE* curr = (const BYTE*)LockedRect.pBits;
        dataEnd = (BYTE*)(curr + (width*height) * 4);
    }

    while (curr < dataEnding) {
        // Get depth in millimeters
        USHORT depth = NuiDepthPixelToDepth(*curr++);
        dataEndD = (USHORT*)depth;
        // Draw a grayscale image of the depth:
        // B,G,R are all set to depth%256, alpha set to 1.
        }
            texture->UnlockRect(0);
        sensor->NuiImageStreamReleaseFrame(rgbStream, &imageFrame);
}

// This example assumes that the application provides
// void* cameraFrameBuffer, a buffer for an image, and that there is a method
// to fill the buffer with data from a camera, for example
// cameraObj.ProcessIO(cameraFrameBuffer)

int main(){
    initKinect();
    // Create an instance of a face tracker
    IFTFaceTracker* pFT = FTCreateFaceTracker();
    if (!pFT)
    {
        // Handle errors
    }

    // Initialize cameras configuration structures.
    // IMPORTANT NOTE: resolutions and focal lengths must be accurate, since it affects tracking precision!
    // It is better to use enums defined in NuiAPI.h

    // Video camera config with width, height, focal length in pixels
    // NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS focal length is computed for 640x480 resolution
    // If you use different resolutions, multiply this focal length by the scaling factor
    FT_CAMERA_CONFIG videoCameraConfig = { 640, 480, NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS };

    // Depth camera config with width, height, focal length in pixels
    // NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS focal length is computed for 320x240 resolution
    // If you use different resolutions, multiply this focal length by the scaling factor
    FT_CAMERA_CONFIG depthCameraConfig = { 320, 240, NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS };

    // Initialize the face tracker
    HRESULT hr = pFT->Initialize(&videoCameraConfig, &depthCameraConfig, NULL, NULL);
    if (FAILED(hr))
    {
        // Handle errors
    }

    // Create a face tracking result interface
    IFTResult* pFTResult = NULL;
    hr = pFT->CreateFTResult(&pFTResult);
    if (FAILED(hr))
    {
        // Handle errors
    }

    // Prepare image interfaces that hold RGB and depth data
    IFTImage* pColorFrame = FTCreateImage();
    IFTImage* pDepthFrame = FTCreateImage();
    if (!pColorFrame || !pDepthFrame)
    {
        // Handle errors
    }

    // Attach created interfaces to the RGB and depth buffers that are filled with
    // corresponding RGB and depth frame data from Kinect cameras
    pColorFrame->Attach(640, 480, dataEnd, FTIMAGEFORMAT_UINT8_R8G8B8, 640 * 3);
    pDepthFrame->Attach(320, 240, dataEndD, FTIMAGEFORMAT_UINT16_D13P3, 320 * 2);
    // You can also use Allocate() method in which case IFTImage interfaces own their memory.
    // In this case use CopyTo() method to copy buffers

    FT_SENSOR_DATA sensorData;
    sensorData.ZoomFactor = 1.0f;       // Not used must be 1.0

    bool isFaceTracked = false;

    // Track a face
    while (true)
    {
        // Call Kinect API to fill videoCameraFrameBuffer and depthFrameBuffer with RGB and depth data
        getKinectDataD();

        // Check if we are already tracking a face
        if (!isFaceTracked)
        {
            // Initiate face tracking.
            // This call is more expensive and searches the input frame for a face.
            hr = pFT->StartTracking(&sensorData, NULL, NULL, pFTResult);
            if (SUCCEEDED(hr))
            {
                isFaceTracked = true;
            }
            else
            {
                // No faces found
                isFaceTracked = false;
            }
        }
        else
        {
            // Continue tracking. It uses a previously known face position.
            // This call is less expensive than StartTracking()
            hr = pFT->ContinueTracking(&sensorData, NULL, pFTResult);
            if (FAILED(hr))
            {
                // Lost the face
                isFaceTracked = false;
            }
        }

        // Do something with pFTResult like visualize the mask, drive your 3D avatar,
        // recognize facial expressions
    }

    // Clean up
    pFTResult->Release();
    pColorFrame->Release();
    pDepthFrame->Release();
    pFT->Release();
    return 0;
}
  • This is a run-time error, so the compiler won't give you any hint. Run the application in a debugger to find the root cause of the run-time error. – Iuri Covalisin Oct 10 '13 at 15:24
  • First, learn to use Google. The error code means: 0xC000007B STATUS_INVALID_IMAGE_FORMAT. Most likely the issue is "that the 32-bit app tried to load a 64-bit DLL". Also, "This error message may occur on 64-bit operating systems when the Microsoft Visual C++ Redistributable Package is not properly configured." Use a debugger and the Dependency Walker app. See http://stackoverflow.com/questions/10492037/the-application-was-unable-to-start-correctly-0xc000007b – SChepurin Oct 10 '13 at 15:38

1 Answer

0

We figured it out — we were indeed linking against the wrong DLL, and it runs without errors now. But we ran into another problem: we have no idea how to use pFTResult to retrieve the face rectangle via "GetFaceRect" and the face angles. Does somebody know how?