How can I convert two bytes (UInt8) to a half-precision (16-bit) Float in Swift, such as needed when reading the output of CIAreaHistogram with the kCIFormatRGBAh, as in the following example:
func areaHistogram(image : UIImage) {
let inputImage = CIImage(image: image)
let totalBytes : Int = bpp * BINS //8 * 64 for example
let bitmap : UnsafeMutablePointer<Void> = calloc(totalBytes, bpp)
let filter = CIFilter(name: "CIAreaHistogram")!
filter.setValue(inputImage, forKey: kCIInputImageKey)
filter.setValue(CIVector(x: 0, y: 0, z: image.size.width, w: image.size.height), forKey: kCIInputExtentKey)
filter.setValue(BINS, forKey: "inputCount")
filter.setValue(1, forKey: "inputScale")
let myEAGLContext = EAGLContext(API: .OpenGLES2)
let options = [kCIContextWorkingColorSpace : kCFNull]
let context : CIContext = CIContext(EAGLContext: myEAGLContext, options: options)
context.render(filter.outputImage!, toBitmap: bitmap, rowBytes: totalBytes, bounds: filter.outputImage!.extent, format: kCIFormatRGBAh, colorSpace: CGColorSpaceCreateDeviceRGB())
let bytes = UnsafeBufferPointer<UInt8>(start: UnsafePointer<UInt8>(bitmap), count: bpp * BINS)
//HOW TO CONVERT TWO CONSECUTIVE BYTES AS 16-BIT FLOATS?
//THIS CODE DOES NOT WORK (I guess because Float in Swift is 32-bit):
for var i=0; i < self.bpp * self.BINS; i+=self.bpp {
let bitsR = UnsafePointer<Float._BitsType>(self.queryHist!)[i+0].bigEndian
let R = Float( Float._fromBitPattern(bitsR) )
let bitsG = UnsafePointer<Float._BitsType>(self.queryHist!)[i+2].bigEndian
let G = Float( Float._fromBitPattern(bitsG) )
let bitsB = UnsafePointer<Float._BitsType>(self.queryHist!)[i+4].bigEndian
let B = Float( Float._fromBitPattern(bitsB) )
print("R/G/B = \(R) \(G) \(B)")
}
free(bitmap)
}