When looking for a way to manipulate pixels in an NSImage, I found a promising approach at https://gist.github.com/larsaugustin/af414f3637885f56c837b88c3fea1e6b
However, when I put the image into a pixel array and convert the array back to an NSImage, the result is distorted.
[Image: the result after converting into the pixel array and back, visibly distorted]
This is the code to reproduce the problem:
import Foundation
import AppKit

// One pixel, stored alpha-first: one byte each for a, r, g, b.
struct Pixel {
    var a: UInt8
    var r: UInt8
    var g: UInt8
    var b: UInt8
}
let fileManager: FileManager = FileManager.default
let fileURL: URL = fileManager.homeDirectoryForCurrentUser.appendingPathComponent("IMG_RGB_ORIG.png")
let imageData = try Data(contentsOf: fileURL)
let imageOrig = NSImage(data: imageData)

let pixels = imageToPixels(image: imageOrig!)
let imageConv = pixelsToImage(pixels: pixels,
                              width: Int(imageOrig!.size.width),
                              height: Int(imageOrig!.size.height))
func imageToPixels(image: NSImage) -> [Pixel] {
    var returnPixels = [Pixel]()
    // Raw bytes of the CGImage's backing bitmap.
    let pixelData = image.cgImage(forProposedRect: nil, context: nil, hints: nil)!.dataProvider!.data
    let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
    for y in 0..<Int(image.size.height) {
        for x in 0..<Int(image.size.width) {
            // Index assuming 4 bytes per pixel and a row stride of width * 4.
            let pixelInfo = (Int(image.size.width) * y * 4) + x * 4
            let r = data[pixelInfo]
            let g = data[pixelInfo + 1]
            let b = data[pixelInfo + 2]
            let a = data[pixelInfo + 3]
            returnPixels.append(Pixel(a: a, r: r, g: g, b: b))
        }
    }
    return returnPixels
}
func pixelsToImage(pixels: [Pixel], width: Int, height: Int) -> NSImage? {
    guard width > 0 && height > 0 else { return nil }
    guard pixels.count == width * height else { return nil }

    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    // Alpha first, premultiplied, default (big-endian) byte order: the bytes
    // are interpreted as A, R, G, B, matching the field order of Pixel.
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
    let bitsPerComponent = 8
    let bitsPerPixel = 32

    var data = pixels
    guard let providerRef = CGDataProvider(data: NSData(bytes: &data,
                                                        length: data.count * MemoryLayout<Pixel>.size))
    else { return nil }

    guard let cgim = CGImage(width: width,
                             height: height,
                             bitsPerComponent: bitsPerComponent,
                             bitsPerPixel: bitsPerPixel,
                             bytesPerRow: width * MemoryLayout<Pixel>.size,
                             space: rgbColorSpace,
                             bitmapInfo: bitmapInfo,
                             provider: providerRef,
                             decode: nil,
                             shouldInterpolate: true,
                             intent: .defaultIntent)
    else { return nil }

    return NSImage(cgImage: cgim, size: CGSize(width: width, height: height))
}
I am using an M1 Mac, and the result is the same with and without Rosetta. The problem seems to occur already in the conversion into the pixel array. Any ideas on how to correct the code?
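For reference, here is a sketch of how I imagine a corrected imageToPixels could look. It is untested against my PNG and rests on two assumptions: first, that NSImage.size is in points rather than pixels, so on a Retina ("@2x") image the backing bitmap is larger than size suggests; second, that the CGImage's bytesPerRow may be padded beyond width * 4. The sketch therefore takes all geometry from the CGImage itself and redraws it into a CGContext with a known byte layout (8-bit, premultiplied, alpha first) instead of assuming the decoded PNG is stored as RGBA:

func imageToPixels(image: NSImage) -> [Pixel] {
    guard let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: nil) else { return [] }

    // Pixel dimensions of the backing bitmap (not the point size).
    let width = cgImage.width
    let height = cgImage.height

    // Let CoreGraphics allocate the buffer; bytesPerRow: 0 means "compute and
    // report your own row stride", which may be wider than width * 4.
    guard let context = CGContext(data: nil,
                                  width: width,
                                  height: height,
                                  bitsPerComponent: 8,
                                  bytesPerRow: 0,
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
    else { return [] }
    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))

    guard let base = context.data?.assumingMemoryBound(to: UInt8.self) else { return [] }
    let bytesPerRow = context.bytesPerRow

    var pixels = [Pixel]()
    pixels.reserveCapacity(width * height)
    for y in 0..<height {
        for x in 0..<width {
            let offset = y * bytesPerRow + x * 4
            // premultipliedFirst with default (big-endian) byte order lays the
            // bytes out as A, R, G, B, matching the Pixel struct.
            pixels.append(Pixel(a: base[offset],
                                r: base[offset + 1],
                                g: base[offset + 2],
                                b: base[offset + 3]))
        }
    }
    return pixels
}

If that reasoning is right, the round trip would also need to pass the CGImage's pixel dimensions to pixelsToImage instead of the point size:

let cg = imageOrig!.cgImage(forProposedRect: nil, context: nil, hints: nil)!
let pixels = imageToPixels(image: imageOrig!)
let imageConv = pixelsToImage(pixels: pixels, width: cg.width, height: cg.height)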