
When looking for a way to manipulate pixels in an NSImage, I found a promising approach at https://gist.github.com/larsaugustin/af414f3637885f56c837b88c3fea1e6b

However, when I put the image into a pixel array and convert the array back to an NSImage, the result is distorted.

Original image: [original image]

Image after putting into pixel array and converting back: [image after conversion]

This is the code to reproduce the problem:

import Foundation
import AppKit

struct Pixel {
    var a: UInt8
    var r: UInt8
    var g: UInt8
    var b: UInt8
}

let fileManager : FileManager = FileManager.default
let fileURL : URL = fileManager.homeDirectoryForCurrentUser.appendingPathComponent("IMG_RGB_ORIG.png")

let imageData = try Data(contentsOf: fileURL)
let imageOrig = NSImage(data: imageData)

let pixels = imageToPixels(image: imageOrig!)

let imageConv = pixelsToImage(pixels: pixels,
                              width: Int(imageOrig!.size.width),
                              height: Int(imageOrig!.size.height))


func imageToPixels(image: NSImage) -> [Pixel] {
    var returnPixels = [Pixel]()

    let pixelData = (image.cgImage(forProposedRect: nil, context: nil, hints: nil)!).dataProvider!.data
    let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)

    for y in 0..<Int(image.size.height) {
        for x in 0..<Int(image.size.width) {
            let pos = CGPoint(x: x, y: y)

            let pixelInfo: Int = ((Int(image.size.width) * Int(pos.y) * 4) + Int(pos.x) * 4)

            let r = data[pixelInfo]
            let g = data[pixelInfo + 1]
            let b = data[pixelInfo + 2]
            let a = data[pixelInfo + 3]
            returnPixels.append(Pixel(a: a, r: r, g: g, b: b))
        }
    }
    return returnPixels
}

func pixelsToImage(pixels: [Pixel], width: Int, height: Int) -> NSImage? {
    guard width > 0 && height > 0 else { return nil }
    guard pixels.count == width * height else { return nil }

    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
    let bitsPerComponent = 8
    let bitsPerPixel = 32

    var data = pixels
    guard let providerRef = CGDataProvider(data: NSData(bytes: &data,
                            length: data.count * MemoryLayout<Pixel>.size)
        )
        else { return nil }

    guard let cgim = CGImage(
        width: width,
        height: height,
        bitsPerComponent: bitsPerComponent,
        bitsPerPixel: bitsPerPixel,
        bytesPerRow: width * MemoryLayout<Pixel>.size,
        space: rgbColorSpace,
        bitmapInfo: bitmapInfo,
        provider: providerRef,
        decode: nil,
        shouldInterpolate: true,
        intent: .defaultIntent
        )
        else { return nil }

    return NSImage(cgImage: cgim, size: CGSize(width: width, height: height))
}

I am using an M1 Mac and the result is the same with and without Rosetta. The problem already seems to occur in the conversion to the pixel array. Any ideas on how to correct the code?

  • What are `bitmapFormat`, `bitsPerPixel` etc. of the original image? – Willeke Dec 16 '22 at 10:23
  • Not sure how to check that ... The goal is that the code works for any NSImage, so if bitmapFormat, bitsPerPixel of the original image are important then the code should take that into account, right? Please note that the problem already occurs in the conversion to the pixel array. – Jekapa Dec 16 '22 at 16:20
  • Have you tried the properties of the `CGImage`? Yes, your code should take that into account. – Willeke Dec 16 '22 at 17:18
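
For reference, the bitmapFormat/bitsPerPixel question raised in the comments can be checked on the CGImage that backs the NSImage. This is only a diagnostic sketch that continues the question's script and reuses imageOrig; the printed values depend on how the PNG was encoded:

// Sketch: inspect the actual layout of the backing CGImage.
// Assumes imageOrig was loaded exactly as in the question above.
if let cgImage = imageOrig?.cgImage(forProposedRect: nil, context: nil, hints: nil) {
    print("pixel size:", cgImage.width, "x", cgImage.height)     // may differ from imageOrig!.size, which is in points
    print("bitsPerComponent:", cgImage.bitsPerComponent)
    print("bitsPerPixel:", cgImage.bitsPerPixel)
    print("bytesPerRow:", cgImage.bytesPerRow)                    // may include row padding, i.e. be larger than width * 4
    print("alphaInfo:", String(describing: cgImage.alphaInfo))    // premultiplied or not, alpha first or last
    print("byteOrder:", String(describing: cgImage.byteOrderInfo))
}

If bytesPerRow is larger than width * 4, or the pixel dimensions differ from image.size, the fixed 4-bytes-per-pixel indexing in imageToPixels would read from the wrong offsets, which would explain the distortion.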

1 Answer


In the meantime, I managed to change the pixels of an NSImage based on the approach described in Objective-C: NSBitmapImageRep SetColor, which uses the setPixel() method on an NSBitmapImageRep.

The code below illustrates how it works (please do not force-unwrap optionals in your own code).

import Foundation
import AppKit

let fileManager : FileManager = FileManager.default
let fileURL : URL = fileManager.homeDirectoryForCurrentUser.appendingPathComponent("IMG_RGB_ORIG.png")

let imgData = try Data(contentsOf: fileURL)
let imgOrig = NSImage(data: imgData)

// Create an empty RGBA bitmap rep sized from imgOrig!.size
// (note: NSImage.size is in points, so this assumes 1 point == 1 pixel, i.e. a 72-dpi image).
let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                           pixelsWide: Int(imgOrig!.size.width),
                           pixelsHigh: Int(imgOrig!.size.height),
                           bitsPerSample: 8,
                           samplesPerPixel: 4,
                           hasAlpha: true,
                           isPlanar: false,
                           colorSpaceName: .deviceRGB,
                           bytesPerRow: Int(imgOrig!.size.width) * 4,
                           bitsPerPixel: 32)

// Draw the original image into the bitmap rep via an offscreen graphics context.
let ctx = NSGraphicsContext(bitmapImageRep: rep!)
NSGraphicsContext.saveGraphicsState()
NSGraphicsContext.current = ctx
imgOrig!.draw(at: NSZeroPoint, from: NSZeroRect, operation: NSCompositingOperation.copy, fraction: 1.0)
ctx?.flushGraphics()
NSGraphicsContext.restoreGraphicsState()

for y in 0..<Int(imgOrig!.size.height) {
    for x in 0..<Int(imgOrig!.size.width) {
        // you can read the color of pixels like this:
        let color = rep!.colorAt(x: x, y: y)
        let colorR = color!.redComponent
        let colorG = color!.greenComponent
        let colorB = color!.blueComponent
        let colorA = color!.alphaComponent
    }
}

var yellowPixel : [Int] = [255, 255, 0  , 255]
var redPixel    : [Int] = [255, 0  , 0  , 255]
var greenPixel  : [Int] = [0  , 255, 0  , 255]
var bluePixel   : [Int] = [0  , 0  , 255, 255]

for y in 10..<Int(imgOrig!.size.height) {
    for x in 10..<Int(imgOrig!.size.width) {
        // you can change the color of pixels like this:
        rep!.setPixel(&yellowPixel, atX: x, y: y)
    }
}

let imgConv = NSImage(cgImage: rep!.cgImage!, size: NSSize(width: Int(imgOrig!.size.width), height: Int(imgOrig!.size.height)))
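
To check the result on disk, the modified bitmap rep can also be written out directly as a PNG, for example like this (just a sketch that continues the script above; the output file name IMG_RGB_CONV.png is arbitrary):

// Sketch: save the modified bitmap rep as a PNG next to the original.
// The file name "IMG_RGB_CONV.png" is only an example.
if let pngData = rep?.representation(using: .png, properties: [:]) {
    let outURL = fileManager.homeDirectoryForCurrentUser.appendingPathComponent("IMG_RGB_CONV.png")
    try? pngData.write(to: outURL)
}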