See Technical Q&A QA1509, which basically advises:
- create a pixel buffer (by creating a `CGContextRef` of a predefined format and drawing your image into it);
- manipulate the bytes within that pixel buffer as you want; and
- create the resulting image with `CGBitmapContextCreateImage`.
E.g.
/** Round-trips an image through an RGBA8 bitmap context so its pixel bytes
 *  can be manipulated in place before the result image is created.
 *
 *  @param image The source image. Must be backed by a CGImage.
 *  @return The converted image, or nil if the image could not be processed.
 */
- (UIImage *)convertImage:(UIImage *)image
{
    // get image; a CIImage-backed (or nil) UIImage has no CGImage — bail out
    // rather than building a context from garbage dimensions
    CGImageRef imageRef = image.CGImage;
    if (!imageRef) {
        return nil;
    }

    // prepare context
    CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big; // bytes in RGBA order
    NSInteger width = CGImageGetWidth(imageRef);
    NSInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(NULL, width, height, 8, 4 * width, colorspace, bitmapInfo);
    if (!context) {
        // context creation can fail (e.g. zero-sized image, unsupported
        // parameter combination); don't dereference NULL below
        CGColorSpaceRelease(colorspace);
        return nil;
    }

    // draw image to that context
    CGRect rect = CGRectMake(0, 0, width, height);
    CGContextDrawImage(context, rect, imageRef);
    uint8_t *buffer = CGBitmapContextGetData(context);

    // ... manipulate pixel buffer bytes here ...

    // get image from buffer
    CGImageRef outputImage = CGBitmapContextCreateImage(context);
    UIImage *result = [UIImage imageWithCGImage:outputImage scale:image.scale orientation:image.imageOrientation];

    // clean up
    CGImageRelease(outputImage);
    CGContextRelease(context);
    CGColorSpaceRelease(colorspace);
    return result;
}
For example, here is a B&W conversion routine:
/** Convert the image to B&W as a single (non-parallel) task.
 *
 * This assumes the pixel buffer is in RGBA, 8 bits per pixel format.
 *
 * @param buffer The pixel buffer.
 * @param width The image width in pixels.
 * @param height The image height in pixels.
 */
- (void)convertToBWSimpleInBuffer:(uint8_t *)buffer width:(NSInteger)width height:(NSInteger)height
{
    // A row-major RGBA buffer is just width * height contiguous 4-byte
    // pixels, so a single flat walk is equivalent to the row/column loops.
    NSInteger pixelCount = width * height;
    uint8_t *pixel = buffer;
    for (NSInteger i = 0; i < pixelCount; i++, pixel += 4) {
        // Rec. 709 luma weights; the alpha byte (pixel[3]) is left untouched.
        uint8_t luma = 0.2126 * pixel[0] + 0.7152 * pixel[1] + 0.0722 * pixel[2];
        pixel[0] = luma;
        pixel[1] = luma;
        pixel[2] = luma;
    }
}
If you want to improve performance, you can use dispatch_apply
and stride through your image buffer in parallel:
/** Convert the image to B&W, using GCD to split the conversion into several concurrent GCD tasks.
 *
 * This assumes the pixel buffer is in RGBA, 8 bits per pixel format.
 *
 * @param buffer The pixel buffer.
 * @param width The image width in pixels.
 * @param height The image height in pixels.
 * @param count How many GCD tasks should the conversion be split into.
 *        Clamped to [1, height], so any value is safe.
 */
- (void)convertToBWConcurrentInBuffer:(uint8_t *)buffer width:(NSInteger)width height:(NSInteger)height count:(NSInteger)count
{
    // Clamp to a sane task count: at least 1 (avoids a division by zero when
    // count <= 0 or count > height made the old stride 0), and no more tasks
    // than there are rows.
    NSInteger tasks = MAX((NSInteger)1, MIN(count, height));
    // Round the stride UP so that tasks * stride >= height. The previous
    // floor division (height / count) combined with dispatch_apply(height /
    // stride, ...) silently dropped the final partial chunk of rows, e.g.
    // height = 10, count = 3 left row 9 unconverted.
    NSInteger stride = (height + tasks - 1) / tasks;
    dispatch_queue_t queue = dispatch_queue_create("com.domain.app", DISPATCH_QUEUE_CONCURRENT);
    dispatch_apply(tasks, queue, ^(size_t idx) {
        NSInteger rowStart = (NSInteger)idx * stride;
        NSInteger rowStop = MIN(rowStart + stride, height);  // last chunk may be short
        for (NSInteger row = rowStart; row < rowStop; row++) {
            for (NSInteger col = 0; col < width; col++) {
                NSUInteger offset = (col + row * width) * 4;
                uint8_t *pixel = buffer + offset;
                // get the channels
                uint8_t red = pixel[0];
                uint8_t green = pixel[1];
                uint8_t blue = pixel[2];
                uint8_t alpha = pixel[3];
                // update the channels with result (Rec. 709 luma weights)
                uint8_t gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue;
                pixel[0] = gray;
                pixel[1] = gray;
                pixel[2] = gray;
                pixel[3] = alpha;
            }
        }
    });
}