I am trying to get a view to position on top of an image. I have an xCode App with an iPhone image in an UIImageView and a UIView that positions itself on top of image to show iPhone screens. It worked in earlier releases because the image was a static size, but now I need the image to scale to fill in the screen for new devices like iPhone X. Think Aspect Fit is part of the answer. I need the view in question to position properly across all iPhone devices and the simulators. I have some code that seems to work for devices but then that same code works differently in the Simulators. I need the code to work the same in the simulators and devices as I do not have all devices to test with.
How can I, either through the story board or code, position a rectangle on top of an image that is being scaled by the system? Need a universal solution that works across devices and in the simulator.
Am including rough code samples I have been experimenting with.
- The WeedplatesViewController.swift file contains the code for devices that seems to position properly for devices and then similar code I copied and was tweaking to test simulators that doesn’t position properly. Have a UIImage extension to create an image of the view and then some code to look for the black rectangle in the image. Using pixel comparison code found here on stack overflow.
On the Storyboard is the Weedpaper view controller that has a the Weedpaper title, "for Apple iPhone" text, the iPhone Image, the UIView that I want to position properly on top of the iPhone image, "number of Weedpapers installed" text and a row of autosizing buttons along the bottom.
- First got a tough lesson using the story board constraints to position the rectangle and it seems could get it to work in the story board but did not work on devices or the simulators.
- Tried hard coding the position it seemed worked on devices but did not work on the simulator and vice versa and takes way to long to test and is obviously not the right way to do it.
- Next modified the png file putting a black (255,255,255) rectangle in the iPhone image file, then in code, copied that view to a UIImage and tried looking for the black rectangle pixels that represent the frame. Once the pixels are found then should be able to position the view using those coordinates. But I am getting different screenshot bitmaps from devices vs simulators.
- Also tried using AVMakeRect(aspectRatio: CGSize(width: w, height: h), insideRect: self.uiImageView.bounds) to no avail.
Need the UIView to position on top of the image which is being scaled by the system across devices and the simulator.
override func viewDidAppear(_ animated: Bool) {
if (UIDevice.modelName.contains("Simulator"))
{
deviceName = ProcessInfo.init().environment["SIMULATOR_DEVICE_NAME"] ?? "NoN"
bSimulator = true
}
else
{
deviceName = UIDevice.modelName
bSimulator = false
}
print("deviceName:", deviceName)
print("simulator:", bSimulator)
var frame:CGRect!
if bSimulator{
frame = self.getImageRectForSimulators()
}
else
{
frame = self.getImageRectForDevices()
}
self.uiViewPhotos.frame = frame
self.uiViewPhotos.isHidden = false
}
func getImageRectForDevices() -> CGRect
{
// Write the view to an image so we can get the positioning rectangle
// Positioning Rectangle is a black rectangle in the image
// it has the only true black pixels in the image
let imgView:UIImageView = self.uiImageView
let img:UIImage = self.uiImageView.asImage()
// Write to the photo album for testing
//UIImageWriteToSavedPhotosAlbum(img, nil, nil, nil)
var pixelData = img.cgImage!.dataProvider!.data
var data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let maxX = img.size.width
let maxY = img.size.height
let halfwayX = maxX / 2
let halfwayY = maxY / 2
let screenScale = UIScreen.main.scale
let scaleFactorX = img.scale
let scaleFactorY = img.scale * screenScale
let screenFactor = UIScreen.main.bounds.width/UIScreen.main.bounds.height
let imgViewFactor = self.uiImageView.frame.width/self.uiImageView.frame.height
var pnt:CGPoint = CGPoint(x: -1, y: -1)
var pixelInfo: Int = -1
var r:CGFloat!, g:CGFloat!
var b:CGFloat!, a:CGFloat!
var uiColor:UIColor!
var v1:CGFloat!, v2:CGFloat!
var v3:CGFloat!, v4:CGFloat!
var newRect:CGRect!
// Find this color in the image to locate the black pixel frame
// use that to size the view accordingly
// Seems to change among devices so round use black color
let uiColor_phoneFramePixelColor = UIColor(red:0.0, green:0.0, blue:0.0, alpha:1.0)
// Device code
for i in stride(from: halfwayX*scaleFactorX, to: 0, by: -1)
{
pnt.x = i
pnt.y = halfwayY*scaleFactorY
pixelInfo = ((Int(img.size.width) * Int(pnt.y)) + Int(pnt.x)) * 4
r = CGFloat(data[pixelInfo])/CGFloat(255.0)
g = CGFloat(data[pixelInfo+1])/CGFloat(255.0)
b = CGFloat(data[pixelInfo+2])/CGFloat(255.0)
a = CGFloat(data[pixelInfo+3])/CGFloat(255.0)
uiColor = UIColor(red: r, green: g, blue: b, alpha: a)
print("searching for i black pixel at i, y:", i, pnt.y, 255.0*r, 255.0*g, 255.0*b, a)
if (uiColor == uiColor_phoneFramePixelColor)
{
v1 = i
print("found i pixel at:", i)
break
}
}
print(" ")
// find top y pixel
// Device code
for j in stride(from: halfwayY*scaleFactorY, to: 0, by: -1)
{
pnt.x = halfwayX*scaleFactorX
pnt.y = j
pixelInfo = ((Int(img.size.width) * Int(pnt.y)) + Int(pnt.x)) * 4
r = CGFloat(data[pixelInfo])/CGFloat(255.0)
g = CGFloat(data[pixelInfo+1])/CGFloat(255.0)
b = CGFloat(data[pixelInfo+2])/CGFloat(255.0)
a = CGFloat(data[pixelInfo+3])/CGFloat(255.0)
uiColor = UIColor(red: r, green: g, blue: b, alpha: a)
print("searching for j black pixel at j, x:", j, pnt.x, 255.0*r, 255.0*g, 255.0*b, a)
if (uiColor == uiColor_phoneFramePixelColor)
{
v2 = j
print("found j pixel at:", j)
break
}
}
print(" ")
// Find bottom x pixel
// Device code
for k in stride(from: halfwayX*scaleFactorX, to: maxX*scaleFactorX, by: 1)
{
pnt.x = k
pnt.y = halfwayY
pixelInfo = ((Int(img.size.width) * Int(pnt.y)) + Int(pnt.x)) * 4
r = CGFloat(data[pixelInfo])/CGFloat(255.0)
g = CGFloat(data[pixelInfo+1])/CGFloat(255.0)
b = CGFloat(data[pixelInfo+2])/CGFloat(255.0)
a = CGFloat(data[pixelInfo+3])/CGFloat(255.0)
uiColor = UIColor(red: r, green: g, blue: b, alpha: a)
print("searching for k black pixel at k, y:", k, pnt.y, 255.0*r, 255.0*g, 255.0*b, a)
if (uiColor == uiColor_phoneFramePixelColor)
{
v3 = k
print("found bottom k pixel at:", k)
break
}
}
print(" ")
// Find bottom y pixel
// Device code
for l in stride(from: halfwayY*scaleFactorY, to: maxY*scaleFactorY, by: 1)
{
pnt.x = halfwayX
pnt.y = l
pixelInfo = ((Int(img.size.width) * Int(pnt.y)) + Int(pnt.x)) * 4
r = CGFloat(data[pixelInfo])/CGFloat(255.0)
g = CGFloat(data[pixelInfo+1])/CGFloat(255.0)
b = CGFloat(data[pixelInfo+2])/CGFloat(255.0)
a = CGFloat(data[pixelInfo+3])/CGFloat(255.0)
uiColor = UIColor(red: r, green: g, blue: b, alpha: a)
print("searching for l black pixel at l, x:", l, pnt.x, 255.0*r, 255.0*g, 255.0*b, a)
if (uiColor == uiColor_phoneFramePixelColor)
{
v4 = l
print("found bottom l pixel at:", l)
break
}
}
print(" ")
// this is the Black Rectangle from the bitmap of the orginal image
let w = (v3 - v1)
let h = (v4 - v2)
newRect = CGRect(x: v1/scaleFactorX, y: v2/scaleFactorY, width: w/scaleFactorX, height: h/scaleFactorY)
print("calculated rectangle:", newRect)
return newRect
}
extension UIView {
func asImage()-> UIImage
{
// Get an image of the view. Apple discourages using UIGraphicsBeginImageContext
// Starting with IOS 10 UIGraphicsBeginImageContext is sRBG only and 32 bit only.
// Use UIGraphicsImageRenderer
if #available(iOS 10.0, *) {
let renderer = UIGraphicsImageRenderer(bounds: bounds)
let renderFormat = UIGraphicsImageRendererFormat.default()
renderFormat.opaque = false
let renderedImage = renderer.image {
rendererContext in
layer.render(in: rendererContext.cgContext)
}
return renderedImage
}
else{
UIGraphicsBeginImageContext(self.frame.size)
self.layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return UIImage(cgImage: image!.cgImage!)
}
}
}