I am attempting to reconstruct an image from its Laplacian pyramid. The reconstruction is supposed to be lossless, but when I take the L2 norm between the original image and the reconstructed image I get a loss on the order of 200. Here is the code:
import cv2
import numpy as np
from google.colab.patches import cv2_imshow

def pyramidsGL(image, num_levels):
    '''Creates Gaussian (G) and Laplacian (L) pyramids of "num_levels" levels from image.
    G and L are lists where G[i] and L[i] store the i-th level of the Gaussian and
    Laplacian pyramid, respectively.'''
    G = [image]
    L = []
    curr_img = image
    for i in range(num_levels - 1):
        width = curr_img.shape[1]
        height = curr_img.shape[0]
        downsample_dimension = (int(width * 0.5), int(height * 0.5))
        upsample_dimension = (width, height)
        # Smooth and downsample to get the next Gaussian level.
        blurred_image = cv2.GaussianBlur(curr_img, (5, 5), 10)
        downsampled_image = cv2.resize(blurred_image, downsample_dimension)
        G.append(downsampled_image.astype(np.uint8))
        # Upsample the coarse level and smooth it to predict the current level.
        upsampled_image = cv2.resize(downsampled_image, upsample_dimension)
        smoothened_upsampled = cv2.GaussianBlur(upsampled_image, (5, 5), 10)
        # The Laplacian level is the residual between the image and the prediction.
        residual = curr_img / 255 - smoothened_upsampled / 255
        L.append(residual * 255)
        curr_img = downsampled_image
    # Store the coarsest Gaussian level as the last entry of L.
    L.append(downsampled_image.astype(np.uint8))
    return G, L
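As a sanity check on the decomposition itself I verified that each Gaussian level equals the corresponding Laplacian level plus the smoothed upsampling of the next level, which is how the residual is defined above. This is a quick sketch of my own, reusing the same 5x5 kernel with sigma 10 and the gray_img defined in the main section below:

# Sanity check (my own sketch): G[i] should equal L[i] plus the smoothed
# upsampling of G[i+1], since L[i] was defined as exactly that residual.
G, L = pyramidsGL(gray_img, 5)
for i in range(len(L) - 1):
    up = cv2.resize(G[i + 1], (G[i].shape[1], G[i].shape[0]))
    pred = cv2.GaussianBlur(up, (5, 5), 10)
    err = np.abs(G[i].astype(np.float64) - (L[i] + pred))
    print(i, err.max())  # expect only floating-point noise here

This check passes for every level, so the pyramid itself seems internally consistent.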
def reconstructLaplacianPyramid(L):
    '''Given a Laplacian pyramid L with 5 levels, reconstruct the image.'''
    # Start from the coarsest level: upsample and smooth the stored Gaussian
    # level, then add back the residual.
    w = L[3].shape[1]
    h = L[3].shape[0]
    dim = (w, h)
    upsampled_image = cv2.resize(L[4], dim)
    smoothened_upsampled = cv2.GaussianBlur(upsampled_image, (5, 5), 10)
    new_g = L[3] / 255 + smoothened_upsampled / 255
    new_g = new_g * 255
    size_L = len(L)
    # Repeat for the remaining levels, from coarse to fine.
    for ele in range(size_L - 2, 0, -1):
        w = L[ele - 1].shape[1]
        h = L[ele - 1].shape[0]
        dim = (w, h)
        upsampled_image = cv2.resize(new_g, dim)
        smoothened_upsampled = cv2.GaussianBlur(upsampled_image, (5, 5), 10)
        new_g = L[ele - 1] / 255 + smoothened_upsampled / 255
        new_g = new_g * 255
    return new_g
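One thing I noticed while debugging: in pyramidsGL the Gaussian blur runs on uint8 arrays, while here new_g is float64 by the time it is blurred, and cv2.GaussianBlur rounds uint8 results to integers. A minimal sketch of my own showing that the two paths disagree:

# Sketch (my own): the same blur on uint8 vs. float64 input differs by
# up to ~0.5 per pixel, because the uint8 path rounds to integers.
a = np.random.randint(0, 256, (32, 32), dtype=np.uint8)
b_int = cv2.GaussianBlur(a, (5, 5), 10).astype(np.float64)
b_flt = cv2.GaussianBlur(a.astype(np.float64), (5, 5), 10)
print(np.abs(b_int - b_flt).max())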
## main:
# Load the image, build the pyramids, reconstruct, and compare.
image = cv2.imread('/content/data/Afghan_girl_after.jpg')
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
G, L = pyramidsGL(gray_img, 5)
reconstructed_img = reconstructLaplacianPyramid(L)
cv2_imshow(reconstructed_img)
cv2_imshow(gray_img)
L2_loss_2 = np.linalg.norm(reconstructed_img - gray_img)
print(L2_loss_2)
The images look visually identical and have the same dimensions, but comparing the two matrices element-wise shows pixel values that differ by about 1.
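To quantify this, I compared the matrices directly (my own snippet):

# Inspect where the reconstruction deviates from the original.
diff = reconstructed_img.astype(np.float64) - gray_img.astype(np.float64)
print(np.abs(diff).max(), np.count_nonzero(diff))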