I am trying to create a 16-bit greyscale ring simulation and for some reason, it just does not work.
Let me explain, at the beginning, I wrote it in 8-bit format, and then I realized I need it in 16-bit. The code I wrote for the 8-bit format worked just fine and is presented below:
from PIL import Image, ImageFilter
from scipy.ndimage import convolve, gaussian_filter
import NumPy as np
def theta(x, y, phi):
if np.angle(x - y*1j, deg=True) - phi > 180:
return 1*(np.angle(x - y*1j, deg=True) - phi - 360)
if np.angle(x - y*1j, deg=True) - phi < -180:
return 1*(np.angle(x - y*1j, deg=True) - phi + 360)
else:
return np.angle(x - y*1j, deg=True) - phi
# FWHM = 2.355*Sigma
# Simulation Parameters:
Intensity = 190 # Light intensity.
SIG = 1.666/2.355 # Sigma of radial Gaussian.
SIG1 = 45 # Sigma of first azimuthal Gaussian.
SIG2 = 25 # Sigma of second azimuthal Gaussian.
SIG3 = 10 # Sigma of third azimuthal Gaussian.
r0 = 8 # Radius of reference of radial Gaussian.
theta1 = 31 # Angle of reference of first azimuthal Gaussian.
theta2 = 157 # Angle of reference of second azimuthal Gaussian.
theta3 = -105 # Angle of reference of third azimuthal Gaussian.
# PSF Parameters:
Kernel = MakeGaussian(10, 1.666) # Convolution kernel.
# Noise Parameters:
offset = 1 # Gaussian noise amplitude.
Ex = 10 # Gaussian noise expectation. (3*Var)
Var = 7 # Gaussian noise variance.
# Frame Parameters:
t = 1 # Number of frames.
w, h = 300, 300 # Frame size.
data = np.zeros((t, h, w), dtype=np.uint8)
noise = np.zeros((t, h, w), dtype=np.uint8)
for l in range(t):
for i in range(w):
for k in range(h):
r = np.sqrt((i - w / 2) ** 2 + (k - h / 2) ** 2)
data[l][i][k] = Intensity * np.exp(-((r - r0)**2)/(2*SIG**2)) * 1 * (np.exp(-((theta(k - w / 2, i - h / 2, theta1))**2)/(2*SIG1**2)) + np.exp(-((theta(k - w / 2, i - h / 2, theta2))**2)/(2*SIG2**2)) + np.exp(-((theta(k - w / 2, i - h / 2, theta3))**2)/(2*SIG3**2)) )
noise[l][i][k] = offset * (1/np.sqrt(2 * np.pi * Var**2)) * np.random.normal(Ex, Var)
pic = gaussian_filter(data[l], 1.666, 0) + noise[l]
img = Image.fromarray(pic, 'L')
img.save('%s.tiff' % l, format="tiff")
Now, when I am naively trying to make this code create 16-bit images by swapping to dtype='uint.16' it all goes to hell.
I would appreciate it if anyone can shed some light on what I should do to fix this problem.