Convolution is supposed to be commutative and associative, so ((f * g) * h)(x) = (f * (g * h))(x) = ((f * h) * g)(x) etc. I've found that this isn't the case with some implementations. Here is the test I did:
import time
import cv2
import numpy as np
from scipy import signal
from scipy import ndimage
def convolve_1(image, kernel):
    """True 2-D convolution via OpenCV.

    cv2.filter2D computes correlation, so the kernel is flipped on both
    axes first to obtain mathematical convolution.  Output is the same
    size as `image` ('same' mode), with zeros assumed outside the border.
    """
    # flipCode=-1 flips around both the x and y axes (180-degree rotation)
    flipped_kernel = cv2.flip(kernel, -1)
    return cv2.filter2D(image, cv2.CV_64FC1, flipped_kernel,
                        borderType=cv2.BORDER_CONSTANT)
def convolve_2(image, kernel):
    """True 2-D convolution via scipy.signal.convolve2d (direct method).

    Returns the 'full' output (shape grows by kernel size - 1 per axis),
    treating everything outside the image as zero.
    """
    result = signal.convolve2d(image, kernel,
                               mode='full', boundary='fill', fillvalue=0)
    return result
def convolve_3(image, kernel):
    """True 2-D convolution via scipy.signal.fftconvolve.

    FFT-based, so results carry small floating-point error (~1e-13 for
    float64), but output size and semantics match convolve2d's 'full' mode.
    """
    return signal.fftconvolve(image, kernel, mode='full')
def convolve_4(image, kernel):
    """True 2-D convolution via scipy.signal.oaconvolve (overlap-add).

    FFT-based like fftconvolve but processes the input in blocks; same
    'full'-mode output semantics.
    """
    full_result = signal.oaconvolve(image, kernel, mode='full')
    return full_result
def convolve_5(image, kernel):
    """True 2-D convolution via scipy.ndimage.convolve.

    Output has the same shape as `image` ('same'-style), with values
    outside the border treated as the constant 0.
    """
    return ndimage.convolve(image, kernel, mode='constant', cval=0)
# Display name -> convolution callable(image, kernel) -> ndarray.
# main() iterates this mapping to benchmark every backend on identical
# inputs; insertion order determines the order results are printed.
CONVOLUTION_FUNCTIONS = {
    'cv2.filter2D': convolve_1,
    'scipy.signal.convolve2d': convolve_2,
    'scipy.signal.fftconvolve': convolve_3,
    'scipy.signal.oaconvolve': convolve_4,
    'scipy.ndimage.convolve': convolve_5,
}
def main():
    """Benchmark several 2-D convolution backends for associativity.

    Loads a grayscale test image and, for each backend registered in
    CONVOLUTION_FUNCTIONS, convolves it with two kernels in three
    mathematically equivalent orders.  Prints the mean absolute
    differences between the orders (which should all be 0 for exact
    convolution) and the wall-clock time taken.

    Raises:
        FileNotFoundError: if 'bird.jpg' cannot be read.
    """
    image = cv2.imread('bird.jpg', cv2.IMREAD_GRAYSCALE)
    if image is None:
        # cv2.imread signals failure by returning None instead of raising,
        # which would otherwise surface as a cryptic AttributeError below.
        raise FileNotFoundError("could not read 'bird.jpg'")
    # float64 to rule out precision loss as a source of discrepancy
    image = image.astype(np.float64)

    # 3x3 sharpening kernel
    kernel_1 = np.array([[0, -1, 0],
                         [-1, 5, -1],
                         [0, -1, 0]], dtype=np.float64)
    # 5x5 edge-enhancement kernel
    kernel_2 = np.array([[ 0,  0, -1,  0,  0],
                         [ 0, -1, -2,  0,  0],
                         [-1, -2, 16, -2, -1],
                         [ 0, -1, -2, -1,  0],
                         [ 0,  0, -1,  0,  0]], dtype=np.float64)

    for function_name, convolve in CONVOLUTION_FUNCTIONS.items():
        print(function_name)
        start = time.perf_counter()
        # these results should all be the same by associativity/commutativity
        result_1 = convolve(image, kernel_1)
        result_1 = convolve(result_1, kernel_2)
        result_2 = convolve(image, kernel_2)
        result_2 = convolve(result_2, kernel_1)
        result_3 = convolve(image, convolve(kernel_2, kernel_1))
        print('error 1:', np.mean(abs(result_1 - result_2)))
        print('error 2:', np.mean(abs(result_1 - result_3)))
        print('error 3:', np.mean(abs(result_2 - result_3)))
        print('time taken:', time.perf_counter() - start)
        print()


if __name__ == '__main__':
    main()
Here's the image I used. Here's the output:
cv2.filter2D
error 1: 0.8387194444444445
error 2: 290.7564638888889
error 3: 291.58391666666665
time taken: 0.022129999999999983
scipy.signal.convolve2d
error 1: 0.0
error 2: 0.0
error 3: 0.0
time taken: 0.19091079999999994
scipy.signal.fftconvolve
error 1: 5.110803021852462e-13
error 2: 9.780014690145642e-13
error 3: 9.461453672609482e-13
time taken: 0.15763480000000007
scipy.signal.oaconvolve
error 1: 5.656074741440057e-13
error 2: 1.0786530017634777e-12
error 3: 1.1119329649131375e-12
time taken: 0.2334368
scipy.ndimage.convolve
error 1: 0.8387194444444445
error 2: 290.7564638888889
error 3: 291.58391666666665
time taken: 0.040781100000000015
It looks like the more accurate functions are slower.
I converted to float64 in case it was a precision error, but that didn't help.
I would like to know why the errors are so large. I would also like to know if there is an implementation (preferably in opencv) that is both accurate and fast, or if this is just an inherent limitation of convolution functions.