I have the following code, which sums over a number of nested for loops. What are some ways I can speed up its execution? I'm interested in not just one method of speeding it up; I'd really like to see a range of methods, e.g. pure Python, NumPy, SciPy, Cython, etc. (after the code I've sketched the kind of thing I mean).
This is so that, for similar but (much) more complicated code I have to write, I can choose a speed-up option that gives a good trade-off of execution speed vs. implementation complexity. Anything to save me from having to write C++ code, which would cause me to lose the will to live.
def f(a, b, c, d):
    return a + b + c + d
# 5-point Gauss-Legendre nodes and weights on [0, 1]
x = [0.04691008, 0.23076534, 0.5, 0.76923466, 0.95308992]
w = [0.11846344, 0.23931434, 0.28444444, 0.23931434, 0.11846344]
numQuadNodes = 5
def tensorGauss(func):
    total = 0.0  # 'total' rather than 'sum', to avoid shadowing the built-in
    for i in range(numQuadNodes):
        for j in range(numQuadNodes):
            for k in range(numQuadNodes):
                for l in range(numQuadNodes):
                    total += w[i] * w[j] * w[k] * w[l] * func(x[l], x[k], x[j], x[i])
    return total
print(tensorGauss(f))
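To make it concrete, here is the kind of thing I mean by the NumPy option. This is my own untested sketch, and it only works when func is built from NumPy ufuncs so that it broadcasts over arrays (which is true for the toy f above):

import numpy as np

x_np = np.asarray(x)
w_np = np.asarray(w)

def tensorGauss_numpy(func):
    # W[i, j, k, l] = w[i] * w[j] * w[k] * w[l]  (tensor product of the weights)
    W = np.einsum('i,j,k,l->ijkl', w_np, w_np, w_np, w_np)
    # 4-D grids of the nodes, indexed consistently with W
    Xi, Xj, Xk, Xl = np.meshgrid(x_np, x_np, x_np, x_np, indexing='ij')
    # evaluate func on the whole grid at once, then contract with the weights
    return np.sum(W * func(Xl, Xk, Xj, Xi))

print(tensorGauss_numpy(f))  # should agree with tensorGauss(f)

And here is the flavour of thing I'd count as the Cython-style option, using Numba as a stand-in since it needs no build step (again just a sketch on my part; with Cython the same loop would get cdef-typed variables instead). Note I've inlined f, since passing arbitrary Python functions into nopython-mode Numba takes extra care:

from numba import njit

@njit(cache=True)
def tensorGauss_numba(x_arr, w_arr):
    total = 0.0
    n = x_arr.shape[0]
    for i in range(n):
        for j in range(n):
            for k in range(n):
                for l in range(n):
                    # f(a, b, c, d) = a + b + c + d, inlined by hand
                    total += w_arr[i] * w_arr[j] * w_arr[k] * w_arr[l] \
                             * (x_arr[l] + x_arr[k] + x_arr[j] + x_arr[i])
    return total

print(tensorGauss_numba(x_np, w_np))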
Edit - More realistic code

As you can see, tensorGauss is already much faster than nquad (0.07 s vs. 20.86 s on my machine), but I'd really like some ways of making tensorGauss faster still, as I will have to compute a ton of tensorGauss evaluations! (I've put the one idea I already had at the bottom of the post.)
import numpy as np
import numpy.linalg as LA
from scipy.integrate import nquad
import time
##################################################
# Triangle vertices
##################################################
v_a_1 = np.array([[4,0,0]]).T
v_a_2 = np.array([[5,1,0]]).T
v_a_3 = np.array([[4,2,0]]).T
v_b_1 = np.array([[4,0,0]]).T
v_b_2 = np.array([[5,-1,0]]).T
v_b_3 = np.array([[4,-2,0]]).T
##################################################
# g_tau
##################################################
def g_tau():
    # constant area factor sqrt(det(J^T J)) of the map chi_tau
    J_tau = v_a_2 - v_a_1
    J_tau = np.append(J_tau, v_a_3 - v_a_2, axis=1)
    G = np.dot(J_tau.T, J_tau)
    return np.sqrt(LA.det(G))
##################################################
# g_t
##################################################
def g_t():
    # constant area factor sqrt(det(J^T J)) of the map chi_t
    J_t = v_b_2 - v_b_1
    J_t = np.append(J_t, v_b_3 - v_b_2, axis=1)
    G = np.dot(J_t.T, J_t)
    return np.sqrt(LA.det(G))
##################################################
# chi_tau
##################################################
def chi_tau(x):
    return v_a_1 + (v_a_2 - v_a_1) * x[0] + (v_a_3 - v_a_2) * x[1]
##################################################
# chi_t
##################################################
def chi_t(y):
    return v_b_1 + (v_b_2 - v_b_1) * y[0] + (v_b_3 - v_b_2) * y[1]
##################################################
# k_
##################################################
def k_(x, y):
    return LA.norm(x + y)
##################################################
# k
##################################################
def k(x, y):
    # note: g_tau() and g_t() are constants, but get recomputed on every call
    return k_(chi_tau(x), chi_t(y)) * g_tau() * g_t()
##################################################
# tensorGauss
##################################################
x = [0.04691008, 0.23076534, 0.5, 0.76923466, 0.95308992]
w = [0.11846344, 0.23931434, 0.28444444, 0.23931434, 0.11846344]
numQuadNodes = 5
def f(z, y, x, w):
    # note: the parameters x and w shadow the global quadrature nodes and
    # weights, but only inside this function
    # subregion 1
    a_1 = np.array([z, z * w]).T
    a_2 = np.array([z * x, z * x * y]).T
    res = k(a_1, a_2)
    # subregion 2
    a_1 = np.array([z * x, z * x * y]).T
    a_2 = np.array([z, z * w]).T
    res += k(a_1, a_2)
    # subregion 3
    a_1 = np.array([z * y, z * w]).T
    a_2 = np.array([z * x, z]).T
    res += k(a_1, a_2)
    return res
def tensorGauss(func):
    total = 0.0
    for i in range(numQuadNodes):
        for j in range(numQuadNodes):
            for m in range(numQuadNodes):  # renamed from k, to avoid shadowing the function k above
                for l in range(numQuadNodes):
                    total += w[i] * w[j] * w[m] * w[l] * func(x[l], x[m], x[j], x[i])
    return total
start = time.time()
tensorGauss_res = tensorGauss(f)
end = time.time()
tensorGauss_time = end - start

start = time.time()
nquad_res, err = nquad(f, [[0, 1], [0, 1], [0, 1], [0, 1]])
end = time.time()
nquad_time = end - start
print(f'tensor-gauss: {tensorGauss_res}')
print(f'nquad: {nquad_res}')
print('\n')
print(f'tensor-gauss time: {tensorGauss_time}')
print(f'nquad time: {nquad_time}')
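For what it's worth, the one pure-Python/NumPy improvement I can already see myself is that g_tau() and g_t() are constants, yet get recomputed (np.append, np.dot, det, sqrt) on every single call to k. Here is my sketch of hoisting them, plus the partial weight products, out of the loops; f would then need to call k_fast instead of k:

# hoist the constant Jacobian factors out of the quadrature loops
G_CONST = g_tau() * g_t()

def k_fast(x, y):
    # identical to k, minus the per-call recomputation of the constants
    return LA.norm(chi_tau(x) + chi_t(y)) * G_CONST

def tensorGauss_hoisted(func):
    total = 0.0
    for i in range(numQuadNodes):
        w_i = w[i]
        for j in range(numQuadNodes):
            w_ij = w_i * w[j]
            for m in range(numQuadNodes):
                w_ijm = w_ij * w[m]
                for l in range(numQuadNodes):
                    total += w_ijm * w[l] * func(x[l], x[m], x[j], x[i])
    return total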