We could use a vectorized version of linspace: create_ranges
-
# https://stackoverflow.com/a/40624614/ @Divakar
def create_ranges(start, stop, N, endpoint=True):
    """Vectorized np.linspace: N evenly spaced samples per (start[i], stop[i]) pair.

    Parameters
    ----------
    start, stop : 1D array-like, equal length
        Per-row interval boundaries.
    N : int
        Number of samples generated for each row.
    endpoint : bool, optional
        If True, stop[i] is the last sample of row i (np.linspace semantics).

    Returns
    -------
    ndarray of shape (len(start), N).
    """
    # Accept plain sequences too; the [:, None] slicing below needs ndarrays.
    start = np.asarray(start)
    stop = np.asarray(stop)
    # With the endpoint included there are only N-1 steps between samples.
    divisor = N - 1 if endpoint else N
    steps = (1.0 / divisor) * (stop - start)
    # Broadcast: one row of N samples per start/stop pair.
    return steps[:, None] * np.arange(N) + start[:, None]
def ranges_based(a, N):
    """Insert N-1 linearly interpolated samples between consecutive elements of `a`.

    Each adjacent pair contributes N-1 samples (endpoint excluded so rows do
    not overlap); the final element of `a` is appended to close the sequence.
    """
    grid = create_ranges(a[:-1], a[1:], N - 1, endpoint=False)
    tail = [a[-1]]
    return np.concatenate((grid.ravel(), tail))
Sample run -
In [151]: a
Out[151]: array([1, 4, 2])
In [152]: ranges_based(a,N=5)
Out[152]: array([1. , 1.75, 2.5 , 3.25, 4. , 3.5 , 3. , 2.5 , 2. ])
Benchmarking for vectorized solutions
# @Psidom's soln
def interp_based(a, N=5):
    """Interpolation-based solution: np.interp over a dense index grid.

    Places the original samples of `a` every N-1 positions and lets np.interp
    fill the gaps linearly.
    """
    step = N - 1
    total = (a.size - 1) * step + 1  # total length after interpolation
    xp = np.arange(0, total, step)   # index positions of the original samples
    return np.interp(np.arange(total), xp, a)
Timings on large arrays with interval length 5 -
In [199]: np.random.seed(0)
In [200]: a = np.random.randint(0,10,(10000))
In [201]: %timeit interp_based(a,N=5)
...: %timeit ranges_based(a,N=5)
1000 loops, best of 3: 318 µs per loop
1000 loops, best of 3: 227 µs per loop
In [202]: np.random.seed(0)
In [203]: a = np.random.randint(0,10,(100000))
In [204]: %timeit interp_based(a,N=5)
...: %timeit ranges_based(a,N=5)
100 loops, best of 3: 3.39 ms per loop
100 loops, best of 3: 2.77 ms per loop
Timings on large arrays with a bigger interval length of 50 -
In [205]: np.random.seed(0)
In [206]: a = np.random.randint(0,10,(10000))
In [207]: %timeit interp_based(a,N=50)
...: %timeit ranges_based(a,N=50)
100 loops, best of 3: 3.65 ms per loop
100 loops, best of 3: 2.14 ms per loop
In [208]: np.random.seed(0)
In [209]: a = np.random.randint(0,10,(100000))
In [210]: %timeit interp_based(a,N=50)
...: %timeit ranges_based(a,N=50)
10 loops, best of 3: 43.4 ms per loop
10 loops, best of 3: 31.1 ms per loop
With bigger interval lengths, it seems the performance boost with create_ranges
is getting bigger too.
Further improvement
We could optimize further by doing a concatenation at the start and then slicing out at the end, thus avoiding the concatenation there, like so -
def ranges_based_v2(a, N):
    """Like ranges_based, but concatenates at the start and slices at the end,
    avoiding the output-side concatenation.

    Fix: the original sliced with [:-N+2], which for N == 2 is [:0] and
    returns an empty array; slicing to the exact output length works for
    every N >= 2.
    """
    start = a
    # Dummy final stop of 0: its row only contributes its first sample,
    # which equals a[-1]; everything after that is sliced away below.
    stop = np.concatenate((a[1:], [0]))
    out_len = (a.size - 1) * (N - 1) + 1  # same length ranges_based produces
    return create_ranges(start, stop, N - 1, endpoint=False).ravel()[:out_len]
Timings on a larger array with interval lengths 5 and 50 -
In [243]: np.random.seed(0)
In [244]: a = np.random.randint(0,10,(100000))
In [245]: %timeit interp_based(a,N=5)
...: %timeit ranges_based(a,N=5)
...: %timeit ranges_based_v2(a,N=5)
100 loops, best of 3: 3.38 ms per loop
100 loops, best of 3: 2.71 ms per loop
100 loops, best of 3: 2.49 ms per loop
In [246]: %timeit interp_based(a,N=50)
...: %timeit ranges_based(a,N=50)
...: %timeit ranges_based_v2(a,N=50)
10 loops, best of 3: 42.8 ms per loop
10 loops, best of 3: 30.1 ms per loop
10 loops, best of 3: 22.2 ms per loop
More with numexpr
We could leverage multi-core processing with numexpr -
# https://stackoverflow.com/a/40624614/ @Divakar
import numexpr as ne
def create_ranges_numexpr(start, stop, N, endpoint=True):
    """Multi-core variant of create_ranges backed by numexpr.

    Same contract as create_ranges: returns an (len(start), N) array whose
    row i holds N evenly spaced samples from start[i] toward stop[i].
    """
    # With the endpoint included there are only N-1 steps between samples.
    divisor = N - 1 if endpoint else N
    s0 = start[:, None]
    s1 = stop[:, None]
    r = np.arange(N)
    # numexpr evaluates the whole broadcast expression in one multi-threaded pass.
    return ne.evaluate('((1.0/divisor) * (s1 - s0))*r + s0')
def ranges_based_v3(a, N):
    """numexpr-backed version of ranges_based_v2.

    Fix: slice to the exact output length instead of [:-N+2], which wrongly
    yields an empty array when N == 2.
    """
    start = a
    # Dummy final stop of 0: only its first sample (== a[-1]) survives the slice.
    stop = np.concatenate((a[1:], [0]))
    out_len = (a.size - 1) * (N - 1) + 1
    return create_ranges_numexpr(start, stop, N - 1, endpoint=False).ravel()[:out_len]
Timings -
In [276]: np.random.seed(0)
In [277]: a = np.random.randint(0,10,(100000))
In [278]: %timeit interp_based(a,N=5)
...: %timeit ranges_based(a,N=5)
...: %timeit ranges_based_v2(a,N=5)
...: %timeit ranges_based_v3(a,N=5)
100 loops, best of 3: 3.39 ms per loop
100 loops, best of 3: 2.75 ms per loop
100 loops, best of 3: 2.49 ms per loop
1000 loops, best of 3: 1.17 ms per loop
In [279]: %timeit interp_based(a,N=50)
...: %timeit ranges_based(a,N=50)
...: %timeit ranges_based_v2(a,N=50)
...: %timeit ranges_based_v3(a,N=50)
10 loops, best of 3: 43.1 ms per loop
10 loops, best of 3: 31.3 ms per loop
10 loops, best of 3: 22.3 ms per loop
100 loops, best of 3: 11.4 ms per loop