A functional programming approach using itertools:
import re
from itertools import chain, zip_longest
list_a = ["4123", "7648", "afjsdn", "ujaf", "huh23", "n23kl3l24"]
regex = r'\d+'
# pre-compilation of the regex
pattern = re.compile(regex)
# group the matches
grps = ((i, m) for i, m in enumerate(chain(map(pattern.findall, list_a))) if m)
# couple each index with its matches + flatten
out = list(chain.from_iterable(tuple(zip_longest((i,), matches, fillvalue=i)) for i, matches in grps))
print(out)
#[(0, '4123'), (1, '7648'), (4, '23'), (5, '23'), (5, '3'), (5, '24')]
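The zip_longest call is what couples each index with its matches: the single-element tuple (i,) is padded with fillvalue=i until it is as long as the match list. A minimal sketch of that step in isolation, using index 5 from the example above:
from itertools import zip_longest
# the one-element tuple is padded with the fillvalue, so every match gets paired with the index
print(list(zip_longest((5,), ['23', '3', '24'], fillvalue=5)))
#[(5, '23'), (5, '3'), (5, '24')]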
Performance test
I tried to make a fair test: each answer has two implementations (illustrated by the short snippet after this list):
- v1: without pre-compilation, using re.findall
- v2: with regex pre-compilation, using pattern.findall
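For reference, the two calling styles look like this (a minimal sketch; note that the module-level re functions also cache compiled patterns internally, so the measured difference is mostly the repeated cache lookup and call overhead):
import re
s = "huh23"
# v1 style: module-level call, the pattern string is looked up on every call
print(re.findall(r'\d+', s))   # ['23']
# v2 style: the pattern object is compiled once and reused
pattern = re.compile(r'\d+')
print(pattern.findall(s))      # ['23']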
The test's parameters are the number of repetitions (to get a statistically stable result) and a length factor to stretch the size of the data list.
In all cases it is evident that pre-compilation makes the code more performant. The list comprehension seems to be the best solution, followed by the generator, with the itertools version last.
To one significant figure, for small data, they all perform about the same.
Time measurement is done with timeit.
from timeit import timeit
from itertools import chain, zip_longest
import re
def with_itertools_v1(lst):
    grps = ((i, m) for i, m in enumerate(chain(map(lambda x: re.findall(r'\d+', x), lst))) if m)
    return list(chain.from_iterable(list(zip_longest((i,), matches, fillvalue=i)) for i, matches in grps))

def with_itertools_v2(lst):
    pattern = re.compile(r'\d+')
    grps = ((i, m) for i, m in enumerate(chain(map(pattern.findall, lst))) if m)
    return list(chain.from_iterable(list(zip_longest((i,), matches, fillvalue=i)) for i, matches in grps))

def with_generators_v1(lst):
    def gen():
        for index, item in enumerate(lst):
            yield from [[index, match] for match in re.findall(r"\d+", item)]
    return list(gen())

def with_generators_v2(lst):
    def gen():
        pattern = re.compile(r'\d+')
        for index, item in enumerate(lst):
            yield from [[index, match] for match in pattern.findall(item)]
    return list(gen())

def with_list_comprehension_v1(lst):
    return [[index, match] for index, item in enumerate(lst) for match in re.findall(r"\d+", item)]

def with_list_comprehension_v2(lst):
    pattern = re.compile(r'\d+')
    return [[index, match] for index, item in enumerate(lst) for match in pattern.findall(item)]
funcs = (with_itertools_v1, with_itertools_v2, with_generators_v1, with_generators_v2, with_list_comprehension_v1, with_list_comprehension_v2)
def tester(repetition=5, length_factor=1):
    data = ["4123", "7648", "afjsdn", "ujaf", "huh23", "n23kl3l24"] * length_factor
    print(f'Test repeated {repetition} times with {len(data)} terms')
    for func in funcs:
        t = timeit(lambda: func(data), number=repetition)
        print(func.__name__, f'ms: {t / repetition * 1e3:.6f}')
    print()
tester(5, 1)
tester(5, 10)
tester(5, 100)
tester(5, 1000)
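As a sanity check (not part of the timings), the six variants can be verified to return equivalent pairs; the itertools versions yield tuples while the others yield lists, so a small conversion is needed before comparing (a sketch, assuming the functions above are in scope):
sample = ["4123", "7648", "afjsdn", "ujaf", "huh23", "n23kl3l24"]
reference = with_list_comprehension_v2(sample)
for func in funcs:
    # normalise tuple/list pairs before comparing
    assert [list(pair) for pair in func(sample)] == reference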
Results
Test repeated 5 times with 6 terms
with_itertools_v1 ms: 0.060872
with_itertools_v2 ms: 0.019029
with_generators_v1 ms: 0.023321
with_generators_v2 ms: 0.015797
with_list_comprehension_v1 ms: 0.017898
with_list_comprehension_v2 ms: 0.010917
Test repeated 5 times with 60 terms
with_itertools_v1 ms: 0.248608
with_itertools_v2 ms: 0.142064
with_generators_v1 ms: 0.201945
with_generators_v2 ms: 0.116305
with_list_comprehension_v1 ms: 0.160765
with_list_comprehension_v2 ms: 0.076942
Test repeated 5 times with 600 terms
with_itertools_v1 ms: 2.165053
with_itertools_v2 ms: 1.612787
with_generators_v1 ms: 1.484747
with_generators_v2 ms: 0.639824
with_list_comprehension_v1 ms: 11.219491
with_list_comprehension_v2 ms: 0.809310
Test repeated 5 times with 6000 terms
with_itertools_v1 ms: 46.531748
with_itertools_v2 ms: 35.922768
with_generators_v1 ms: 77.271880
with_generators_v2 ms: 20.042708
with_list_comprehension_v1 ms: 22.927475
with_list_comprehension_v2 ms: 16.760901