Building a single-element cache with a time-to-live is pretty trivial:
_last_result_time = None
_last_result_value = None
def result(val):
global _last_result_time
global _last_result_value
now = datetime.datetime.now()
if not _last_result_time or now - _last_result_time > datetime.timedelta(hours=1):
_last_result_value = <expensive computation here>
_last_result_time = now
return _last_result_value
If you want to generalize this as a decorator, it's not much harder:
def cache(ttl=datetime.timedelta(hours=1)):
    """Decorator factory: remember one result and reuse it until *ttl* elapses.

    The cache is a single slot shared by every call — arguments are passed
    through to the wrapped function on a refresh but are NOT part of any key.
    """
    def wrap(func):
        # Closed-over slot: when the value was computed, and the value itself.
        stamp, stored = None, None

        @functools.wraps(func)
        def wrapped(*args, **kw):
            nonlocal stamp, stored
            now = datetime.datetime.now()
            # Refresh when nothing is cached yet or the entry has expired.
            expired = stamp is None or now - stamp > ttl
            if expired:
                stored = func(*args, **kw)
                stamp = now
            return stored

        return wrapped
    return wrap
If you want it to handle different arguments, storing a time-to-live for each one:
def cache(ttl=datetime.timedelta(hours=1)):
    """Decorator factory: memoize results per argument set, refreshing any
    entry that is older than *ttl*.

    Entries are never evicted, only refreshed — the dict grows with each
    distinct argument combination.
    """
    def wrap(func):
        entries = {}  # (positional tuple, frozenset of kw items) -> (timestamp, value)

        @functools.wraps(func)
        def wrapped(*args, **kw):
            now = datetime.datetime.now()
            # see lru_cache for fancier alternatives
            key = tuple(args), frozenset(kw.items())
            entry = entries.get(key)
            if entry is None or now - entry[0] > ttl:
                entries[key] = entry = (now, func(*args, **kw))
            return entry[1]

        return wrapped
    return wrap
You can of course keep adding features to it—give it a max size and evict by time of storage or by LRU or whatever else you want, expose cache stats as attributes on the decorated function, etc. The implementation of lru_cache
in the stdlib should help show you how to do most of the trickier things (since it does almost all of them).