Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Introduce skip_cache_func #652

Merged
merged 11 commits into from
Feb 9, 2023
30 changes: 28 additions & 2 deletions aiocache/decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,10 @@ class cached:
:param key_builder: Callable that allows to build the function dynamically. It receives
the function plus same args and kwargs passed to the function.
This behavior is necessarily different than ``BaseCache.build_key()``
:param skip_cache_func: Callable that receives the result after calling the
wrapped function and should return ``True`` if the value should skip the
cache (or ``False`` to store in the cache).
e.g. to avoid caching ``None`` results: ``lambda r: r is None``
:param cache: cache class to use when calling the ``set``/``get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
:param serializer: serializer instance to use when calling the ``dumps``/``loads``.
Expand All @@ -58,6 +62,7 @@ def __init__(
ttl=SENTINEL,
namespace=None,
key_builder=None,
skip_cache_func=lambda x: False,
cache=Cache.MEMORY,
serializer=None,
plugins=None,
Expand All @@ -67,6 +72,7 @@ def __init__(
):
self.ttl = ttl
self.key_builder = key_builder
self.skip_cache_func = skip_cache_func
self.noself = noself
self.alias = alias
self.cache = None
Expand Down Expand Up @@ -111,6 +117,9 @@ async def decorator(

result = await f(*args, **kwargs)

if self.skip_cache_func(result):
return result

if cache_write:
if aiocache_wait_for_write:
await self.set_in_cache(key, result)
Expand Down Expand Up @@ -171,6 +180,10 @@ class cached_stampede(cached):
:param key_builder: Callable that allows to build the function dynamically. It receives
the function plus same args and kwargs passed to the function.
This behavior is necessarily different than ``BaseCache.build_key()``
:param skip_cache_func: Callable that receives the result after calling the
wrapped function and should return ``True`` if the value should skip the
cache (or ``False`` to store in the cache).
e.g. to avoid caching ``None`` results: ``lambda r: r is None``
:param cache: cache class to use when calling the ``set``/``get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
:param serializer: serializer instance to use when calling the ``dumps``/``loads``.
Expand Down Expand Up @@ -202,6 +215,9 @@ async def decorator(self, f, *args, **kwargs):

result = await f(*args, **kwargs)

if self.skip_cache_func(result):
return result

await self.set_in_cache(key, result)

return result
Expand Down Expand Up @@ -268,6 +284,9 @@ class multi_cached:
``keys_from_attr``, the decorated callable, and the positional and keyword arguments
that were passed to the decorated callable. This behavior is necessarily different than
``BaseCache.build_key()`` and the call signature differs from ``cached.key_builder``.
:param skip_cache_func: Callable that receives both key and value and returns ``True``
if that key-value pair should not be cached (or ``False`` to store in cache).
The keys and values to be passed are taken from the wrapped function result.
:param ttl: int seconds to store the keys. Default is 0 which means no expiration.
:param cache: cache class to use when calling the ``multi_set``/``multi_get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
Expand All @@ -286,6 +305,7 @@ def __init__(
keys_from_attr,
namespace=None,
key_builder=None,
skip_cache_func=lambda k, v: False,
ttl=SENTINEL,
cache=Cache.MEMORY,
serializer=None,
Expand All @@ -295,6 +315,7 @@ def __init__(
):
self.keys_from_attr = keys_from_attr
self.key_builder = key_builder or (lambda key, f, *args, **kwargs: key)
self.skip_cache_func = skip_cache_func
self.ttl = ttl
self.alias = alias
self.cache = None
Expand Down Expand Up @@ -354,12 +375,17 @@ async def decorator(
result = await f(*new_args, **kwargs)
result.update(partial)

to_cache = {k: v for k, v in result.items() if not self.skip_cache_func(k, v)}

if not to_cache:
return result

if cache_write:
if aiocache_wait_for_write:
await self.set_in_cache(result, f, args, kwargs)
await self.set_in_cache(to_cache, f, args, kwargs)
else:
# TODO: Use aiojobs to avoid warnings.
asyncio.create_task(self.set_in_cache(result, f, args, kwargs))
asyncio.create_task(self.set_in_cache(to_cache, f, args, kwargs))

return result

Expand Down
38 changes: 38 additions & 0 deletions tests/acceptance/test_decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,31 @@ async def fn(self, a, b=2):
await fn("self", 1, 3)
assert await cache.exists(build_key(fn, "self", 1, 3)) is True

@pytest.mark.parametrize("decorator", (cached, cached_stampede))
async def test_cached_skip_cache_func(self, cache, decorator):
"""Values rejected by ``skip_cache_func`` must not be written to the cache.

Runs for both ``cached`` and ``cached_stampede`` via parametrization;
the skip predicate filters out ``None`` results.
"""
@decorator(skip_cache_func=lambda r: r is None)
async def sk_func(x):
return x if x > 0 else None

# Positive argument -> non-None result -> should be stored in the cache.
arg = 1
res = await sk_func(arg)
assert res

key = decorator().get_cache_key(sk_func, args=(1,), kwargs={})

assert key
assert await cache.exists(key)
assert await cache.get(key) == res

# Non-positive argument -> wrapped function returns None, which the
# skip predicate rejects -> nothing is written under this key.
arg = -1

await sk_func(arg)

key = decorator().get_cache_key(sk_func, args=(-1,), kwargs={})

assert key
assert not await cache.exists(key)

async def test_cached_without_namespace(self, cache):
"""Default cache key is created when no namespace is provided"""
@cached(namespace=None)
Expand Down Expand Up @@ -149,6 +174,19 @@ async def fn(self, keys, market="ES"):
assert await cache.exists("fn_" + _ensure_key(Keys.KEY) + "_ES") is True
assert await cache.exists("fn_" + _ensure_key(Keys.KEY_1) + "_ES") is True

async def test_multi_cached_skip_keys(self, cache):
"""Key/value pairs rejected by ``skip_cache_func`` are returned but not cached.

The predicate receives ``(key, value)`` and here filters out ``None``
values, so only the non-None entry is persisted.
"""
@multi_cached(keys_from_attr="keys", skip_cache_func=lambda _, v: v is None)
async def multi_sk_fn(keys, values):
return {k: v for k, v in zip(keys, values)}

# Both keys appear in the returned result, regardless of caching.
res = await multi_sk_fn(keys=[Keys.KEY, Keys.KEY_1], values=[42, None])
assert res
assert Keys.KEY in res and Keys.KEY_1 in res

# Only the non-None value (Keys.KEY -> 42) was stored; the None value
# (Keys.KEY_1) was skipped by skip_cache_func.
assert await cache.exists(Keys.KEY)
assert await cache.get(Keys.KEY) == res[Keys.KEY]
assert not await cache.exists(Keys.KEY_1)

async def test_fn_with_args(self, cache):
@multi_cached("keys")
async def fn(keys, *args):
Expand Down