Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove aiocache.Cache and alias support #948

Merged
merged 13 commits into from
Jan 18, 2025
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ Or as a decorator


@cached(
ttl=10, cache=Cache.REDIS, key="key", serializer=PickleSerializer(), port=6379, namespace="main")
cache=RedisCache(), key="key", serializer=PickleSerializer(), port=6379, namespace="main")
async def cached_call():
print("Sleeping for three seconds zzzz.....")
await asyncio.sleep(3)
Expand Down
14 changes: 5 additions & 9 deletions aiocache/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import logging
from typing import Any, Dict, Type
from typing import Any, Type

from .backends.memory import SimpleMemoryCache
from .base import BaseCache
Expand All @@ -8,7 +8,7 @@

logger = logging.getLogger(__name__)

AIOCACHE_CACHES: Dict[str, Type[BaseCache[Any]]] = {SimpleMemoryCache.NAME: SimpleMemoryCache}
_AIOCACHE_CACHES: list[Type[BaseCache[Any]]] = [SimpleMemoryCache]

try:
import redis
Expand All @@ -17,7 +17,7 @@
else:
from aiocache.backends.redis import RedisCache

AIOCACHE_CACHES[RedisCache.NAME] = RedisCache
_AIOCACHE_CACHES.append(RedisCache)
del redis

try:
Expand All @@ -27,18 +27,14 @@
else:
from aiocache.backends.memcached import MemcachedCache

AIOCACHE_CACHES[MemcachedCache.NAME] = MemcachedCache
_AIOCACHE_CACHES.append(MemcachedCache)
del aiomcache

from .decorators import cached, cached_stampede, multi_cached # noqa: E402,I202
from .factory import Cache, caches # noqa: E402


__all__ = (
"caches",
"Cache",
"cached",
"cached_stampede",
"multi_cached",
*(c.__name__ for c in AIOCACHE_CACHES.values()),
*sorted(c.__name__ for c in _AIOCACHE_CACHES),
)
136 changes: 12 additions & 124 deletions aiocache/decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
import logging

from aiocache.base import SENTINEL
from aiocache.factory import Cache, caches
from aiocache.lock import RedLock

logger = logging.getLogger(__name__)
Expand All @@ -15,89 +14,36 @@ class cached:
Caches the functions return value into a key generated with module_name, function_name
and args. The cache is available in the function object as ``<function_name>.cache``.

In some cases you will need to send more args to configure the cache object.
An example would be endpoint and port for the Redis cache. You can send those args as
kwargs and they will be propagated accordingly.

Only one cache instance is created per decorated call. If you expect high concurrency of
calls to the same function, you should adapt the pool size as needed.

Extra args that are injected in the function that you can use to control the cache
behavior are:

- ``cache_read``: Controls whether the function call will try to read from cache first or
not. Enabled by default.
- ``cache_write``: Controls whether the function call will try to write in the cache once
the result has been retrieved. Enabled by default.
- ``aiocache_wait_for_write``: Controls whether the call of the function will wait for the
value in the cache to be written. If set to False, the write
happens in the background. Enabled by default

:param cache: cache instance to use when calling the ``set``/``get`` operations.
:param ttl: int seconds to store the function call. Default is None which means no expiration.
:param namespace: string to use as default prefix for the key used in all operations of
the backend. Default is an empty string, "".
:param key_builder: Callable that allows to build the function dynamically. It receives
the function plus same args and kwargs passed to the function.
This behavior is necessarily different than ``BaseCache.build_key()``
:param skip_cache_func: Callable that receives the result after calling the
wrapped function and should return `True` if the value should skip the
cache (or `False` to store in the cache).
e.g. to avoid caching `None` results: `lambda r: r is None`
:param cache: cache class to use when calling the ``set``/``get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
:param serializer: serializer instance to use when calling the ``dumps``/``loads``.
        If it's None, the default one from the cache backend is used.
:param plugins: list plugins to use when calling the cmd hooks
Default is pulled from the cache class being used.
:param alias: str specifying the alias to load the config from. If alias is passed, other
config parameters are ignored. Same cache identified by alias is used on every call. If
you need a per function cache, specify the parameters explicitly without using alias.
:param noself: bool if you are decorating a class function, by default self is also used to
generate the key. This will result in same function calls done by different class instances
to use different cache keys. Use noself=True if you want to ignore it.
"""

def __init__(
self,
cache,
*,
ttl=SENTINEL,
namespace="",
key_builder=None,
skip_cache_func=lambda x: False,
cache=Cache.MEMORY,
serializer=None,
plugins=None,
alias=None,
noself=False,
**kwargs,
):
self.ttl = ttl
self.key_builder = key_builder
self.skip_cache_func = skip_cache_func
self.noself = noself
self.alias = alias
self.cache = None

self._cache = cache
self._serializer = serializer
self._namespace = namespace
self._plugins = plugins
self._kwargs = kwargs
self.cache = cache

def __call__(self, f):
if self.alias:
self.cache = caches.get(self.alias)
for arg in ("serializer", "namespace", "plugins"):
if getattr(self, f'_{arg}', None) is not None:
logger.warning(f"Using cache alias; ignoring {arg!r} argument.")
else:
self.cache = _get_cache(
cache=self._cache,
serializer=self._serializer,
namespace=self._namespace,
plugins=self._plugins,
**self._kwargs,
)

@functools.wraps(f)
async def wrapper(*args, **kwargs):
return await self.decorator(f, *args, **kwargs)
Expand Down Expand Up @@ -163,42 +109,27 @@ class cached_stampede(cached):
Caches the functions return value into a key generated with module_name, function_name and args
    while avoiding cache stampede effects.

In some cases you will need to send more args to configure the cache object.
An example would be endpoint and port for the Redis cache. You can send those args as
kwargs and they will be propagated accordingly.

Only one cache instance is created per decorated function. If you expect high concurrency
of calls to the same function, you should adapt the pool size as needed.

:param cache: cache instance to use when calling the ``set``/``get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
:param lease: int seconds to lock function call to avoid cache stampede effects.
If 0 or None, no locking happens (default is 2). redis and memory backends support
float ttls
:param ttl: int seconds to store the function call. Default is None which means no expiration.
:param key_from_attr: str arg or kwarg name from the function to use as a key.
:param namespace: string to use as default prefix for the key used in all operations of
the backend. Default is an empty string, "".
:param key_builder: Callable that allows to build the function dynamically. It receives
the function plus same args and kwargs passed to the function.
This behavior is necessarily different than ``BaseCache.build_key()``
:param skip_cache_func: Callable that receives the result after calling the
wrapped function and should return `True` if the value should skip the
cache (or `False` to store in the cache).
e.g. to avoid caching `None` results: `lambda r: r is None`
:param cache: cache class to use when calling the ``set``/``get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
:param serializer: serializer instance to use when calling the ``dumps``/``loads``.
Default is JsonSerializer.
:param plugins: list plugins to use when calling the cmd hooks
Default is pulled from the cache class being used.
:param alias: str specifying the alias to load the config from. If alias is passed,
other config parameters are ignored. New cache is created every time.
:param noself: bool if you are decorating a class function, by default self is also used to
generate the key. This will result in same function calls done by different class instances
to use different cache keys. Use noself=True if you want to ignore it.
"""

def __init__(self, lease=2, **kwargs):
super().__init__(**kwargs)
def __init__(self, cache, lease=2, **kwargs):
super().__init__(cache, **kwargs)
self.lease = lease

async def decorator(self, f, *args, **kwargs):
Expand All @@ -223,10 +154,6 @@ async def decorator(self, f, *args, **kwargs):
return result


def _get_cache(cache=Cache.MEMORY, serializer=None, plugins=None, **cache_kwargs):
return Cache(cache, serializer=serializer, plugins=plugins, **cache_kwargs)


def _get_args_dict(func, args, kwargs):
defaults = {
arg_name: arg.default
Expand Down Expand Up @@ -261,9 +188,6 @@ class multi_cached:

The cache is available in the function object as ``<function_name>.cache``.

Only one cache instance is created per decorated function. If you expect high concurrency
of calls to the same function, you should adapt the pool size as needed.

Extra args that are injected in the function that you can use to control the cache
behavior are:

Expand All @@ -275,10 +199,9 @@ class multi_cached:
value in the cache to be written. If set to False, the write
happens in the background. Enabled by default

:param cache: cache instance to use when calling the ``multi_set``/``multi_get`` operations.
:param keys_from_attr: name of the arg or kwarg in the decorated callable that contains
an iterable that yields the keys returned by the decorated callable.
:param namespace: string to use as default prefix for the key used in all operations of
the backend. Default is an empty string, "".
:param key_builder: Callable that enables mapping the decorated function's keys to the keys
used by the cache. Receives a key from the iterable corresponding to
``keys_from_attr``, the decorated callable, and the positional and keyword arguments
Expand All @@ -288,59 +211,24 @@ class multi_cached:
if that key-value pair should not be cached (or False to store in cache).
The keys and values to be passed are taken from the wrapped function result.
:param ttl: int seconds to store the keys. Default is 0 which means no expiration.
:param cache: cache class to use when calling the ``multi_set``/``multi_get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
:param serializer: serializer instance to use when calling the ``dumps``/``loads``.
        If it's None, the default one from the cache backend is used.
:param plugins: plugins to use when calling the cmd hooks
Default is pulled from the cache class being used.
:param alias: str specifying the alias to load the config from. If alias is passed,
other config parameters are ignored. Same cache identified by alias is used on
every call. If you need a per function cache, specify the parameters explicitly
without using alias.
"""

def __init__(
self,
cache=None,
*,
keys_from_attr,
namespace="",
key_builder=None,
skip_cache_func=lambda k, v: False,
ttl=SENTINEL,
cache=Cache.MEMORY,
serializer=None,
plugins=None,
alias=None,
**kwargs,
):
self.cache = cache
self.keys_from_attr = keys_from_attr
self.key_builder = key_builder or (lambda key, f, *args, **kwargs: key)
self.skip_cache_func = skip_cache_func
self.ttl = ttl
self.alias = alias
self.cache = None

self._cache = cache
self._serializer = serializer
self._namespace = namespace
self._plugins = plugins
self._kwargs = kwargs

def __call__(self, f):
if self.alias:
self.cache = caches.get(self.alias)
for arg in ("serializer", "namespace", "plugins"):
if getattr(self, f'_{arg}', None) is not None:
logger.warning(f"Using cache alias; ignoring {arg!r} argument.")
else:
self.cache = _get_cache(
cache=self._cache,
serializer=self._serializer,
namespace=self._namespace,
plugins=self._plugins,
**self._kwargs,
)

@functools.wraps(f)
async def wrapper(*args, **kwargs):
return await self.decorator(f, *args, **kwargs)
Expand Down
Loading
Loading