diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000000..0e0ace6a4a
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,21 @@
+[flake8]
+max-line-length = 88
+exclude =
+ *.egg-info,
+ *.pyc,
+ .git,
+ .tox,
+ .venv*,
+ build,
+ docs/*,
+ dist,
+ docker,
+ venv*,
+ .venv*,
+ whitelist.py,
+ tasks.py
+ignore =
+ F405
+ W503
+ E203
+ E126
\ No newline at end of file
diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml
index f49a4fcd46..1bab506c32 100644
--- a/.github/workflows/integration.yaml
+++ b/.github/workflows/integration.yaml
@@ -16,6 +16,10 @@ on:
schedule:
- cron: '0 1 * * *' # nightly build
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
permissions:
contents: read # to fetch code (actions/checkout)
@@ -48,7 +52,7 @@ jobs:
run-tests:
runs-on: ubuntu-latest
- timeout-minutes: 30
+ timeout-minutes: 60
strategy:
max-parallel: 15
fail-fast: false
@@ -68,32 +72,77 @@ jobs:
- name: run tests
run: |
pip install -U setuptools wheel
+ pip install -r requirements.txt
pip install -r dev_requirements.txt
- tox -e ${{matrix.test-type}}-${{matrix.connection-type}}
+ if [ "${{matrix.connection-type}}" == "hiredis" ]; then
+ pip install hiredis
+ fi
+ invoke devenv
+ sleep 5 # time to settle
+ invoke ${{matrix.test-type}}-tests
+
- uses: actions/upload-artifact@v2
if: success() || failure()
with:
- name: pytest-results-${{matrix.test-type}}
+ name: pytest-results-${{matrix.test-type}}-${{matrix.connection-type}}-${{matrix.python-version}}
path: '${{matrix.test-type}}*results.xml'
+
- name: Upload codecov coverage
uses: codecov/codecov-action@v3
+ if: ${{matrix.python-version == '3.11'}}
with:
fail_ci_if_error: false
- # - name: View Test Results
- # uses: dorny/test-reporter@v1
- # if: success() || failure()
- # with:
- # name: Test Results ${{matrix.python-version}} ${{matrix.test-type}}-${{matrix.connection-type}}
- # path: '${{matrix.test-type}}*results.xml'
- # reporter: java-junit
- # list-suites: failed
- # list-tests: failed
- # max-annotations: 10
+
+ - name: View Test Results
+ uses: dorny/test-reporter@v1
+ if: success() || failure()
+ continue-on-error: true
+ with:
+ name: Test Results ${{matrix.python-version}} ${{matrix.test-type}}-${{matrix.connection-type}}
+ path: '*.xml'
+ reporter: java-junit
+ list-suites: all
+ list-tests: all
+ max-annotations: 10
+ fail-on-error: 'false'
+
+ resp3_tests:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ['3.7', '3.11']
+ test-type: ['standalone', 'cluster']
+ connection-type: ['hiredis', 'plain']
+ protocol: ['3']
+ env:
+ ACTIONS_ALLOW_UNSECURE_COMMANDS: true
+ name: RESP3 [${{ matrix.python-version }} ${{matrix.test-type}}-${{matrix.connection-type}}]
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: 'pip'
+ - name: run tests
+ run: |
+ pip install -U setuptools wheel
+ pip install -r requirements.txt
+ pip install -r dev_requirements.txt
+ if [ "${{matrix.connection-type}}" == "hiredis" ]; then
+ pip install hiredis
+ fi
+ invoke devenv
+ sleep 5 # time to settle
+ invoke ${{matrix.test-type}}-tests
+ invoke ${{matrix.test-type}}-tests --uvloop
build_and_test_package:
name: Validate building and installing the package
runs-on: ubuntu-latest
+ needs: [run-tests]
strategy:
+ fail-fast: false
matrix:
extension: ['tar.gz', 'whl']
steps:
diff --git a/.isort.cfg b/.isort.cfg
new file mode 100644
index 0000000000..039f0337a2
--- /dev/null
+++ b/.isort.cfg
@@ -0,0 +1,5 @@
+[settings]
+profile=black
+multi_line_output=3
+src_paths = redis,tests
+skip_glob=benchmarks/*
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e31ec3491e..2909f04f0b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -38,8 +38,9 @@ Here's how to get started with your code contribution:
a. python -m venv .venv
b. source .venv/bin/activate
c. pip install -r dev_requirements.txt
+   d. pip install -r requirements.txt
-4. If you need a development environment, run `invoke devenv`
+4. If you need a development environment, run `invoke devenv`. Note: this relies on docker-compose to build environments, and assumes that you have a version supporting [docker profiles](https://docs.docker.com/compose/profiles/).
5. While developing, make sure the tests pass by running `invoke tests`
6. If you like the change and think the project could use it, send a
pull request
@@ -59,7 +60,6 @@ can execute docker and its various commands.
- Three sentinel Redis nodes
- A redis cluster
- An stunnel docker, fronting the master Redis node
-- A Redis node, running unstable - the latest redis
The replica node, is a replica of the master node, using the
[leader-follower replication](https://redis.io/topics/replication)
diff --git a/dev_requirements.txt b/dev_requirements.txt
index 8ffb1e944f..cdb3774ab6 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,15 +1,14 @@
click==8.0.4
black==22.3.0
flake8==5.0.4
+flake8-isort==6.0.0
flynt~=0.69.0
-isort==5.10.1
mock==4.0.3
packaging>=20.4
pytest==7.2.0
-pytest-timeout==2.0.1
+pytest-timeout==2.1.0
pytest-asyncio>=0.20.2
tox==3.27.1
-tox-docker==3.1.0
invoke==1.7.3
pytest-cov>=4.0.0
vulture>=2.3.0
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000000..17d4b23977
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,109 @@
+---
+
+version: "3.8"
+
+services:
+
+ redis:
+ image: redis/redis-stack-server:edge
+ container_name: redis-standalone
+ ports:
+ - 6379:6379
+ environment:
+ - "REDIS_ARGS=--enable-debug-command yes --enable-module-command yes"
+ profiles:
+ - standalone
+ - sentinel
+ - replica
+ - all
+
+ replica:
+ image: redis/redis-stack-server:edge
+ container_name: redis-replica
+ depends_on:
+ - redis
+ environment:
+ - "REDIS_ARGS=--replicaof redis 6379"
+ ports:
+ - 6380:6379
+ profiles:
+ - replica
+ - all
+
+ cluster:
+ container_name: redis-cluster
+ build:
+ context: .
+ dockerfile: dockers/Dockerfile.cluster
+ ports:
+ - 16379:16379
+ - 16380:16380
+ - 16381:16381
+ - 16382:16382
+ - 16383:16383
+ - 16384:16384
+ volumes:
+ - "./dockers/cluster.redis.conf:/redis.conf:ro"
+ profiles:
+ - cluster
+ - all
+
+ stunnel:
+ image: redisfab/stunnel:latest
+ depends_on:
+ - redis
+ ports:
+ - 6666:6666
+ profiles:
+ - all
+ - standalone
+ - ssl
+ volumes:
+ - "./dockers/stunnel/conf:/etc/stunnel/conf.d:ro"
+ - "./dockers/stunnel/keys:/etc/stunnel/keys:ro"
+
+ sentinel:
+ image: redis/redis-stack-server:edge
+ container_name: redis-sentinel
+ depends_on:
+ - redis
+ environment:
+ - "REDIS_ARGS=--port 26379"
+ entrypoint: "/opt/redis-stack/bin/redis-sentinel /redis.conf --port 26379"
+ ports:
+ - 26379:26379
+ volumes:
+ - "./dockers/sentinel.conf:/redis.conf"
+ profiles:
+ - sentinel
+ - all
+
+ sentinel2:
+ image: redis/redis-stack-server:edge
+ container_name: redis-sentinel2
+ depends_on:
+ - redis
+ environment:
+ - "REDIS_ARGS=--port 26380"
+ entrypoint: "/opt/redis-stack/bin/redis-sentinel /redis.conf --port 26380"
+ ports:
+ - 26380:26380
+ volumes:
+ - "./dockers/sentinel.conf:/redis.conf"
+ profiles:
+ - sentinel
+ - all
+
+ sentinel3:
+ image: redis/redis-stack-server:edge
+ container_name: redis-sentinel3
+ depends_on:
+ - redis
+ entrypoint: "/opt/redis-stack/bin/redis-sentinel /redis.conf --port 26381"
+ ports:
+ - 26381:26381
+ volumes:
+ - "./dockers/sentinel.conf:/redis.conf"
+ profiles:
+ - sentinel
+ - all
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
deleted file mode 100644
index c76d15db36..0000000000
--- a/docker/base/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-# produces redisfab/redis-py:6.2.6
-FROM redis:6.2.6-buster
-
-CMD ["redis-server", "/redis.conf"]
diff --git a/docker/base/Dockerfile.cluster b/docker/base/Dockerfile.cluster
deleted file mode 100644
index 5c246dcf28..0000000000
--- a/docker/base/Dockerfile.cluster
+++ /dev/null
@@ -1,11 +0,0 @@
-# produces redisfab/redis-py-cluster:6.2.6
-FROM redis:6.2.6-buster
-
-COPY create_cluster.sh /create_cluster.sh
-RUN chmod +x /create_cluster.sh
-
-EXPOSE 16379 16380 16381 16382 16383 16384
-
-ENV START_PORT=16379
-ENV END_PORT=16384
-CMD /create_cluster.sh
diff --git a/docker/base/Dockerfile.cluster4 b/docker/base/Dockerfile.cluster4
deleted file mode 100644
index 3158d6edd4..0000000000
--- a/docker/base/Dockerfile.cluster4
+++ /dev/null
@@ -1,9 +0,0 @@
-# produces redisfab/redis-py-cluster:4.0
-FROM redis:4.0-buster
-
-COPY create_cluster4.sh /create_cluster4.sh
-RUN chmod +x /create_cluster4.sh
-
-EXPOSE 16391 16392 16393 16394 16395 16396
-
-CMD [ "/create_cluster4.sh"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.cluster5 b/docker/base/Dockerfile.cluster5
deleted file mode 100644
index 3becfc853a..0000000000
--- a/docker/base/Dockerfile.cluster5
+++ /dev/null
@@ -1,9 +0,0 @@
-# produces redisfab/redis-py-cluster:5.0
-FROM redis:5.0-buster
-
-COPY create_cluster5.sh /create_cluster5.sh
-RUN chmod +x /create_cluster5.sh
-
-EXPOSE 16385 16386 16387 16388 16389 16390
-
-CMD [ "/create_cluster5.sh"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.redis4 b/docker/base/Dockerfile.redis4
deleted file mode 100644
index 7528ac1631..0000000000
--- a/docker/base/Dockerfile.redis4
+++ /dev/null
@@ -1,4 +0,0 @@
-# produces redisfab/redis-py:4.0
-FROM redis:4.0-buster
-
-CMD ["redis-server", "/redis.conf"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.redis5 b/docker/base/Dockerfile.redis5
deleted file mode 100644
index 6bcbe20bfc..0000000000
--- a/docker/base/Dockerfile.redis5
+++ /dev/null
@@ -1,4 +0,0 @@
-# produces redisfab/redis-py:5.0
-FROM redis:5.0-buster
-
-CMD ["redis-server", "/redis.conf"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.redismod_cluster b/docker/base/Dockerfile.redismod_cluster
deleted file mode 100644
index 5b80e495fb..0000000000
--- a/docker/base/Dockerfile.redismod_cluster
+++ /dev/null
@@ -1,12 +0,0 @@
-# produces redisfab/redis-py-modcluster:6.2.6
-FROM redislabs/redismod:edge
-
-COPY create_redismod_cluster.sh /create_redismod_cluster.sh
-RUN chmod +x /create_redismod_cluster.sh
-
-EXPOSE 46379 46380 46381 46382 46383 46384
-
-ENV START_PORT=46379
-ENV END_PORT=46384
-ENTRYPOINT []
-CMD /create_redismod_cluster.sh
diff --git a/docker/base/Dockerfile.sentinel b/docker/base/Dockerfile.sentinel
deleted file mode 100644
index ef659e3004..0000000000
--- a/docker/base/Dockerfile.sentinel
+++ /dev/null
@@ -1,4 +0,0 @@
-# produces redisfab/redis-py-sentinel:6.2.6
-FROM redis:6.2.6-buster
-
-CMD ["redis-sentinel", "/sentinel.conf"]
diff --git a/docker/base/Dockerfile.sentinel4 b/docker/base/Dockerfile.sentinel4
deleted file mode 100644
index 45bb03e88e..0000000000
--- a/docker/base/Dockerfile.sentinel4
+++ /dev/null
@@ -1,4 +0,0 @@
-# produces redisfab/redis-py-sentinel:4.0
-FROM redis:4.0-buster
-
-CMD ["redis-sentinel", "/sentinel.conf"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.sentinel5 b/docker/base/Dockerfile.sentinel5
deleted file mode 100644
index 6958154e46..0000000000
--- a/docker/base/Dockerfile.sentinel5
+++ /dev/null
@@ -1,4 +0,0 @@
-# produces redisfab/redis-py-sentinel:5.0
-FROM redis:5.0-buster
-
-CMD ["redis-sentinel", "/sentinel.conf"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.stunnel b/docker/base/Dockerfile.stunnel
deleted file mode 100644
index bf4510907c..0000000000
--- a/docker/base/Dockerfile.stunnel
+++ /dev/null
@@ -1,11 +0,0 @@
-# produces redisfab/stunnel:latest
-FROM ubuntu:18.04
-
-RUN apt-get update -qq --fix-missing
-RUN apt-get upgrade -qqy
-RUN apt install -qqy stunnel
-RUN mkdir -p /etc/stunnel/conf.d
-RUN echo "foreground = yes\ninclude = /etc/stunnel/conf.d" > /etc/stunnel/stunnel.conf
-RUN chown -R root:root /etc/stunnel/
-
-CMD ["/usr/bin/stunnel"]
diff --git a/docker/base/Dockerfile.unstable b/docker/base/Dockerfile.unstable
deleted file mode 100644
index ab5b7fc6fb..0000000000
--- a/docker/base/Dockerfile.unstable
+++ /dev/null
@@ -1,18 +0,0 @@
-# produces redisfab/redis-py:unstable
-FROM ubuntu:bionic as builder
-RUN apt-get update
-RUN apt-get upgrade -y
-RUN apt-get install -y build-essential git
-RUN mkdir /build
-WORKDIR /build
-RUN git clone https://github.com/redis/redis
-WORKDIR /build/redis
-RUN make
-
-FROM ubuntu:bionic as runner
-COPY --from=builder /build/redis/src/redis-server /usr/bin/redis-server
-COPY --from=builder /build/redis/src/redis-cli /usr/bin/redis-cli
-COPY --from=builder /build/redis/src/redis-sentinel /usr/bin/redis-sentinel
-
-EXPOSE 6379
-CMD ["redis-server", "/redis.conf"]
diff --git a/docker/base/Dockerfile.unstable_cluster b/docker/base/Dockerfile.unstable_cluster
deleted file mode 100644
index 2e3ed55371..0000000000
--- a/docker/base/Dockerfile.unstable_cluster
+++ /dev/null
@@ -1,11 +0,0 @@
-# produces redisfab/redis-py-cluster:6.2.6
-FROM redisfab/redis-py:unstable-bionic
-
-COPY create_cluster.sh /create_cluster.sh
-RUN chmod +x /create_cluster.sh
-
-EXPOSE 6372 6373 6374 6375 6376 6377
-
-ENV START_PORT=6372
-ENV END_PORT=6377
-CMD ["/create_cluster.sh"]
diff --git a/docker/base/Dockerfile.unstable_sentinel b/docker/base/Dockerfile.unstable_sentinel
deleted file mode 100644
index fe6d062de8..0000000000
--- a/docker/base/Dockerfile.unstable_sentinel
+++ /dev/null
@@ -1,17 +0,0 @@
-# produces redisfab/redis-py-sentinel:unstable
-FROM ubuntu:bionic as builder
-RUN apt-get update
-RUN apt-get upgrade -y
-RUN apt-get install -y build-essential git
-RUN mkdir /build
-WORKDIR /build
-RUN git clone https://github.com/redis/redis
-WORKDIR /build/redis
-RUN make
-
-FROM ubuntu:bionic as runner
-COPY --from=builder /build/redis/src/redis-server /usr/bin/redis-server
-COPY --from=builder /build/redis/src/redis-cli /usr/bin/redis-cli
-COPY --from=builder /build/redis/src/redis-sentinel /usr/bin/redis-sentinel
-
-CMD ["redis-sentinel", "/sentinel.conf"]
diff --git a/docker/base/README.md b/docker/base/README.md
deleted file mode 100644
index a2f26a8106..0000000000
--- a/docker/base/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Dockers in this folder are built, and uploaded to the redisfab dockerhub store.
diff --git a/docker/base/create_cluster4.sh b/docker/base/create_cluster4.sh
deleted file mode 100755
index a39da58784..0000000000
--- a/docker/base/create_cluster4.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /bin/bash
-mkdir -p /nodes
-touch /nodes/nodemap
-for PORT in $(seq 16391 16396); do
- mkdir -p /nodes/$PORT
- if [[ -e /redis.conf ]]; then
- cp /redis.conf /nodes/$PORT/redis.conf
- else
- touch /nodes/$PORT/redis.conf
- fi
- cat << EOF >> /nodes/$PORT/redis.conf
-port ${PORT}
-cluster-enabled yes
-daemonize yes
-logfile /redis.log
-dir /nodes/$PORT
-EOF
- redis-server /nodes/$PORT/redis.conf
- if [ $? -ne 0 ]; then
- echo "Redis failed to start, exiting."
- exit 3
- fi
- echo 127.0.0.1:$PORT >> /nodes/nodemap
-done
-echo yes | redis-cli --cluster create $(seq -f 127.0.0.1:%g 16391 16396) --cluster-replicas 1
-tail -f /redis.log
\ No newline at end of file
diff --git a/docker/base/create_cluster5.sh b/docker/base/create_cluster5.sh
deleted file mode 100755
index 0c63d8e910..0000000000
--- a/docker/base/create_cluster5.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /bin/bash
-mkdir -p /nodes
-touch /nodes/nodemap
-for PORT in $(seq 16385 16390); do
- mkdir -p /nodes/$PORT
- if [[ -e /redis.conf ]]; then
- cp /redis.conf /nodes/$PORT/redis.conf
- else
- touch /nodes/$PORT/redis.conf
- fi
- cat << EOF >> /nodes/$PORT/redis.conf
-port ${PORT}
-cluster-enabled yes
-daemonize yes
-logfile /redis.log
-dir /nodes/$PORT
-EOF
- redis-server /nodes/$PORT/redis.conf
- if [ $? -ne 0 ]; then
- echo "Redis failed to start, exiting."
- exit 3
- fi
- echo 127.0.0.1:$PORT >> /nodes/nodemap
-done
-echo yes | redis-cli --cluster create $(seq -f 127.0.0.1:%g 16385 16390) --cluster-replicas 1
-tail -f /redis.log
\ No newline at end of file
diff --git a/docker/base/create_redismod_cluster.sh b/docker/base/create_redismod_cluster.sh
deleted file mode 100755
index 20443a4c42..0000000000
--- a/docker/base/create_redismod_cluster.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#! /bin/bash
-
-mkdir -p /nodes
-touch /nodes/nodemap
-if [ -z ${START_PORT} ]; then
- START_PORT=46379
-fi
-if [ -z ${END_PORT} ]; then
- END_PORT=46384
-fi
-if [ ! -z "$3" ]; then
- START_PORT=$2
- START_PORT=$3
-fi
-echo "STARTING: ${START_PORT}"
-echo "ENDING: ${END_PORT}"
-
-for PORT in `seq ${START_PORT} ${END_PORT}`; do
- mkdir -p /nodes/$PORT
- if [[ -e /redis.conf ]]; then
- cp /redis.conf /nodes/$PORT/redis.conf
- else
- touch /nodes/$PORT/redis.conf
- fi
- cat << EOF >> /nodes/$PORT/redis.conf
-port ${PORT}
-cluster-enabled yes
-daemonize yes
-logfile /redis.log
-dir /nodes/$PORT
-EOF
-
- set -x
- redis-server /nodes/$PORT/redis.conf
- if [ $? -ne 0 ]; then
- echo "Redis failed to start, exiting."
- continue
- fi
- echo 127.0.0.1:$PORT >> /nodes/nodemap
-done
-if [ -z "${REDIS_PASSWORD}" ]; then
- echo yes | redis-cli --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
-else
- echo yes | redis-cli -a ${REDIS_PASSWORD} --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
-fi
-tail -f /redis.log
diff --git a/docker/cluster/redis.conf b/docker/cluster/redis.conf
deleted file mode 100644
index dff658c79b..0000000000
--- a/docker/cluster/redis.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# Redis Cluster config file will be shared across all nodes.
-# Do not change the following configurations that are already set:
-# port, cluster-enabled, daemonize, logfile, dir
diff --git a/docker/redis4/master/redis.conf b/docker/redis4/master/redis.conf
deleted file mode 100644
index b7ed0ebf00..0000000000
--- a/docker/redis4/master/redis.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-port 6381
-save ""
diff --git a/docker/redis4/sentinel/sentinel_1.conf b/docker/redis4/sentinel/sentinel_1.conf
deleted file mode 100644
index cfee17c051..0000000000
--- a/docker/redis4/sentinel/sentinel_1.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-port 26385
-
-sentinel monitor redis-py-test 127.0.0.1 6381 2
-sentinel down-after-milliseconds redis-py-test 5000
-sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis4/sentinel/sentinel_2.conf b/docker/redis4/sentinel/sentinel_2.conf
deleted file mode 100644
index 68d930aea8..0000000000
--- a/docker/redis4/sentinel/sentinel_2.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-port 26386
-
-sentinel monitor redis-py-test 127.0.0.1 6381 2
-sentinel down-after-milliseconds redis-py-test 5000
-sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/redis4/sentinel/sentinel_3.conf b/docker/redis4/sentinel/sentinel_3.conf
deleted file mode 100644
index 60abf65c9b..0000000000
--- a/docker/redis4/sentinel/sentinel_3.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-port 26387
-
-sentinel monitor redis-py-test 127.0.0.1 6381 2
-sentinel down-after-milliseconds redis-py-test 5000
-sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/redis5/master/redis.conf b/docker/redis5/master/redis.conf
deleted file mode 100644
index e479c48b28..0000000000
--- a/docker/redis5/master/redis.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-port 6382
-save ""
diff --git a/docker/redis5/replica/redis.conf b/docker/redis5/replica/redis.conf
deleted file mode 100644
index a2dc9e0945..0000000000
--- a/docker/redis5/replica/redis.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-port 6383
-save ""
-replicaof master 6382
diff --git a/docker/redis5/sentinel/sentinel_1.conf b/docker/redis5/sentinel/sentinel_1.conf
deleted file mode 100644
index c748a0ba72..0000000000
--- a/docker/redis5/sentinel/sentinel_1.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-port 26382
-
-sentinel monitor redis-py-test 127.0.0.1 6382 2
-sentinel down-after-milliseconds redis-py-test 5000
-sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis5/sentinel/sentinel_2.conf b/docker/redis5/sentinel/sentinel_2.conf
deleted file mode 100644
index 0a50c9a623..0000000000
--- a/docker/redis5/sentinel/sentinel_2.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-port 26383
-
-sentinel monitor redis-py-test 127.0.0.1 6382 2
-sentinel down-after-milliseconds redis-py-test 5000
-sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/redis5/sentinel/sentinel_3.conf b/docker/redis5/sentinel/sentinel_3.conf
deleted file mode 100644
index a0e350ba0f..0000000000
--- a/docker/redis5/sentinel/sentinel_3.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-port 26384
-
-sentinel monitor redis-py-test 127.0.0.1 6383 2
-sentinel down-after-milliseconds redis-py-test 5000
-sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/redis6.2/master/redis.conf b/docker/redis6.2/master/redis.conf
deleted file mode 100644
index 15a31b5a38..0000000000
--- a/docker/redis6.2/master/redis.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-port 6379
-save ""
diff --git a/docker/redis6.2/replica/redis.conf b/docker/redis6.2/replica/redis.conf
deleted file mode 100644
index a76d402c5e..0000000000
--- a/docker/redis6.2/replica/redis.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-port 6380
-save ""
-replicaof master 6379
diff --git a/docker/redis6.2/sentinel/sentinel_2.conf b/docker/redis6.2/sentinel/sentinel_2.conf
deleted file mode 100644
index 955621b872..0000000000
--- a/docker/redis6.2/sentinel/sentinel_2.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-port 26380
-
-sentinel monitor redis-py-test 127.0.0.1 6379 2
-sentinel down-after-milliseconds redis-py-test 5000
-sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis6.2/sentinel/sentinel_3.conf b/docker/redis6.2/sentinel/sentinel_3.conf
deleted file mode 100644
index 62c40512f1..0000000000
--- a/docker/redis6.2/sentinel/sentinel_3.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-port 26381
-
-sentinel monitor redis-py-test 127.0.0.1 6379 2
-sentinel down-after-milliseconds redis-py-test 5000
-sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis7/master/redis.conf b/docker/redis7/master/redis.conf
deleted file mode 100644
index ef57c1fe99..0000000000
--- a/docker/redis7/master/redis.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-port 6379
-save ""
-enable-debug-command yes
-enable-module-command yes
\ No newline at end of file
diff --git a/docker/redismod_cluster/redis.conf b/docker/redismod_cluster/redis.conf
deleted file mode 100644
index 48f06668a8..0000000000
--- a/docker/redismod_cluster/redis.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-loadmodule /usr/lib/redis/modules/redisai.so
-loadmodule /usr/lib/redis/modules/redisearch.so
-loadmodule /usr/lib/redis/modules/redisgraph.so
-loadmodule /usr/lib/redis/modules/redistimeseries.so
-loadmodule /usr/lib/redis/modules/rejson.so
-loadmodule /usr/lib/redis/modules/redisbloom.so
-loadmodule /var/opt/redislabs/lib/modules/redisgears.so Plugin /var/opt/redislabs/modules/rg/plugin/gears_python.so Plugin /var/opt/redislabs/modules/rg/plugin/gears_jvm.so JvmOptions -Djava.class.path=/var/opt/redislabs/modules/rg/gear_runtime-jar-with-dependencies.jar JvmPath /var/opt/redislabs/modules/rg/OpenJDK/jdk-11.0.9.1+1/
-
diff --git a/docker/unstable/redis.conf b/docker/unstable/redis.conf
deleted file mode 100644
index 93a55cf3b3..0000000000
--- a/docker/unstable/redis.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-port 6378
-protected-mode no
-save ""
diff --git a/docker/unstable_cluster/redis.conf b/docker/unstable_cluster/redis.conf
deleted file mode 100644
index f307a63757..0000000000
--- a/docker/unstable_cluster/redis.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-# Redis Cluster config file will be shared across all nodes.
-# Do not change the following configurations that are already set:
-# port, cluster-enabled, daemonize, logfile, dir
-protected-mode no
diff --git a/dockers/Dockerfile.cluster b/dockers/Dockerfile.cluster
new file mode 100644
index 0000000000..204232a665
--- /dev/null
+++ b/dockers/Dockerfile.cluster
@@ -0,0 +1,7 @@
+FROM redis/redis-stack-server:latest as rss
+
+COPY dockers/create_cluster.sh /create_cluster.sh
+RUN ls -R /opt/redis-stack
+RUN chmod a+x /create_cluster.sh
+
+ENTRYPOINT [ "/create_cluster.sh"]
diff --git a/dockers/cluster.redis.conf b/dockers/cluster.redis.conf
new file mode 100644
index 0000000000..26da33567a
--- /dev/null
+++ b/dockers/cluster.redis.conf
@@ -0,0 +1,6 @@
+protected-mode no
+loadmodule /opt/redis-stack/lib/redisearch.so
+loadmodule /opt/redis-stack/lib/redisgraph.so
+loadmodule /opt/redis-stack/lib/redistimeseries.so
+loadmodule /opt/redis-stack/lib/rejson.so
+loadmodule /opt/redis-stack/lib/redisbloom.so
diff --git a/docker/base/create_cluster.sh b/dockers/create_cluster.sh
old mode 100755
new mode 100644
similarity index 75%
rename from docker/base/create_cluster.sh
rename to dockers/create_cluster.sh
index fcb1b1cd8d..da9a0cb606
--- a/docker/base/create_cluster.sh
+++ b/dockers/create_cluster.sh
@@ -31,7 +31,8 @@ dir /nodes/$PORT
EOF
set -x
- redis-server /nodes/$PORT/redis.conf
+  /opt/redis-stack/bin/redis-server /nodes/$PORT/redis.conf &&
+  sleep 1
if [ $? -ne 0 ]; then
echo "Redis failed to start, exiting."
continue
@@ -39,8 +40,8 @@ EOF
echo 127.0.0.1:$PORT >> /nodes/nodemap
done
if [ -z "${REDIS_PASSWORD}" ]; then
- echo yes | redis-cli --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
+ echo yes | /opt/redis-stack/bin/redis-cli --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
else
- echo yes | redis-cli -a ${REDIS_PASSWORD} --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
+  echo yes | /opt/redis-stack/bin/redis-cli -a ${REDIS_PASSWORD} --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
fi
tail -f /redis.log
diff --git a/docker/redis6.2/sentinel/sentinel_1.conf b/dockers/sentinel.conf
similarity index 73%
rename from docker/redis6.2/sentinel/sentinel_1.conf
rename to dockers/sentinel.conf
index bd2d830af3..1a33f53344 100644
--- a/docker/redis6.2/sentinel/sentinel_1.conf
+++ b/dockers/sentinel.conf
@@ -1,6 +1,4 @@
-port 26379
-
sentinel monitor redis-py-test 127.0.0.1 6379 2
sentinel down-after-milliseconds redis-py-test 5000
sentinel failover-timeout redis-py-test 60000
-sentinel parallel-syncs redis-py-test 1
+sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/stunnel/README b/dockers/stunnel/README
similarity index 100%
rename from docker/stunnel/README
rename to dockers/stunnel/README
diff --git a/docker/stunnel/conf/redis.conf b/dockers/stunnel/conf/redis.conf
similarity index 83%
rename from docker/stunnel/conf/redis.conf
rename to dockers/stunnel/conf/redis.conf
index 84f6d40133..a150d8b011 100644
--- a/docker/stunnel/conf/redis.conf
+++ b/dockers/stunnel/conf/redis.conf
@@ -1,6 +1,6 @@
[redis]
accept = 6666
-connect = master:6379
+connect = redis:6379
cert = /etc/stunnel/keys/server-cert.pem
key = /etc/stunnel/keys/server-key.pem
verify = 0
diff --git a/docker/stunnel/create_certs.sh b/dockers/stunnel/create_certs.sh
similarity index 100%
rename from docker/stunnel/create_certs.sh
rename to dockers/stunnel/create_certs.sh
diff --git a/docker/stunnel/keys/ca-cert.pem b/dockers/stunnel/keys/ca-cert.pem
similarity index 100%
rename from docker/stunnel/keys/ca-cert.pem
rename to dockers/stunnel/keys/ca-cert.pem
diff --git a/docker/stunnel/keys/ca-key.pem b/dockers/stunnel/keys/ca-key.pem
similarity index 100%
rename from docker/stunnel/keys/ca-key.pem
rename to dockers/stunnel/keys/ca-key.pem
diff --git a/docker/stunnel/keys/client-cert.pem b/dockers/stunnel/keys/client-cert.pem
similarity index 100%
rename from docker/stunnel/keys/client-cert.pem
rename to dockers/stunnel/keys/client-cert.pem
diff --git a/docker/stunnel/keys/client-key.pem b/dockers/stunnel/keys/client-key.pem
similarity index 100%
rename from docker/stunnel/keys/client-key.pem
rename to dockers/stunnel/keys/client-key.pem
diff --git a/docker/stunnel/keys/client-req.pem b/dockers/stunnel/keys/client-req.pem
similarity index 100%
rename from docker/stunnel/keys/client-req.pem
rename to dockers/stunnel/keys/client-req.pem
diff --git a/docker/stunnel/keys/server-cert.pem b/dockers/stunnel/keys/server-cert.pem
similarity index 100%
rename from docker/stunnel/keys/server-cert.pem
rename to dockers/stunnel/keys/server-cert.pem
diff --git a/docker/stunnel/keys/server-key.pem b/dockers/stunnel/keys/server-key.pem
similarity index 100%
rename from docker/stunnel/keys/server-key.pem
rename to dockers/stunnel/keys/server-key.pem
diff --git a/docker/stunnel/keys/server-req.pem b/dockers/stunnel/keys/server-req.pem
similarity index 100%
rename from docker/stunnel/keys/server-req.pem
rename to dockers/stunnel/keys/server-req.pem
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000000..f1b716ae96
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,13 @@
+[pytest]
+addopts = -s
+markers =
+ redismod: run only the redis module tests
+ pipeline: pipeline tests
+ onlycluster: marks tests to be run only with cluster mode redis
+ onlynoncluster: marks tests to be run only with standalone redis
+ ssl: marker for only the ssl tests
+ asyncio: marker for async tests
+ replica: replica tests
+ experimental: run only experimental tests
+asyncio_mode = auto
+timeout = 30
diff --git a/redis/asyncio/connection.py b/redis/asyncio/connection.py
index c64e282fe0..bf6274922e 100644
--- a/redis/asyncio/connection.py
+++ b/redis/asyncio/connection.py
@@ -30,10 +30,10 @@
else:
from async_timeout import timeout as async_timeout
-
from redis.asyncio.retry import Retry
from redis.backoff import NoBackoff
from redis.compat import Protocol, TypedDict
+from redis.connection import DEFAULT_RESP_VERSION
from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
from redis.exceptions import (
AuthenticationError,
@@ -203,7 +203,16 @@ def __init__(
self.set_parser(parser_class)
self._connect_callbacks: List[weakref.WeakMethod[ConnectCallbackT]] = []
self._buffer_cutoff = 6000
- self.protocol = protocol
+        try:
+            p = int(protocol)
+        except TypeError:
+            p = DEFAULT_RESP_VERSION
+        except ValueError:
+            raise ConnectionError("protocol must be an integer")
+        # range check outside try/finally: in a finally clause, `p` would be
+        # unbound after a ValueError and raise UnboundLocalError instead
+        if p < 2 or p > 3:
+            raise ConnectionError("protocol must be either 2 or 3")
def __repr__(self):
repr_args = ",".join((f"{k}={v}" for k, v in self.repr_pieces()))
@@ -386,10 +395,10 @@ async def on_connect(self) -> None:
self._parser.on_connect(self)
await self.send_command("HELLO", self.protocol)
response = await self.read_response()
- if response.get(b"proto") != int(self.protocol) and response.get(
- "proto"
- ) != int(self.protocol):
- raise ConnectionError("Invalid RESP version")
+ # if response.get(b"proto") != self.protocol and response.get(
+ # "proto"
+ # ) != self.protocol:
+ # raise ConnectionError("Invalid RESP version")
# if a client_name is given, set it
if self.client_name:
diff --git a/redis/client.py b/redis/client.py
index cbe8a2ee33..31a7558194 100755
--- a/redis/client.py
+++ b/redis/client.py
@@ -729,7 +729,7 @@ class AbstractRedis:
**string_keys_to_dict("EXPIRE EXPIREAT PEXPIRE PEXPIREAT AUTH", bool),
**string_keys_to_dict("EXISTS", int),
**string_keys_to_dict("INCRBYFLOAT HINCRBYFLOAT", float),
- **string_keys_to_dict("READONLY", bool_ok),
+ **string_keys_to_dict("READONLY MSET", bool_ok),
"CLUSTER DELSLOTS": bool_ok,
"CLUSTER ADDSLOTS": bool_ok,
"COMMAND": parse_command,
@@ -794,6 +794,9 @@ class AbstractRedis:
"CONFIG SET": bool_ok,
**string_keys_to_dict("XREVRANGE XRANGE", parse_stream_list),
"XCLAIM": parse_xclaim,
+ "CLUSTER SET-CONFIG-EPOCH": bool_ok,
+ "CLUSTER REPLICAS": parse_cluster_nodes,
+ "ACL LIST": lambda r: list(map(str_if_bytes, r)),
}
RESP2_RESPONSE_CALLBACKS = {
@@ -801,6 +804,7 @@ class AbstractRedis:
**string_keys_to_dict(
"SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set()
),
+ **string_keys_to_dict("READWRITE", bool_ok),
**string_keys_to_dict(
"ZPOPMAX ZPOPMIN ZINTER ZDIFF ZUNION ZRANGE ZRANGEBYSCORE "
"ZREVRANGE ZREVRANGEBYSCORE",
@@ -813,7 +817,6 @@ class AbstractRedis:
"MEMORY STATS": parse_memory_stats,
"MODULE LIST": lambda r: [pairs_to_dict(m) for m in r],
"STRALGO": parse_stralgo,
- "ACL LIST": lambda r: list(map(str_if_bytes, r)),
# **string_keys_to_dict(
# "COPY "
# "HEXISTS HMSET MOVE MSETNX PERSIST "
@@ -828,7 +831,7 @@ class AbstractRedis:
# int,
# ),
# **string_keys_to_dict(
- # "FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE ASKING READWRITE "
+ # "FLUSHALL FLUSHDB LSET LTRIM PFMERGE ASKING "
# "RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ",
# bool_ok,
# ),
@@ -843,8 +846,6 @@ class AbstractRedis:
# "CLUSTER ADDSLOTSRANGE": bool_ok,
# "CLUSTER DELSLOTSRANGE": bool_ok,
# "CLUSTER GETKEYSINSLOT": lambda r: list(map(str_if_bytes, r)),
- # "CLUSTER REPLICAS": parse_cluster_nodes,
- # "CLUSTER SET-CONFIG-EPOCH": bool_ok,
# "CONFIG RESETSTAT": bool_ok,
# "DEBUG OBJECT": parse_debug_object,
# "FUNCTION DELETE": bool_ok,
diff --git a/redis/cluster.py b/redis/cluster.py
index 898db29cdc..c09faa1042 100644
--- a/redis/cluster.py
+++ b/redis/cluster.py
@@ -251,7 +251,6 @@ class AbstractRedisCluster:
"CLIENT INFO",
"CLIENT KILL",
"READONLY",
- "READWRITE",
"CLUSTER INFO",
"CLUSTER MEET",
"CLUSTER NODES",
diff --git a/redis/compat.py b/redis/compat.py
index 738687f645..e478493467 100644
--- a/redis/compat.py
+++ b/redis/compat.py
@@ -2,8 +2,5 @@
try:
from typing import Literal, Protocol, TypedDict # lgtm [py/unused-import]
except ImportError:
- from typing_extensions import ( # lgtm [py/unused-import]
- Literal,
- Protocol,
- TypedDict,
- )
+ from typing_extensions import Literal # lgtm [py/unused-import]
+ from typing_extensions import Protocol, TypedDict
diff --git a/redis/connection.py b/redis/connection.py
index 023edd3fef..8c5c5a6ea7 100644
--- a/redis/connection.py
+++ b/redis/connection.py
@@ -42,6 +42,8 @@
SYM_CRLF = b"\r\n"
SYM_EMPTY = b""
+DEFAULT_RESP_VERSION = 2
+
SENTINEL = object()
DefaultParser: Type[Union[_RESP2Parser, _RESP3Parser, _HiredisParser]]
@@ -189,7 +191,17 @@ def __init__(
self.set_parser(parser_class)
self._connect_callbacks = []
self._buffer_cutoff = 6000
- self.protocol = protocol
+ try:
+ p = int(protocol)
+ except TypeError:
+ p = DEFAULT_RESP_VERSION
+ except ValueError:
+ raise ConnectionError("protocol must be an integer")
+ else:
+ if p < 2 or p > 3:
+ raise ConnectionError("protocol must be either 2 or 3")
+ # p = DEFAULT_RESP_VERSION
+ self.protocol = p
self._command_packer = self._construct_command_packer(command_packer)
def __repr__(self):
@@ -286,6 +298,7 @@ def on_connect(self):
or UsernamePasswordCredentialProvider(self.username, self.password)
)
auth_args = cred_provider.get_credentials()
+
# if resp version is specified and we have auth args,
# we need to send them via HELLO
if auth_args and self.protocol not in [2, "2"]:
@@ -298,10 +311,10 @@ def on_connect(self):
auth_args = ["default", auth_args[0]]
self.send_command("HELLO", self.protocol, "AUTH", *auth_args)
response = self.read_response()
- if response.get(b"proto") != int(self.protocol) and response.get(
- "proto"
- ) != int(self.protocol):
- raise ConnectionError("Invalid RESP version")
+ # if response.get(b"proto") != self.protocol and response.get(
+ # "proto"
+ # ) != self.protocol:
+ # raise ConnectionError("Invalid RESP version")
elif auth_args:
# avoid checking health here -- PING will fail if we try
# to check the health prior to the AUTH
@@ -329,9 +342,10 @@ def on_connect(self):
self._parser.on_connect(self)
self.send_command("HELLO", self.protocol)
response = self.read_response()
- if response.get(b"proto") != int(self.protocol) and response.get(
- "proto"
- ) != int(self.protocol):
+ if (
+ response.get(b"proto") != self.protocol
+ and response.get("proto") != self.protocol
+ ):
raise ConnectionError("Invalid RESP version")
# if a client_name is given, set it
diff --git a/redis/ocsp.py b/redis/ocsp.py
index ab8a35a33d..b0420b4711 100644
--- a/redis/ocsp.py
+++ b/redis/ocsp.py
@@ -15,7 +15,6 @@
from cryptography.hazmat.primitives.hashes import SHA1, Hash
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
from cryptography.x509 import ocsp
-
from redis.exceptions import AuthorizationError, ConnectionError
diff --git a/tasks.py b/tasks.py
index 64b3aef80f..5162566183 100644
--- a/tasks.py
+++ b/tasks.py
@@ -1,69 +1,81 @@
+# https://github.com/pyinvoke/invoke/issues/833
+import inspect
import os
import shutil
from invoke import run, task
-with open("tox.ini") as fp:
- lines = fp.read().split("\n")
- dockers = [line.split("=")[1].strip() for line in lines if line.find("name") != -1]
+if not hasattr(inspect, "getargspec"):
+ inspect.getargspec = inspect.getfullargspec
@task
def devenv(c):
- """Builds a development environment: downloads, and starts all dockers
- specified in the tox.ini file.
- """
+ """Brings up the test environment, by wrapping docker compose."""
clean(c)
- cmd = "tox -e devenv"
- for d in dockers:
- cmd += f" --docker-dont-stop={d}"
+ cmd = "docker-compose --profile all up -d"
run(cmd)
@task
def build_docs(c):
"""Generates the sphinx documentation."""
- run("tox -e docs")
+ run("pip install -r docs/requirements.txt")
+ run("make html")
@task
def linters(c):
"""Run code linters"""
- run("tox -e linters")
+ run("flake8 tests redis")
+ run("black --target-version py37 --check --diff tests redis")
+ run("isort --check-only --diff tests redis")
+ run("vulture redis whitelist.py --min-confidence 80")
+ run("flynt --fail-on-change --dry-run tests redis")
@task
def all_tests(c):
- """Run all linters, and tests in redis-py. This assumes you have all
- the python versions specified in the tox.ini file.
- """
+ """Run all linters, and tests in redis-py."""
linters(c)
tests(c)
@task
-def tests(c):
+def tests(c, uvloop=False, protocol=2):
"""Run the redis-py test suite against the current python,
with and without hiredis.
"""
print("Starting Redis tests")
- run("tox -e '{standalone,cluster}'-'{plain,hiredis}'")
+ standalone_tests(c, uvloop=uvloop, protocol=protocol)
+ cluster_tests(c, uvloop=uvloop, protocol=protocol)
@task
-def standalone_tests(c):
- """Run all Redis tests against the current python,
- with and without hiredis."""
- print("Starting Redis tests")
- run("tox -e standalone-'{plain,hiredis,ocsp}'")
+def standalone_tests(c, uvloop=False, protocol=2):
+ """Run tests against a standalone redis instance"""
+ if uvloop:
+ run(
+ f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --uvloop --junit-xml=standalone-uvloop-results.xml"
+ )
+ else:
+ run(
+ f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --junit-xml=standalone-results.xml"
+ )
@task
-def cluster_tests(c):
- """Run all Redis Cluster tests against the current python,
- with and without hiredis."""
- print("Starting RedisCluster tests")
- run("tox -e cluster-'{plain,hiredis}'")
+def cluster_tests(c, uvloop=False, protocol=2):
+ """Run tests against a redis cluster"""
+ cluster_url = "redis://localhost:16379/0"
+ if uvloop:
+ run(
+ f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={cluster_url} --junit-xml=cluster-uvloop-results.xml --uvloop"
+ )
+ else:
+ run(
+ f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={cluster_url} --junit-xml=cluster-results.xml"
+ )
@task
@@ -73,7 +85,7 @@ def clean(c):
shutil.rmtree("build")
if os.path.isdir("dist"):
shutil.rmtree("dist")
- run(f"docker rm -f {' '.join(dockers)}")
+ run("docker-compose --profile all rm -s -f")
@task
diff --git a/tests/conftest.py b/tests/conftest.py
index 6454750353..1d9bc44375 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -6,9 +6,8 @@
from urllib.parse import urlparse
import pytest
-from packaging.version import Version
-
import redis
+from packaging.version import Version
from redis.backoff import NoBackoff
from redis.connection import parse_url
from redis.exceptions import RedisClusterException
@@ -16,8 +15,8 @@
REDIS_INFO = {}
default_redis_url = "redis://localhost:6379/0"
-default_redismod_url = "redis://localhost:36379"
-default_redis_unstable_url = "redis://localhost:6378"
+default_protocol = "2"
+default_redismod_url = "redis://localhost:6379"
# default ssl client ignores verification for the purpose of testing
default_redis_ssl_url = "rediss://localhost:6666"
@@ -73,6 +72,7 @@ def format_usage(self):
def pytest_addoption(parser):
+
parser.addoption(
"--redis-url",
default=default_redis_url,
@@ -81,14 +81,11 @@ def pytest_addoption(parser):
)
parser.addoption(
- "--redismod-url",
- default=default_redismod_url,
+ "--protocol",
+ default=default_protocol,
action="store",
- help="Connection string to redis server"
- " with loaded modules,"
- " defaults to `%(default)s`",
+ help="Protocol version, defaults to `%(default)s`",
)
-
parser.addoption(
"--redis-ssl-url",
default=default_redis_ssl_url,
@@ -105,13 +102,6 @@ def pytest_addoption(parser):
" defaults to `%(default)s`",
)
- parser.addoption(
- "--redis-unstable-url",
- default=default_redis_unstable_url,
- action="store",
- help="Redis unstable (latest version) connection string "
- "defaults to %(default)s`",
- )
parser.addoption(
"--uvloop", action=BooleanOptionalAction, help="Run tests with uvloop"
)
@@ -152,10 +142,8 @@ def pytest_sessionstart(session):
# store REDIS_INFO in config so that it is available from "condition strings"
session.config.REDIS_INFO = REDIS_INFO
- # module info, if the second redis is running
+ # module info
try:
- redismod_url = session.config.getoption("--redismod-url")
- info = _get_info(redismod_url)
REDIS_INFO["modules"] = info["modules"]
except redis.exceptions.ConnectionError:
pass
@@ -289,6 +277,9 @@ def _get_client(
redis_url = request.config.getoption("--redis-url")
else:
redis_url = from_url
+ if "protocol" not in redis_url:
+ kwargs["protocol"] = request.config.getoption("--protocol")
+
cluster_mode = REDIS_INFO["cluster_enabled"]
if not cluster_mode:
url_options = parse_url(redis_url)
@@ -332,20 +323,15 @@ def cluster_teardown(client, flushdb):
client.disconnect_connection_pools()
-# specifically set to the zero database, because creating
-# an index on db != 0 raises a ResponseError in redis
@pytest.fixture()
-def modclient(request, **kwargs):
- rmurl = request.config.getoption("--redismod-url")
- with _get_client(
- redis.Redis, request, from_url=rmurl, decode_responses=True, **kwargs
- ) as client:
+def r(request):
+ with _get_client(redis.Redis, request) as client:
yield client
@pytest.fixture()
-def r(request):
- with _get_client(redis.Redis, request) as client:
+def decoded_r(request):
+ with _get_client(redis.Redis, request, decode_responses=True) as client:
yield client
@@ -444,15 +430,6 @@ def master_host(request):
yield parts.hostname, parts.port
-@pytest.fixture()
-def unstable_r(request):
- url = request.config.getoption("--redis-unstable-url")
- with _get_client(
- redis.Redis, request, from_url=url, decode_responses=True
- ) as client:
- yield client
-
-
def wait_for_command(client, monitor, command, key=None):
# issue a command with a key name that's local to this process.
# if we find a command with our key before the command we're waiting
diff --git a/tests/test_asyncio/conftest.py b/tests/test_asyncio/conftest.py
index 28a6f0626f..ac18f6c12d 100644
--- a/tests/test_asyncio/conftest.py
+++ b/tests/test_asyncio/conftest.py
@@ -5,9 +5,8 @@
import pytest
import pytest_asyncio
-from packaging.version import Version
-
import redis.asyncio as redis
+from packaging.version import Version
from redis.asyncio.client import Monitor
from redis.asyncio.connection import parse_url
from redis.asyncio.retry import Retry
@@ -71,8 +70,12 @@ async def client_factory(
url: str = request.config.getoption("--redis-url"),
cls=redis.Redis,
flushdb=True,
+ protocol=request.config.getoption("--protocol"),
**kwargs,
):
+ if "protocol" not in url:
+ kwargs["protocol"] = request.config.getoption("--protocol")
+
cluster_mode = REDIS_INFO["cluster_enabled"]
if not cluster_mode:
single = kwargs.pop("single_connection_client", False) or single_connection
@@ -131,10 +134,8 @@ async def r2(create_redis):
@pytest_asyncio.fixture()
-async def modclient(request, create_redis):
- return await create_redis(
- url=request.config.getoption("--redismod-url"), decode_responses=True
- )
+async def decoded_r(create_redis):
+ return await create_redis(decode_responses=True)
def _gen_cluster_mock_resp(r, response):
diff --git a/tests/test_asyncio/test_bloom.py b/tests/test_asyncio/test_bloom.py
index bb1f0d58ad..0535ddfe02 100644
--- a/tests/test_asyncio/test_bloom.py
+++ b/tests/test_asyncio/test_bloom.py
@@ -1,7 +1,6 @@
from math import inf
import pytest
-
import redis.asyncio as redis
from redis.exceptions import ModuleError, RedisError
from redis.utils import HIREDIS_AVAILABLE
@@ -16,77 +15,65 @@ def intlist(obj):
return [int(v) for v in obj]
-# @pytest.fixture
-# async def client(modclient):
-# assert isinstance(modawait modclient.bf(), redis.commands.bf.BFBloom)
-# assert isinstance(modawait modclient.cf(), redis.commands.bf.CFBloom)
-# assert isinstance(modawait modclient.cms(), redis.commands.bf.CMSBloom)
-# assert isinstance(modawait modclient.tdigest(), redis.commands.bf.TDigestBloom)
-# assert isinstance(modawait modclient.topk(), redis.commands.bf.TOPKBloom)
-
-# modawait modclient.flushdb()
-# return modclient
-
-
@pytest.mark.redismod
-async def test_create(modclient: redis.Redis):
+async def test_create(decoded_r: redis.Redis):
"""Test CREATE/RESERVE calls"""
- assert await modclient.bf().create("bloom", 0.01, 1000)
- assert await modclient.bf().create("bloom_e", 0.01, 1000, expansion=1)
- assert await modclient.bf().create("bloom_ns", 0.01, 1000, noScale=True)
- assert await modclient.cf().create("cuckoo", 1000)
- assert await modclient.cf().create("cuckoo_e", 1000, expansion=1)
- assert await modclient.cf().create("cuckoo_bs", 1000, bucket_size=4)
- assert await modclient.cf().create("cuckoo_mi", 1000, max_iterations=10)
- assert await modclient.cms().initbydim("cmsDim", 100, 5)
- assert await modclient.cms().initbyprob("cmsProb", 0.01, 0.01)
- assert await modclient.topk().reserve("topk", 5, 100, 5, 0.9)
+ assert await decoded_r.bf().create("bloom", 0.01, 1000)
+ assert await decoded_r.bf().create("bloom_e", 0.01, 1000, expansion=1)
+ assert await decoded_r.bf().create("bloom_ns", 0.01, 1000, noScale=True)
+ assert await decoded_r.cf().create("cuckoo", 1000)
+ assert await decoded_r.cf().create("cuckoo_e", 1000, expansion=1)
+ assert await decoded_r.cf().create("cuckoo_bs", 1000, bucket_size=4)
+ assert await decoded_r.cf().create("cuckoo_mi", 1000, max_iterations=10)
+ assert await decoded_r.cms().initbydim("cmsDim", 100, 5)
+ assert await decoded_r.cms().initbyprob("cmsProb", 0.01, 0.01)
+ assert await decoded_r.topk().reserve("topk", 5, 100, 5, 0.9)
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_create(modclient: redis.Redis):
- assert await modclient.tdigest().create("tDigest", 100)
+async def test_tdigest_create(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("tDigest", 100)
@pytest.mark.redismod
-async def test_bf_add(modclient: redis.Redis):
- assert await modclient.bf().create("bloom", 0.01, 1000)
- assert 1 == await modclient.bf().add("bloom", "foo")
- assert 0 == await modclient.bf().add("bloom", "foo")
- assert [0] == intlist(await modclient.bf().madd("bloom", "foo"))
- assert [0, 1] == await modclient.bf().madd("bloom", "foo", "bar")
- assert [0, 0, 1] == await modclient.bf().madd("bloom", "foo", "bar", "baz")
- assert 1 == await modclient.bf().exists("bloom", "foo")
- assert 0 == await modclient.bf().exists("bloom", "noexist")
- assert [1, 0] == intlist(await modclient.bf().mexists("bloom", "foo", "noexist"))
+async def test_bf_add(decoded_r: redis.Redis):
+ assert await decoded_r.bf().create("bloom", 0.01, 1000)
+ assert 1 == await decoded_r.bf().add("bloom", "foo")
+ assert 0 == await decoded_r.bf().add("bloom", "foo")
+ assert [0] == intlist(await decoded_r.bf().madd("bloom", "foo"))
+ assert [0, 1] == await decoded_r.bf().madd("bloom", "foo", "bar")
+ assert [0, 0, 1] == await decoded_r.bf().madd("bloom", "foo", "bar", "baz")
+ assert 1 == await decoded_r.bf().exists("bloom", "foo")
+ assert 0 == await decoded_r.bf().exists("bloom", "noexist")
+ assert [1, 0] == intlist(await decoded_r.bf().mexists("bloom", "foo", "noexist"))
@pytest.mark.redismod
-async def test_bf_insert(modclient: redis.Redis):
- assert await modclient.bf().create("bloom", 0.01, 1000)
- assert [1] == intlist(await modclient.bf().insert("bloom", ["foo"]))
- assert [0, 1] == intlist(await modclient.bf().insert("bloom", ["foo", "bar"]))
- assert [1] == intlist(await modclient.bf().insert("captest", ["foo"], capacity=10))
- assert [1] == intlist(await modclient.bf().insert("errtest", ["foo"], error=0.01))
- assert 1 == await modclient.bf().exists("bloom", "foo")
- assert 0 == await modclient.bf().exists("bloom", "noexist")
- assert [1, 0] == intlist(await modclient.bf().mexists("bloom", "foo", "noexist"))
- info = await modclient.bf().info("bloom")
+async def test_bf_insert(decoded_r: redis.Redis):
+ assert await decoded_r.bf().create("bloom", 0.01, 1000)
+ assert [1] == intlist(await decoded_r.bf().insert("bloom", ["foo"]))
+ assert [0, 1] == intlist(await decoded_r.bf().insert("bloom", ["foo", "bar"]))
+ assert [1] == intlist(await decoded_r.bf().insert("captest", ["foo"], capacity=10))
+ assert [1] == intlist(await decoded_r.bf().insert("errtest", ["foo"], error=0.01))
+ assert 1 == await decoded_r.bf().exists("bloom", "foo")
+ assert 0 == await decoded_r.bf().exists("bloom", "noexist")
+ assert [1, 0] == intlist(await decoded_r.bf().mexists("bloom", "foo", "noexist"))
+ info = await decoded_r.bf().info("bloom")
assert_resp_response(
- modclient,
+ decoded_r,
2,
info.get("insertedNum"),
info.get("Number of items inserted"),
)
assert_resp_response(
- modclient,
+ decoded_r,
1000,
info.get("capacity"),
info.get("Capacity"),
)
assert_resp_response(
- modclient,
+ decoded_r,
1,
info.get("filterNum"),
info.get("Number of filters"),
@@ -94,19 +81,19 @@ async def test_bf_insert(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_bf_scandump_and_loadchunk(modclient: redis.Redis):
+async def test_bf_scandump_and_loadchunk(decoded_r: redis.Redis):
# Store a filter
- await modclient.bf().create("myBloom", "0.0001", "1000")
+ await decoded_r.bf().create("myBloom", "0.0001", "1000")
# test is probabilistic and might fail. It is OK to change variables if
# certain to not break anything
async def do_verify():
res = 0
for x in range(1000):
- await modclient.bf().add("myBloom", x)
- rv = await modclient.bf().exists("myBloom", x)
+ await decoded_r.bf().add("myBloom", x)
+ rv = await decoded_r.bf().exists("myBloom", x)
assert rv
- rv = await modclient.bf().exists("myBloom", f"nonexist_{x}")
+ rv = await decoded_r.bf().exists("myBloom", f"nonexist_{x}")
res += rv == x
assert res < 5
@@ -114,54 +101,54 @@ async def do_verify():
cmds = []
if HIREDIS_AVAILABLE:
with pytest.raises(ModuleError):
- cur = await modclient.bf().scandump("myBloom", 0)
+ cur = await decoded_r.bf().scandump("myBloom", 0)
return
- cur = await modclient.bf().scandump("myBloom", 0)
+ cur = await decoded_r.bf().scandump("myBloom", 0)
first = cur[0]
cmds.append(cur)
while True:
- cur = await modclient.bf().scandump("myBloom", first)
+ cur = await decoded_r.bf().scandump("myBloom", first)
first = cur[0]
if first == 0:
break
else:
cmds.append(cur)
- prev_info = await modclient.bf().execute_command("bf.debug", "myBloom")
+ prev_info = await decoded_r.bf().execute_command("bf.debug", "myBloom")
# Remove the filter
- await modclient.bf().client.delete("myBloom")
+ await decoded_r.bf().client.delete("myBloom")
# Now, load all the commands:
for cmd in cmds:
- await modclient.bf().loadchunk("myBloom", *cmd)
+ await decoded_r.bf().loadchunk("myBloom", *cmd)
- cur_info = await modclient.bf().execute_command("bf.debug", "myBloom")
+ cur_info = await decoded_r.bf().execute_command("bf.debug", "myBloom")
assert prev_info == cur_info
await do_verify()
- await modclient.bf().client.delete("myBloom")
- await modclient.bf().create("myBloom", "0.0001", "10000000")
+ await decoded_r.bf().client.delete("myBloom")
+ await decoded_r.bf().create("myBloom", "0.0001", "10000000")
@pytest.mark.redismod
-async def test_bf_info(modclient: redis.Redis):
+async def test_bf_info(decoded_r: redis.Redis):
expansion = 4
# Store a filter
- await modclient.bf().create("nonscaling", "0.0001", "1000", noScale=True)
- info = await modclient.bf().info("nonscaling")
+ await decoded_r.bf().create("nonscaling", "0.0001", "1000", noScale=True)
+ info = await decoded_r.bf().info("nonscaling")
assert_resp_response(
- modclient,
+ decoded_r,
None,
info.get("expansionRate"),
info.get("Expansion rate"),
)
- await modclient.bf().create("expanding", "0.0001", "1000", expansion=expansion)
- info = await modclient.bf().info("expanding")
+ await decoded_r.bf().create("expanding", "0.0001", "1000", expansion=expansion)
+ info = await decoded_r.bf().info("expanding")
assert_resp_response(
- modclient,
+ decoded_r,
4,
info.get("expansionRate"),
info.get("Expansion rate"),
@@ -169,7 +156,7 @@ async def test_bf_info(modclient: redis.Redis):
try:
# noScale mean no expansion
- await modclient.bf().create(
+ await decoded_r.bf().create(
"myBloom", "0.0001", "1000", expansion=expansion, noScale=True
)
assert False
@@ -178,68 +165,68 @@ async def test_bf_info(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_bf_card(modclient: redis.Redis):
+async def test_bf_card(decoded_r: redis.Redis):
# return 0 if the key does not exist
- assert await modclient.bf().card("not_exist") == 0
+ assert await decoded_r.bf().card("not_exist") == 0
# Store a filter
- assert await modclient.bf().add("bf1", "item_foo") == 1
- assert await modclient.bf().card("bf1") == 1
+ assert await decoded_r.bf().add("bf1", "item_foo") == 1
+ assert await decoded_r.bf().card("bf1") == 1
- # Error when key is of a type other than Bloom filter.
+ # Error when key is of a type other than Bloom filter.
with pytest.raises(redis.ResponseError):
- await modclient.set("setKey", "value")
- await modclient.bf().card("setKey")
+ await decoded_r.set("setKey", "value")
+ await decoded_r.bf().card("setKey")
@pytest.mark.redismod
-async def test_cf_add_and_insert(modclient: redis.Redis):
- assert await modclient.cf().create("cuckoo", 1000)
- assert await modclient.cf().add("cuckoo", "filter")
- assert not await modclient.cf().addnx("cuckoo", "filter")
- assert 1 == await modclient.cf().addnx("cuckoo", "newItem")
- assert [1] == await modclient.cf().insert("captest", ["foo"])
- assert [1] == await modclient.cf().insert("captest", ["foo"], capacity=1000)
- assert [1] == await modclient.cf().insertnx("captest", ["bar"])
- assert [1] == await modclient.cf().insertnx("captest", ["food"], nocreate="1")
- assert [0, 0, 1] == await modclient.cf().insertnx("captest", ["foo", "bar", "baz"])
- assert [0] == await modclient.cf().insertnx("captest", ["bar"], capacity=1000)
- assert [1] == await modclient.cf().insert("empty1", ["foo"], capacity=1000)
- assert [1] == await modclient.cf().insertnx("empty2", ["bar"], capacity=1000)
- info = await modclient.cf().info("captest")
+async def test_cf_add_and_insert(decoded_r: redis.Redis):
+ assert await decoded_r.cf().create("cuckoo", 1000)
+ assert await decoded_r.cf().add("cuckoo", "filter")
+ assert not await decoded_r.cf().addnx("cuckoo", "filter")
+ assert 1 == await decoded_r.cf().addnx("cuckoo", "newItem")
+ assert [1] == await decoded_r.cf().insert("captest", ["foo"])
+ assert [1] == await decoded_r.cf().insert("captest", ["foo"], capacity=1000)
+ assert [1] == await decoded_r.cf().insertnx("captest", ["bar"])
+ assert [1] == await decoded_r.cf().insertnx("captest", ["food"], nocreate="1")
+ assert [0, 0, 1] == await decoded_r.cf().insertnx("captest", ["foo", "bar", "baz"])
+ assert [0] == await decoded_r.cf().insertnx("captest", ["bar"], capacity=1000)
+ assert [1] == await decoded_r.cf().insert("empty1", ["foo"], capacity=1000)
+ assert [1] == await decoded_r.cf().insertnx("empty2", ["bar"], capacity=1000)
+ info = await decoded_r.cf().info("captest")
assert_resp_response(
- modclient, 5, info.get("insertedNum"), info.get("Number of items inserted")
+ decoded_r, 5, info.get("insertedNum"), info.get("Number of items inserted")
)
assert_resp_response(
- modclient, 0, info.get("deletedNum"), info.get("Number of items deleted")
+ decoded_r, 0, info.get("deletedNum"), info.get("Number of items deleted")
)
assert_resp_response(
- modclient, 1, info.get("filterNum"), info.get("Number of filters")
+ decoded_r, 1, info.get("filterNum"), info.get("Number of filters")
)
@pytest.mark.redismod
-async def test_cf_exists_and_del(modclient: redis.Redis):
- assert await modclient.cf().create("cuckoo", 1000)
- assert await modclient.cf().add("cuckoo", "filter")
- assert await modclient.cf().exists("cuckoo", "filter")
- assert not await modclient.cf().exists("cuckoo", "notexist")
- assert 1 == await modclient.cf().count("cuckoo", "filter")
- assert 0 == await modclient.cf().count("cuckoo", "notexist")
- assert await modclient.cf().delete("cuckoo", "filter")
- assert 0 == await modclient.cf().count("cuckoo", "filter")
+async def test_cf_exists_and_del(decoded_r: redis.Redis):
+ assert await decoded_r.cf().create("cuckoo", 1000)
+ assert await decoded_r.cf().add("cuckoo", "filter")
+ assert await decoded_r.cf().exists("cuckoo", "filter")
+ assert not await decoded_r.cf().exists("cuckoo", "notexist")
+ assert 1 == await decoded_r.cf().count("cuckoo", "filter")
+ assert 0 == await decoded_r.cf().count("cuckoo", "notexist")
+ assert await decoded_r.cf().delete("cuckoo", "filter")
+ assert 0 == await decoded_r.cf().count("cuckoo", "filter")
@pytest.mark.redismod
-async def test_cms(modclient: redis.Redis):
- assert await modclient.cms().initbydim("dim", 1000, 5)
- assert await modclient.cms().initbyprob("prob", 0.01, 0.01)
- assert await modclient.cms().incrby("dim", ["foo"], [5])
- assert [0] == await modclient.cms().query("dim", "notexist")
- assert [5] == await modclient.cms().query("dim", "foo")
- assert [10, 15] == await modclient.cms().incrby("dim", ["foo", "bar"], [5, 15])
- assert [10, 15] == await modclient.cms().query("dim", "foo", "bar")
- info = await modclient.cms().info("dim")
+async def test_cms(decoded_r: redis.Redis):
+ assert await decoded_r.cms().initbydim("dim", 1000, 5)
+ assert await decoded_r.cms().initbyprob("prob", 0.01, 0.01)
+ assert await decoded_r.cms().incrby("dim", ["foo"], [5])
+ assert [0] == await decoded_r.cms().query("dim", "notexist")
+ assert [5] == await decoded_r.cms().query("dim", "foo")
+ assert [10, 15] == await decoded_r.cms().incrby("dim", ["foo", "bar"], [5, 15])
+ assert [10, 15] == await decoded_r.cms().query("dim", "foo", "bar")
+ info = await decoded_r.cms().info("dim")
assert info["width"]
assert 1000 == info["width"]
assert 5 == info["depth"]
@@ -248,26 +235,26 @@ async def test_cms(modclient: redis.Redis):
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_cms_merge(modclient: redis.Redis):
- assert await modclient.cms().initbydim("A", 1000, 5)
- assert await modclient.cms().initbydim("B", 1000, 5)
- assert await modclient.cms().initbydim("C", 1000, 5)
- assert await modclient.cms().incrby("A", ["foo", "bar", "baz"], [5, 3, 9])
- assert await modclient.cms().incrby("B", ["foo", "bar", "baz"], [2, 3, 1])
- assert [5, 3, 9] == await modclient.cms().query("A", "foo", "bar", "baz")
- assert [2, 3, 1] == await modclient.cms().query("B", "foo", "bar", "baz")
- assert await modclient.cms().merge("C", 2, ["A", "B"])
- assert [7, 6, 10] == await modclient.cms().query("C", "foo", "bar", "baz")
- assert await modclient.cms().merge("C", 2, ["A", "B"], ["1", "2"])
- assert [9, 9, 11] == await modclient.cms().query("C", "foo", "bar", "baz")
- assert await modclient.cms().merge("C", 2, ["A", "B"], ["2", "3"])
- assert [16, 15, 21] == await modclient.cms().query("C", "foo", "bar", "baz")
+async def test_cms_merge(decoded_r: redis.Redis):
+ assert await decoded_r.cms().initbydim("A", 1000, 5)
+ assert await decoded_r.cms().initbydim("B", 1000, 5)
+ assert await decoded_r.cms().initbydim("C", 1000, 5)
+ assert await decoded_r.cms().incrby("A", ["foo", "bar", "baz"], [5, 3, 9])
+ assert await decoded_r.cms().incrby("B", ["foo", "bar", "baz"], [2, 3, 1])
+ assert [5, 3, 9] == await decoded_r.cms().query("A", "foo", "bar", "baz")
+ assert [2, 3, 1] == await decoded_r.cms().query("B", "foo", "bar", "baz")
+ assert await decoded_r.cms().merge("C", 2, ["A", "B"])
+ assert [7, 6, 10] == await decoded_r.cms().query("C", "foo", "bar", "baz")
+ assert await decoded_r.cms().merge("C", 2, ["A", "B"], ["1", "2"])
+ assert [9, 9, 11] == await decoded_r.cms().query("C", "foo", "bar", "baz")
+ assert await decoded_r.cms().merge("C", 2, ["A", "B"], ["2", "3"])
+ assert [16, 15, 21] == await decoded_r.cms().query("C", "foo", "bar", "baz")
@pytest.mark.redismod
-async def test_topk(modclient: redis.Redis):
+async def test_topk(decoded_r: redis.Redis):
# test list with empty buckets
- assert await modclient.topk().reserve("topk", 3, 50, 4, 0.9)
+ assert await decoded_r.topk().reserve("topk", 3, 50, 4, 0.9)
assert [
None,
None,
@@ -286,7 +273,7 @@ async def test_topk(modclient: redis.Redis):
None,
"D",
None,
- ] == await modclient.topk().add(
+ ] == await decoded_r.topk().add(
"topk",
"A",
"B",
@@ -306,17 +293,17 @@ async def test_topk(modclient: redis.Redis):
"E",
1,
)
- assert [1, 1, 0, 0, 1, 0, 0] == await modclient.topk().query(
+ assert [1, 1, 0, 0, 1, 0, 0] == await decoded_r.topk().query(
"topk", "A", "B", "C", "D", "E", "F", "G"
)
with pytest.deprecated_call():
- assert [4, 3, 2, 3, 3, 0, 1] == await modclient.topk().count(
+ assert [4, 3, 2, 3, 3, 0, 1] == await decoded_r.topk().count(
"topk", "A", "B", "C", "D", "E", "F", "G"
)
# test full list
- assert await modclient.topk().reserve("topklist", 3, 50, 3, 0.9)
- assert await modclient.topk().add(
+ assert await decoded_r.topk().reserve("topklist", 3, 50, 3, 0.9)
+ assert await decoded_r.topk().add(
"topklist",
"A",
"B",
@@ -335,10 +322,10 @@ async def test_topk(modclient: redis.Redis):
"E",
"E",
)
- assert ["A", "B", "E"] == await modclient.topk().list("topklist")
- res = await modclient.topk().list("topklist", withcount=True)
+ assert ["A", "B", "E"] == await decoded_r.topk().list("topklist")
+ res = await decoded_r.topk().list("topklist", withcount=True)
assert ["A", 4, "B", 3, "E", 3] == res
- info = await modclient.topk().info("topklist")
+ info = await decoded_r.topk().info("topklist")
assert 3 == info["k"]
assert 50 == info["width"]
assert 3 == info["depth"]
@@ -346,185 +333,185 @@ async def test_topk(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_topk_incrby(modclient: redis.Redis):
- await modclient.flushdb()
- assert await modclient.topk().reserve("topk", 3, 10, 3, 1)
- assert [None, None, None] == await modclient.topk().incrby(
+async def test_topk_incrby(decoded_r: redis.Redis):
+ await decoded_r.flushdb()
+ assert await decoded_r.topk().reserve("topk", 3, 10, 3, 1)
+ assert [None, None, None] == await decoded_r.topk().incrby(
"topk", ["bar", "baz", "42"], [3, 6, 2]
)
- res = await modclient.topk().incrby("topk", ["42", "xyzzy"], [8, 4])
+ res = await decoded_r.topk().incrby("topk", ["42", "xyzzy"], [8, 4])
assert [None, "bar"] == res
with pytest.deprecated_call():
- assert [3, 6, 10, 4, 0] == await modclient.topk().count(
+ assert [3, 6, 10, 4, 0] == await decoded_r.topk().count(
"topk", "bar", "baz", "42", "xyzzy", 4
)
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_reset(modclient: redis.Redis):
- assert await modclient.tdigest().create("tDigest", 10)
+async def test_tdigest_reset(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("tDigest", 10)
# reset on empty histogram
- assert await modclient.tdigest().reset("tDigest")
+ assert await decoded_r.tdigest().reset("tDigest")
# insert data-points into sketch
- assert await modclient.tdigest().add("tDigest", list(range(10)))
+ assert await decoded_r.tdigest().add("tDigest", list(range(10)))
- assert await modclient.tdigest().reset("tDigest")
+ assert await decoded_r.tdigest().reset("tDigest")
# assert we have 0 unmerged nodes
- info = await modclient.tdigest().info("tDigest")
+ info = await decoded_r.tdigest().info("tDigest")
assert_resp_response(
- modclient, 0, info.get("unmerged_nodes"), info.get("Unmerged nodes")
+ decoded_r, 0, info.get("unmerged_nodes"), info.get("Unmerged nodes")
)
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_merge(modclient: redis.Redis):
- assert await modclient.tdigest().create("to-tDigest", 10)
- assert await modclient.tdigest().create("from-tDigest", 10)
+async def test_tdigest_merge(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("to-tDigest", 10)
+ assert await decoded_r.tdigest().create("from-tDigest", 10)
# insert data-points into sketch
- assert await modclient.tdigest().add("from-tDigest", [1.0] * 10)
- assert await modclient.tdigest().add("to-tDigest", [2.0] * 10)
+ assert await decoded_r.tdigest().add("from-tDigest", [1.0] * 10)
+ assert await decoded_r.tdigest().add("to-tDigest", [2.0] * 10)
# merge from-tdigest into to-tdigest
- assert await modclient.tdigest().merge("to-tDigest", 1, "from-tDigest")
+ assert await decoded_r.tdigest().merge("to-tDigest", 1, "from-tDigest")
# we should now have 110 weight on to-histogram
- info = await modclient.tdigest().info("to-tDigest")
- if is_resp2_connection(modclient):
+ info = await decoded_r.tdigest().info("to-tDigest")
+ if is_resp2_connection(decoded_r):
assert 20 == float(info["merged_weight"]) + float(info["unmerged_weight"])
else:
assert 20 == float(info["Merged weight"]) + float(info["Unmerged weight"])
# test override
- assert await modclient.tdigest().create("from-override", 10)
- assert await modclient.tdigest().create("from-override-2", 10)
- assert await modclient.tdigest().add("from-override", [3.0] * 10)
- assert await modclient.tdigest().add("from-override-2", [4.0] * 10)
- assert await modclient.tdigest().merge(
+ assert await decoded_r.tdigest().create("from-override", 10)
+ assert await decoded_r.tdigest().create("from-override-2", 10)
+ assert await decoded_r.tdigest().add("from-override", [3.0] * 10)
+ assert await decoded_r.tdigest().add("from-override-2", [4.0] * 10)
+ assert await decoded_r.tdigest().merge(
"to-tDigest", 2, "from-override", "from-override-2", override=True
)
- assert 3.0 == await modclient.tdigest().min("to-tDigest")
- assert 4.0 == await modclient.tdigest().max("to-tDigest")
+ assert 3.0 == await decoded_r.tdigest().min("to-tDigest")
+ assert 4.0 == await decoded_r.tdigest().max("to-tDigest")
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_min_and_max(modclient: redis.Redis):
- assert await modclient.tdigest().create("tDigest", 100)
+async def test_tdigest_min_and_max(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("tDigest", 100)
# insert data-points into sketch
- assert await modclient.tdigest().add("tDigest", [1, 2, 3])
+ assert await decoded_r.tdigest().add("tDigest", [1, 2, 3])
# min/max
- assert 3 == await modclient.tdigest().max("tDigest")
- assert 1 == await modclient.tdigest().min("tDigest")
+ assert 3 == await decoded_r.tdigest().max("tDigest")
+ assert 1 == await decoded_r.tdigest().min("tDigest")
@pytest.mark.redismod
@pytest.mark.experimental
@skip_ifmodversion_lt("2.4.0", "bf")
-async def test_tdigest_quantile(modclient: redis.Redis):
- assert await modclient.tdigest().create("tDigest", 500)
+async def test_tdigest_quantile(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("tDigest", 500)
# insert data-points into sketch
- assert await modclient.tdigest().add(
+ assert await decoded_r.tdigest().add(
"tDigest", list([x * 0.01 for x in range(1, 10000)])
)
# assert min min/max have same result as quantile 0 and 1
assert (
- await modclient.tdigest().max("tDigest")
- == (await modclient.tdigest().quantile("tDigest", 1))[0]
+ await decoded_r.tdigest().max("tDigest")
+ == (await decoded_r.tdigest().quantile("tDigest", 1))[0]
)
assert (
- await modclient.tdigest().min("tDigest")
- == (await modclient.tdigest().quantile("tDigest", 0.0))[0]
+ await decoded_r.tdigest().min("tDigest")
+ == (await decoded_r.tdigest().quantile("tDigest", 0.0))[0]
)
- assert 1.0 == round((await modclient.tdigest().quantile("tDigest", 0.01))[0], 2)
- assert 99.0 == round((await modclient.tdigest().quantile("tDigest", 0.99))[0], 2)
+ assert 1.0 == round((await decoded_r.tdigest().quantile("tDigest", 0.01))[0], 2)
+ assert 99.0 == round((await decoded_r.tdigest().quantile("tDigest", 0.99))[0], 2)
# test multiple quantiles
- assert await modclient.tdigest().create("t-digest", 100)
- assert await modclient.tdigest().add("t-digest", [1, 2, 3, 4, 5])
- res = await modclient.tdigest().quantile("t-digest", 0.5, 0.8)
+ assert await decoded_r.tdigest().create("t-digest", 100)
+ assert await decoded_r.tdigest().add("t-digest", [1, 2, 3, 4, 5])
+ res = await decoded_r.tdigest().quantile("t-digest", 0.5, 0.8)
assert [3.0, 5.0] == res
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_cdf(modclient: redis.Redis):
- assert await modclient.tdigest().create("tDigest", 100)
+async def test_tdigest_cdf(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("tDigest", 100)
# insert data-points into sketch
- assert await modclient.tdigest().add("tDigest", list(range(1, 10)))
- assert 0.1 == round((await modclient.tdigest().cdf("tDigest", 1.0))[0], 1)
- assert 0.9 == round((await modclient.tdigest().cdf("tDigest", 9.0))[0], 1)
- res = await modclient.tdigest().cdf("tDigest", 1.0, 9.0)
+ assert await decoded_r.tdigest().add("tDigest", list(range(1, 10)))
+ assert 0.1 == round((await decoded_r.tdigest().cdf("tDigest", 1.0))[0], 1)
+ assert 0.9 == round((await decoded_r.tdigest().cdf("tDigest", 9.0))[0], 1)
+ res = await decoded_r.tdigest().cdf("tDigest", 1.0, 9.0)
assert [0.1, 0.9] == [round(x, 1) for x in res]
@pytest.mark.redismod
@pytest.mark.experimental
@skip_ifmodversion_lt("2.4.0", "bf")
-async def test_tdigest_trimmed_mean(modclient: redis.Redis):
- assert await modclient.tdigest().create("tDigest", 100)
+async def test_tdigest_trimmed_mean(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("tDigest", 100)
# insert data-points into sketch
- assert await modclient.tdigest().add("tDigest", list(range(1, 10)))
- assert 5 == await modclient.tdigest().trimmed_mean("tDigest", 0.1, 0.9)
- assert 4.5 == await modclient.tdigest().trimmed_mean("tDigest", 0.4, 0.5)
+ assert await decoded_r.tdigest().add("tDigest", list(range(1, 10)))
+ assert 5 == await decoded_r.tdigest().trimmed_mean("tDigest", 0.1, 0.9)
+ assert 4.5 == await decoded_r.tdigest().trimmed_mean("tDigest", 0.4, 0.5)
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_rank(modclient: redis.Redis):
- assert await modclient.tdigest().create("t-digest", 500)
- assert await modclient.tdigest().add("t-digest", list(range(0, 20)))
- assert -1 == (await modclient.tdigest().rank("t-digest", -1))[0]
- assert 0 == (await modclient.tdigest().rank("t-digest", 0))[0]
- assert 10 == (await modclient.tdigest().rank("t-digest", 10))[0]
- assert [-1, 20, 9] == await modclient.tdigest().rank("t-digest", -20, 20, 9)
+async def test_tdigest_rank(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("t-digest", 500)
+ assert await decoded_r.tdigest().add("t-digest", list(range(0, 20)))
+ assert -1 == (await decoded_r.tdigest().rank("t-digest", -1))[0]
+ assert 0 == (await decoded_r.tdigest().rank("t-digest", 0))[0]
+ assert 10 == (await decoded_r.tdigest().rank("t-digest", 10))[0]
+ assert [-1, 20, 9] == await decoded_r.tdigest().rank("t-digest", -20, 20, 9)
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_revrank(modclient: redis.Redis):
- assert await modclient.tdigest().create("t-digest", 500)
- assert await modclient.tdigest().add("t-digest", list(range(0, 20)))
- assert -1 == (await modclient.tdigest().revrank("t-digest", 20))[0]
- assert 19 == (await modclient.tdigest().revrank("t-digest", 0))[0]
- assert [-1, 19, 9] == await modclient.tdigest().revrank("t-digest", 21, 0, 10)
+async def test_tdigest_revrank(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("t-digest", 500)
+ assert await decoded_r.tdigest().add("t-digest", list(range(0, 20)))
+ assert -1 == (await decoded_r.tdigest().revrank("t-digest", 20))[0]
+ assert 19 == (await decoded_r.tdigest().revrank("t-digest", 0))[0]
+ assert [-1, 19, 9] == await decoded_r.tdigest().revrank("t-digest", 21, 0, 10)
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_byrank(modclient: redis.Redis):
- assert await modclient.tdigest().create("t-digest", 500)
- assert await modclient.tdigest().add("t-digest", list(range(1, 11)))
- assert 1 == (await modclient.tdigest().byrank("t-digest", 0))[0]
- assert 10 == (await modclient.tdigest().byrank("t-digest", 9))[0]
- assert (await modclient.tdigest().byrank("t-digest", 100))[0] == inf
+async def test_tdigest_byrank(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("t-digest", 500)
+ assert await decoded_r.tdigest().add("t-digest", list(range(1, 11)))
+ assert 1 == (await decoded_r.tdigest().byrank("t-digest", 0))[0]
+ assert 10 == (await decoded_r.tdigest().byrank("t-digest", 9))[0]
+ assert (await decoded_r.tdigest().byrank("t-digest", 100))[0] == inf
with pytest.raises(redis.ResponseError):
- (await modclient.tdigest().byrank("t-digest", -1))[0]
+ (await decoded_r.tdigest().byrank("t-digest", -1))[0]
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_tdigest_byrevrank(modclient: redis.Redis):
- assert await modclient.tdigest().create("t-digest", 500)
- assert await modclient.tdigest().add("t-digest", list(range(1, 11)))
- assert 10 == (await modclient.tdigest().byrevrank("t-digest", 0))[0]
- assert 1 == (await modclient.tdigest().byrevrank("t-digest", 9))[0]
- assert (await modclient.tdigest().byrevrank("t-digest", 100))[0] == -inf
+async def test_tdigest_byrevrank(decoded_r: redis.Redis):
+ assert await decoded_r.tdigest().create("t-digest", 500)
+ assert await decoded_r.tdigest().add("t-digest", list(range(1, 11)))
+ assert 10 == (await decoded_r.tdigest().byrevrank("t-digest", 0))[0]
+ assert 1 == (await decoded_r.tdigest().byrevrank("t-digest", 9))[0]
+ assert (await decoded_r.tdigest().byrevrank("t-digest", 100))[0] == -inf
with pytest.raises(redis.ResponseError):
- (await modclient.tdigest().byrevrank("t-digest", -1))[0]
+ (await decoded_r.tdigest().byrevrank("t-digest", -1))[0]
# @pytest.mark.redismod
-# async def test_pipeline(modclient: redis.Redis):
-# pipeline = await modclient.bf().pipeline()
-# assert not await modclient.bf().execute_command("get pipeline")
+# async def test_pipeline(decoded_r: redis.Redis):
+# pipeline = await decoded_r.bf().pipeline()
+# assert not await decoded_r.bf().execute_command("get pipeline")
#
-# assert await modclient.bf().create("pipeline", 0.01, 1000)
+# assert await decoded_r.bf().create("pipeline", 0.01, 1000)
# for i in range(100):
# pipeline.add("pipeline", i)
# for i in range(100):
-# assert not (await modclient.bf().exists("pipeline", i))
+# assert not (await decoded_r.bf().exists("pipeline", i))
#
# pipeline.execute()
#
# for i in range(100):
-# assert await modclient.bf().exists("pipeline", i)
+# assert await decoded_r.bf().exists("pipeline", i)
diff --git a/tests/test_asyncio/test_cluster.py b/tests/test_asyncio/test_cluster.py
index 58c0e0b0c7..1d12877696 100644
--- a/tests/test_asyncio/test_cluster.py
+++ b/tests/test_asyncio/test_cluster.py
@@ -9,7 +9,6 @@
import pytest
import pytest_asyncio
from _pytest.fixtures import FixtureRequest
-
from redis.asyncio.cluster import ClusterNode, NodesManager, RedisCluster
from redis.asyncio.connection import Connection, SSLConnection
from redis.asyncio.retry import Retry
@@ -2692,10 +2691,10 @@ class TestSSL:
"""
ROOT = os.path.join(os.path.dirname(__file__), "../..")
- CERT_DIR = os.path.abspath(os.path.join(ROOT, "docker", "stunnel", "keys"))
+ CERT_DIR = os.path.abspath(os.path.join(ROOT, "dockers", "stunnel", "keys"))
if not os.path.isdir(CERT_DIR): # github actions package validation case
CERT_DIR = os.path.abspath(
- os.path.join(ROOT, "..", "docker", "stunnel", "keys")
+ os.path.join(ROOT, "..", "dockers", "stunnel", "keys")
)
if not os.path.isdir(CERT_DIR):
raise IOError(f"No SSL certificates found. They should be in {CERT_DIR}")
diff --git a/tests/test_asyncio/test_commands.py b/tests/test_asyncio/test_commands.py
index 02bfa71e0f..7e7a40adf3 100644
--- a/tests/test_asyncio/test_commands.py
+++ b/tests/test_asyncio/test_commands.py
@@ -8,7 +8,6 @@
import pytest
import pytest_asyncio
-
import redis
from redis import exceptions
from redis.client import EMPTY_RESPONSE, NEVER_DECODE, parse_info
@@ -122,8 +121,7 @@ async def test_acl_genpass(self, r: redis.Redis):
password = await r.acl_genpass()
assert isinstance(password, str)
- @skip_if_server_version_lt(REDIS_6_VERSION)
- @skip_if_server_version_gte("7.0.0")
+ @skip_if_server_version_lt("7.0.0")
async def test_acl_getuser_setuser(self, r_teardown):
username = "redis-py-user"
r = r_teardown(username)
@@ -159,12 +157,11 @@ async def test_acl_getuser_setuser(self, r_teardown):
keys=["cache:*", "objects:*"],
)
acl = await r.acl_getuser(username)
- assert set(acl["categories"]) == {"-@all", "+@set", "+@hash"}
+ assert set(acl["categories"]) == {"-@all", "+@set", "+@hash", "-@geo"}
assert set(acl["commands"]) == {"+get", "+mget", "-hset"}
assert acl["enabled"] is True
- assert acl["channels"] == [b"*"]
- assert set(acl["flags"]) == {"on", "allchannels", "sanitize-payload"}
- assert acl["keys"] == [b"cache:*", b"objects:*"]
+ assert "on" in acl["flags"]
+ assert set(acl["keys"]) == {"~cache:*", "~objects:*"}
assert len(acl["passwords"]) == 2
# test reset=False keeps existing ACL and applies new ACL on top
@@ -186,12 +183,10 @@ async def test_acl_getuser_setuser(self, r_teardown):
keys=["objects:*"],
)
acl = await r.acl_getuser(username)
- assert set(acl["categories"]) == {"-@all", "+@set", "+@hash"}
assert set(acl["commands"]) == {"+get", "+mget"}
assert acl["enabled"] is True
- assert acl["channels"] == [b"*"]
- assert set(acl["flags"]) == {"on", "allchannels", "sanitize-payload"}
- assert set(acl["keys"]) == {b"cache:*", b"objects:*"}
+ assert "on" in acl["flags"]
+ assert set(acl["keys"]) == {"~cache:*", "~objects:*"}
assert len(acl["passwords"]) == 2
# test removal of passwords
@@ -227,14 +222,13 @@ async def test_acl_getuser_setuser(self, r_teardown):
assert len((await r.acl_getuser(username))["passwords"]) == 1
@skip_if_server_version_lt(REDIS_6_VERSION)
- @skip_if_server_version_gte("7.0.0")
async def test_acl_list(self, r_teardown):
username = "redis-py-user"
r = r_teardown(username)
-
+ start = await r.acl_list()
assert await r.acl_setuser(username, enabled=False, reset=True)
users = await r.acl_list()
- assert f"user {username} off sanitize-payload &* -@all" in users
+ assert len(users) == len(start) + 1
@skip_if_server_version_lt(REDIS_6_VERSION)
@pytest.mark.onlynoncluster
diff --git a/tests/test_asyncio/test_connection.py b/tests/test_asyncio/test_connection.py
index c5b21055e0..926b432b62 100644
--- a/tests/test_asyncio/test_connection.py
+++ b/tests/test_asyncio/test_connection.py
@@ -4,7 +4,6 @@
from unittest.mock import patch
import pytest
-
import redis
from redis.asyncio import Redis
from redis.asyncio.connection import Connection, UnixDomainSocketConnection
@@ -112,22 +111,22 @@ async def get_conn(_):
@skip_if_server_version_lt("4.0.0")
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_loading_external_modules(modclient):
+async def test_loading_external_modules(r):
def inner():
pass
- modclient.load_external_module("myfuncname", inner)
- assert getattr(modclient, "myfuncname") == inner
- assert isinstance(getattr(modclient, "myfuncname"), types.FunctionType)
+ r.load_external_module("myfuncname", inner)
+ assert getattr(r, "myfuncname") == inner
+ assert isinstance(getattr(r, "myfuncname"), types.FunctionType)
# and call it
from redis.commands import RedisModuleCommands
j = RedisModuleCommands.json
- modclient.load_external_module("sometestfuncname", j)
+ r.load_external_module("sometestfuncname", j)
# d = {'hello': 'world!'}
- # mod = j(modclient)
+ # mod = j(r)
# mod.set("fookey", ".", d)
# assert mod.get('fookey') == d
diff --git a/tests/test_asyncio/test_connection_pool.py b/tests/test_asyncio/test_connection_pool.py
index d1e52bd2a3..20c2c79c84 100644
--- a/tests/test_asyncio/test_connection_pool.py
+++ b/tests/test_asyncio/test_connection_pool.py
@@ -4,7 +4,6 @@
import pytest
import pytest_asyncio
-
import redis.asyncio as redis
from redis.asyncio.connection import Connection, to_bool
from tests.conftest import skip_if_redis_enterprise, skip_if_server_version_lt
@@ -246,8 +245,9 @@ async def test_connection_pool_blocks_until_timeout(self, master_host):
start = asyncio.get_running_loop().time()
with pytest.raises(redis.ConnectionError):
await pool.get_connection("_")
- # we should have waited at least 0.1 seconds
- assert asyncio.get_running_loop().time() - start >= 0.1
+
+ # we should have waited at least some period of time
+ assert asyncio.get_running_loop().time() - start >= 0.05
await c1.disconnect()
async def test_connection_pool_blocks_until_conn_available(self, master_host):
@@ -267,7 +267,8 @@ async def target():
start = asyncio.get_running_loop().time()
await asyncio.gather(target(), pool.get_connection("_"))
- assert asyncio.get_running_loop().time() - start >= 0.1
+ stop = asyncio.get_running_loop().time()
+ assert (stop - start) <= 0.2
async def test_reuse_previously_released_connection(self, master_host):
connection_kwargs = {"host": master_host}
@@ -658,6 +659,7 @@ async def r(self, create_redis, server):
@pytest.mark.onlynoncluster
+@pytest.mark.xfail(strict=False)
class TestHealthCheck:
interval = 60
diff --git a/tests/test_asyncio/test_credentials.py b/tests/test_asyncio/test_credentials.py
index 8e213cdb26..4429f7453b 100644
--- a/tests/test_asyncio/test_credentials.py
+++ b/tests/test_asyncio/test_credentials.py
@@ -5,7 +5,6 @@
import pytest
import pytest_asyncio
-
import redis
from redis import AuthenticationError, DataError, ResponseError
from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
diff --git a/tests/test_asyncio/test_encoding.py b/tests/test_asyncio/test_encoding.py
index 3efcf69e5b..162ccb367d 100644
--- a/tests/test_asyncio/test_encoding.py
+++ b/tests/test_asyncio/test_encoding.py
@@ -1,6 +1,5 @@
import pytest
import pytest_asyncio
-
import redis.asyncio as redis
from redis.exceptions import DataError
@@ -90,6 +89,7 @@ async def r(self, create_redis):
yield redis
await redis.flushall()
+ @pytest.mark.xfail
async def test_basic_command(self, r: redis.Redis):
await r.set("hello", "world")
diff --git a/tests/test_asyncio/test_graph.py b/tests/test_asyncio/test_graph.py
index 7e70baae89..22195901e6 100644
--- a/tests/test_asyncio/test_graph.py
+++ b/tests/test_asyncio/test_graph.py
@@ -1,5 +1,4 @@
import pytest
-
import redis.asyncio as redis
from redis.commands.graph import Edge, Node, Path
from redis.commands.graph.execution_plan import Operation
@@ -8,15 +7,15 @@
@pytest.mark.redismod
-async def test_bulk(modclient):
+async def test_bulk(decoded_r):
with pytest.raises(NotImplementedError):
- await modclient.graph().bulk()
- await modclient.graph().bulk(foo="bar!")
+ await decoded_r.graph().bulk()
+ await decoded_r.graph().bulk(foo="bar!")
@pytest.mark.redismod
-async def test_graph_creation(modclient: redis.Redis):
- graph = modclient.graph()
+async def test_graph_creation(decoded_r: redis.Redis):
+ graph = decoded_r.graph()
john = Node(
label="person",
@@ -60,8 +59,8 @@ async def test_graph_creation(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_array_functions(modclient: redis.Redis):
- graph = modclient.graph()
+async def test_array_functions(decoded_r: redis.Redis):
+ graph = decoded_r.graph()
query = """CREATE (p:person{name:'a',age:32, array:[0,1,2]})"""
await graph.query(query)
@@ -83,12 +82,12 @@ async def test_array_functions(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_path(modclient: redis.Redis):
+async def test_path(decoded_r: redis.Redis):
node0 = Node(node_id=0, label="L1")
node1 = Node(node_id=1, label="L1")
edge01 = Edge(node0, "R1", node1, edge_id=0, properties={"value": 1})
- graph = modclient.graph()
+ graph = decoded_r.graph()
graph.add_node(node0)
graph.add_node(node1)
graph.add_edge(edge01)
@@ -103,20 +102,20 @@ async def test_path(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_param(modclient: redis.Redis):
+async def test_param(decoded_r: redis.Redis):
params = [1, 2.3, "str", True, False, None, [0, 1, 2]]
query = "RETURN $param"
for param in params:
- result = await modclient.graph().query(query, {"param": param})
+ result = await decoded_r.graph().query(query, {"param": param})
expected_results = [[param]]
assert expected_results == result.result_set
@pytest.mark.redismod
-async def test_map(modclient: redis.Redis):
+async def test_map(decoded_r: redis.Redis):
query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}"
- actual = (await modclient.graph().query(query)).result_set[0][0]
+ actual = (await decoded_r.graph().query(query)).result_set[0][0]
expected = {
"a": 1,
"b": "str",
@@ -130,40 +129,40 @@ async def test_map(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_point(modclient: redis.Redis):
+async def test_point(decoded_r: redis.Redis):
query = "RETURN point({latitude: 32.070794860, longitude: 34.820751118})"
expected_lat = 32.070794860
expected_lon = 34.820751118
- actual = (await modclient.graph().query(query)).result_set[0][0]
+ actual = (await decoded_r.graph().query(query)).result_set[0][0]
assert abs(actual["latitude"] - expected_lat) < 0.001
assert abs(actual["longitude"] - expected_lon) < 0.001
query = "RETURN point({latitude: 32, longitude: 34.0})"
expected_lat = 32
expected_lon = 34
- actual = (await modclient.graph().query(query)).result_set[0][0]
+ actual = (await decoded_r.graph().query(query)).result_set[0][0]
assert abs(actual["latitude"] - expected_lat) < 0.001
assert abs(actual["longitude"] - expected_lon) < 0.001
@pytest.mark.redismod
-async def test_index_response(modclient: redis.Redis):
- result_set = await modclient.graph().query("CREATE INDEX ON :person(age)")
+async def test_index_response(decoded_r: redis.Redis):
+ result_set = await decoded_r.graph().query("CREATE INDEX ON :person(age)")
assert 1 == result_set.indices_created
- result_set = await modclient.graph().query("CREATE INDEX ON :person(age)")
+ result_set = await decoded_r.graph().query("CREATE INDEX ON :person(age)")
assert 0 == result_set.indices_created
- result_set = await modclient.graph().query("DROP INDEX ON :person(age)")
+ result_set = await decoded_r.graph().query("DROP INDEX ON :person(age)")
assert 1 == result_set.indices_deleted
with pytest.raises(ResponseError):
- await modclient.graph().query("DROP INDEX ON :person(age)")
+ await decoded_r.graph().query("DROP INDEX ON :person(age)")
@pytest.mark.redismod
-async def test_stringify_query_result(modclient: redis.Redis):
- graph = modclient.graph()
+async def test_stringify_query_result(decoded_r: redis.Redis):
+ graph = decoded_r.graph()
john = Node(
alias="a",
@@ -216,14 +215,14 @@ async def test_stringify_query_result(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_optional_match(modclient: redis.Redis):
+async def test_optional_match(decoded_r: redis.Redis):
# Build a graph of form (a)-[R]->(b)
node0 = Node(node_id=0, label="L1", properties={"value": "a"})
node1 = Node(node_id=1, label="L1", properties={"value": "b"})
edge01 = Edge(node0, "R", node1, edge_id=0)
- graph = modclient.graph()
+ graph = decoded_r.graph()
graph.add_node(node0)
graph.add_node(node1)
graph.add_edge(edge01)
@@ -241,17 +240,17 @@ async def test_optional_match(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_cached_execution(modclient: redis.Redis):
- await modclient.graph().query("CREATE ()")
+async def test_cached_execution(decoded_r: redis.Redis):
+ await decoded_r.graph().query("CREATE ()")
- uncached_result = await modclient.graph().query(
+ uncached_result = await decoded_r.graph().query(
"MATCH (n) RETURN n, $param", {"param": [0]}
)
assert uncached_result.cached_execution is False
# loop to make sure the query is cached on each thread on server
for x in range(0, 64):
- cached_result = await modclient.graph().query(
+ cached_result = await decoded_r.graph().query(
"MATCH (n) RETURN n, $param", {"param": [0]}
)
assert uncached_result.result_set == cached_result.result_set
@@ -261,50 +260,51 @@ async def test_cached_execution(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_slowlog(modclient: redis.Redis):
+async def test_slowlog(decoded_r: redis.Redis):
create_query = """CREATE
(:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
(:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
(:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
- await modclient.graph().query(create_query)
+ await decoded_r.graph().query(create_query)
- results = await modclient.graph().slowlog()
+ results = await decoded_r.graph().slowlog()
assert results[0][1] == "GRAPH.QUERY"
assert results[0][2] == create_query
@pytest.mark.redismod
-async def test_query_timeout(modclient: redis.Redis):
+@pytest.mark.xfail(strict=False)
+async def test_query_timeout(decoded_r: redis.Redis):
# Build a sample graph with 1000 nodes.
- await modclient.graph().query("UNWIND range(0,1000) as val CREATE ({v: val})")
+ await decoded_r.graph().query("UNWIND range(0,1000) as val CREATE ({v: val})")
# Issue a long-running query with a 1-millisecond timeout.
with pytest.raises(ResponseError):
- await modclient.graph().query("MATCH (a), (b), (c), (d) RETURN *", timeout=1)
+ await decoded_r.graph().query("MATCH (a), (b), (c), (d) RETURN *", timeout=1)
assert False is False
with pytest.raises(Exception):
- await modclient.graph().query("RETURN 1", timeout="str")
+ await decoded_r.graph().query("RETURN 1", timeout="str")
assert False is False
@pytest.mark.redismod
-async def test_read_only_query(modclient: redis.Redis):
+async def test_read_only_query(decoded_r: redis.Redis):
with pytest.raises(Exception):
# Issue a write query, specifying read-only true,
# this call should fail.
- await modclient.graph().query("CREATE (p:person {name:'a'})", read_only=True)
+ await decoded_r.graph().query("CREATE (p:person {name:'a'})", read_only=True)
assert False is False
@pytest.mark.redismod
-async def test_profile(modclient: redis.Redis):
+async def test_profile(decoded_r: redis.Redis):
q = """UNWIND range(1, 3) AS x CREATE (p:Person {v:x})"""
- profile = (await modclient.graph().profile(q)).result_set
+ profile = (await decoded_r.graph().profile(q)).result_set
assert "Create | Records produced: 3" in profile
assert "Unwind | Records produced: 3" in profile
q = "MATCH (p:Person) WHERE p.v > 1 RETURN p"
- profile = (await modclient.graph().profile(q)).result_set
+ profile = (await decoded_r.graph().profile(q)).result_set
assert "Results | Records produced: 2" in profile
assert "Project | Records produced: 2" in profile
assert "Filter | Records produced: 2" in profile
@@ -313,16 +313,16 @@ async def test_profile(modclient: redis.Redis):
@pytest.mark.redismod
@skip_if_redis_enterprise()
-async def test_config(modclient: redis.Redis):
+async def test_config(decoded_r: redis.Redis):
config_name = "RESULTSET_SIZE"
config_value = 3
# Set configuration
- response = await modclient.graph().config(config_name, config_value, set=True)
+ response = await decoded_r.graph().config(config_name, config_value, set=True)
assert response == "OK"
# Make sure config been updated.
- response = await modclient.graph().config(config_name, set=False)
+ response = await decoded_r.graph().config(config_name, set=False)
expected_response = [config_name, config_value]
assert response == expected_response
@@ -330,46 +330,46 @@ async def test_config(modclient: redis.Redis):
config_value = 1 << 20 # 1MB
# Set configuration
- response = await modclient.graph().config(config_name, config_value, set=True)
+ response = await decoded_r.graph().config(config_name, config_value, set=True)
assert response == "OK"
# Make sure config been updated.
- response = await modclient.graph().config(config_name, set=False)
+ response = await decoded_r.graph().config(config_name, set=False)
expected_response = [config_name, config_value]
assert response == expected_response
# reset to default
- await modclient.graph().config("QUERY_MEM_CAPACITY", 0, set=True)
- await modclient.graph().config("RESULTSET_SIZE", -100, set=True)
+ await decoded_r.graph().config("QUERY_MEM_CAPACITY", 0, set=True)
+ await decoded_r.graph().config("RESULTSET_SIZE", -100, set=True)
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_list_keys(modclient: redis.Redis):
- result = await modclient.graph().list_keys()
+async def test_list_keys(decoded_r: redis.Redis):
+ result = await decoded_r.graph().list_keys()
assert result == []
- await modclient.graph("G").query("CREATE (n)")
- result = await modclient.graph().list_keys()
+ await decoded_r.graph("G").query("CREATE (n)")
+ result = await decoded_r.graph().list_keys()
assert result == ["G"]
- await modclient.graph("X").query("CREATE (m)")
- result = await modclient.graph().list_keys()
+ await decoded_r.graph("X").query("CREATE (m)")
+ result = await decoded_r.graph().list_keys()
assert result == ["G", "X"]
- await modclient.delete("G")
- await modclient.rename("X", "Z")
- result = await modclient.graph().list_keys()
+ await decoded_r.delete("G")
+ await decoded_r.rename("X", "Z")
+ result = await decoded_r.graph().list_keys()
assert result == ["Z"]
- await modclient.delete("Z")
- result = await modclient.graph().list_keys()
+ await decoded_r.delete("Z")
+ result = await decoded_r.graph().list_keys()
assert result == []
@pytest.mark.redismod
-async def test_multi_label(modclient: redis.Redis):
- redis_graph = modclient.graph("g")
+async def test_multi_label(decoded_r: redis.Redis):
+ redis_graph = decoded_r.graph("g")
node = Node(label=["l", "ll"])
redis_graph.add_node(node)
@@ -394,8 +394,8 @@ async def test_multi_label(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_execution_plan(modclient: redis.Redis):
- redis_graph = modclient.graph("execution_plan")
+async def test_execution_plan(decoded_r: redis.Redis):
+ redis_graph = decoded_r.graph("execution_plan")
create_query = """CREATE
(:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
(:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
@@ -413,8 +413,8 @@ async def test_execution_plan(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_explain(modclient: redis.Redis):
- redis_graph = modclient.graph("execution_plan")
+async def test_explain(decoded_r: redis.Redis):
+ redis_graph = decoded_r.graph("execution_plan")
# graph creation / population
create_query = """CREATE
(:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
diff --git a/tests/test_asyncio/test_json.py b/tests/test_asyncio/test_json.py
index 551e307805..78176f4710 100644
--- a/tests/test_asyncio/test_json.py
+++ b/tests/test_asyncio/test_json.py
@@ -1,5 +1,4 @@
import pytest
-
import redis.asyncio as redis
from redis import exceptions
from redis.commands.json.path import Path
@@ -7,287 +6,287 @@
@pytest.mark.redismod
-async def test_json_setbinarykey(modclient: redis.Redis):
+async def test_json_setbinarykey(decoded_r: redis.Redis):
d = {"hello": "world", b"some": "value"}
with pytest.raises(TypeError):
- modclient.json().set("somekey", Path.root_path(), d)
- assert await modclient.json().set("somekey", Path.root_path(), d, decode_keys=True)
+ decoded_r.json().set("somekey", Path.root_path(), d)
+ assert await decoded_r.json().set("somekey", Path.root_path(), d, decode_keys=True)
@pytest.mark.redismod
-async def test_json_setgetdeleteforget(modclient: redis.Redis):
- assert await modclient.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(modclient, await modclient.json().get("foo"), "bar", [["bar"]])
- assert await modclient.json().get("baz") is None
- assert await modclient.json().delete("foo") == 1
- assert await modclient.json().forget("foo") == 0 # second delete
- assert await modclient.exists("foo") == 0
+async def test_json_setgetdeleteforget(decoded_r: redis.Redis):
+ assert await decoded_r.json().set("foo", Path.root_path(), "bar")
+ assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]])
+ assert await decoded_r.json().get("baz") is None
+ assert await decoded_r.json().delete("foo") == 1
+ assert await decoded_r.json().forget("foo") == 0 # second delete
+ assert await decoded_r.exists("foo") == 0
@pytest.mark.redismod
-async def test_jsonget(modclient: redis.Redis):
- await modclient.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(modclient, await modclient.json().get("foo"), "bar", [["bar"]])
+async def test_jsonget(decoded_r: redis.Redis):
+ await decoded_r.json().set("foo", Path.root_path(), "bar")
+ assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]])
@pytest.mark.redismod
-async def test_json_get_jset(modclient: redis.Redis):
- assert await modclient.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(modclient, await modclient.json().get("foo"), "bar", [["bar"]])
- assert await modclient.json().get("baz") is None
- assert 1 == await modclient.json().delete("foo")
- assert await modclient.exists("foo") == 0
+async def test_json_get_jset(decoded_r: redis.Redis):
+ assert await decoded_r.json().set("foo", Path.root_path(), "bar")
+ assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]])
+ assert await decoded_r.json().get("baz") is None
+ assert 1 == await decoded_r.json().delete("foo")
+ assert await decoded_r.exists("foo") == 0
@pytest.mark.redismod
-async def test_nonascii_setgetdelete(modclient: redis.Redis):
- assert await modclient.json().set("notascii", Path.root_path(), "hyvää-élève")
+async def test_nonascii_setgetdelete(decoded_r: redis.Redis):
+ assert await decoded_r.json().set("notascii", Path.root_path(), "hyvää-élève")
res = "hyvää-élève"
assert_resp_response(
- modclient, await modclient.json().get("notascii", no_escape=True), res, [[res]]
+ decoded_r, await decoded_r.json().get("notascii", no_escape=True), res, [[res]]
)
- assert 1 == await modclient.json().delete("notascii")
- assert await modclient.exists("notascii") == 0
+ assert 1 == await decoded_r.json().delete("notascii")
+ assert await decoded_r.exists("notascii") == 0
@pytest.mark.redismod
-async def test_jsonsetexistentialmodifiersshouldsucceed(modclient: redis.Redis):
+async def test_jsonsetexistentialmodifiersshouldsucceed(decoded_r: redis.Redis):
obj = {"foo": "bar"}
- assert await modclient.json().set("obj", Path.root_path(), obj)
+ assert await decoded_r.json().set("obj", Path.root_path(), obj)
# Test that flags prevent updates when conditions are unmet
- assert await modclient.json().set("obj", Path("foo"), "baz", nx=True) is None
- assert await modclient.json().set("obj", Path("qaz"), "baz", xx=True) is None
+ assert await decoded_r.json().set("obj", Path("foo"), "baz", nx=True) is None
+ assert await decoded_r.json().set("obj", Path("qaz"), "baz", xx=True) is None
# Test that flags allow updates when conditions are met
- assert await modclient.json().set("obj", Path("foo"), "baz", xx=True)
- assert await modclient.json().set("obj", Path("qaz"), "baz", nx=True)
+ assert await decoded_r.json().set("obj", Path("foo"), "baz", xx=True)
+ assert await decoded_r.json().set("obj", Path("qaz"), "baz", nx=True)
# Test that flags are mutually exlusive
with pytest.raises(Exception):
- await modclient.json().set("obj", Path("foo"), "baz", nx=True, xx=True)
+ await decoded_r.json().set("obj", Path("foo"), "baz", nx=True, xx=True)
@pytest.mark.redismod
-async def test_mgetshouldsucceed(modclient: redis.Redis):
- await modclient.json().set("1", Path.root_path(), 1)
- await modclient.json().set("2", Path.root_path(), 2)
- assert await modclient.json().mget(["1"], Path.root_path()) == [1]
+async def test_mgetshouldsucceed(decoded_r: redis.Redis):
+ await decoded_r.json().set("1", Path.root_path(), 1)
+ await decoded_r.json().set("2", Path.root_path(), 2)
+ assert await decoded_r.json().mget(["1"], Path.root_path()) == [1]
- assert await modclient.json().mget([1, 2], Path.root_path()) == [1, 2]
+ assert await decoded_r.json().mget([1, 2], Path.root_path()) == [1, 2]
@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release
-async def test_clear(modclient: redis.Redis):
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 1 == await modclient.json().clear("arr", Path.root_path())
- assert_resp_response(modclient, await modclient.json().get("arr"), [], [[[]]])
+async def test_clear(decoded_r: redis.Redis):
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 1 == await decoded_r.json().clear("arr", Path.root_path())
+ assert_resp_response(decoded_r, await decoded_r.json().get("arr"), [], [[[]]])
@pytest.mark.redismod
-async def test_type(modclient: redis.Redis):
- await modclient.json().set("1", Path.root_path(), 1)
+async def test_type(decoded_r: redis.Redis):
+ await decoded_r.json().set("1", Path.root_path(), 1)
assert_resp_response(
- modclient,
- await modclient.json().type("1", Path.root_path()),
+ decoded_r,
+ await decoded_r.json().type("1", Path.root_path()),
"integer",
["integer"],
)
assert_resp_response(
- modclient, await modclient.json().type("1"), "integer", ["integer"]
+ decoded_r, await decoded_r.json().type("1"), "integer", ["integer"]
)
@pytest.mark.redismod
-async def test_numincrby(modclient):
- await modclient.json().set("num", Path.root_path(), 1)
+async def test_numincrby(decoded_r):
+ await decoded_r.json().set("num", Path.root_path(), 1)
assert_resp_response(
- modclient, await modclient.json().numincrby("num", Path.root_path(), 1), 2, [2]
+ decoded_r, await decoded_r.json().numincrby("num", Path.root_path(), 1), 2, [2]
)
- res = await modclient.json().numincrby("num", Path.root_path(), 0.5)
- assert_resp_response(modclient, res, 2.5, [2.5])
- res = await modclient.json().numincrby("num", Path.root_path(), -1.25)
- assert_resp_response(modclient, res, 1.25, [1.25])
+ res = await decoded_r.json().numincrby("num", Path.root_path(), 0.5)
+ assert_resp_response(decoded_r, res, 2.5, [2.5])
+ res = await decoded_r.json().numincrby("num", Path.root_path(), -1.25)
+ assert_resp_response(decoded_r, res, 1.25, [1.25])
@pytest.mark.redismod
-async def test_nummultby(modclient: redis.Redis):
- await modclient.json().set("num", Path.root_path(), 1)
+async def test_nummultby(decoded_r: redis.Redis):
+ await decoded_r.json().set("num", Path.root_path(), 1)
with pytest.deprecated_call():
- res = await modclient.json().nummultby("num", Path.root_path(), 2)
- assert_resp_response(modclient, res, 2, [2])
- res = await modclient.json().nummultby("num", Path.root_path(), 2.5)
- assert_resp_response(modclient, res, 5, [5])
- res = await modclient.json().nummultby("num", Path.root_path(), 0.5)
- assert_resp_response(modclient, res, 2.5, [2.5])
+ res = await decoded_r.json().nummultby("num", Path.root_path(), 2)
+ assert_resp_response(decoded_r, res, 2, [2])
+ res = await decoded_r.json().nummultby("num", Path.root_path(), 2.5)
+ assert_resp_response(decoded_r, res, 5, [5])
+ res = await decoded_r.json().nummultby("num", Path.root_path(), 0.5)
+ assert_resp_response(decoded_r, res, 2.5, [2.5])
@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release
-async def test_toggle(modclient: redis.Redis):
- await modclient.json().set("bool", Path.root_path(), False)
- assert await modclient.json().toggle("bool", Path.root_path())
- assert await modclient.json().toggle("bool", Path.root_path()) is False
+async def test_toggle(decoded_r: redis.Redis):
+ await decoded_r.json().set("bool", Path.root_path(), False)
+ assert await decoded_r.json().toggle("bool", Path.root_path())
+ assert await decoded_r.json().toggle("bool", Path.root_path()) is False
# check non-boolean value
- await modclient.json().set("num", Path.root_path(), 1)
+ await decoded_r.json().set("num", Path.root_path(), 1)
with pytest.raises(exceptions.ResponseError):
- await modclient.json().toggle("num", Path.root_path())
+ await decoded_r.json().toggle("num", Path.root_path())
@pytest.mark.redismod
-async def test_strappend(modclient: redis.Redis):
- await modclient.json().set("jsonkey", Path.root_path(), "foo")
- assert 6 == await modclient.json().strappend("jsonkey", "bar")
- res = await modclient.json().get("jsonkey", Path.root_path())
- assert_resp_response(modclient, res, "foobar", [["foobar"]])
+async def test_strappend(decoded_r: redis.Redis):
+ await decoded_r.json().set("jsonkey", Path.root_path(), "foo")
+ assert 6 == await decoded_r.json().strappend("jsonkey", "bar")
+ res = await decoded_r.json().get("jsonkey", Path.root_path())
+ assert_resp_response(decoded_r, res, "foobar", [["foobar"]])
@pytest.mark.redismod
-async def test_strlen(modclient: redis.Redis):
- await modclient.json().set("str", Path.root_path(), "foo")
- assert 3 == await modclient.json().strlen("str", Path.root_path())
- await modclient.json().strappend("str", "bar", Path.root_path())
- assert 6 == await modclient.json().strlen("str", Path.root_path())
- assert 6 == await modclient.json().strlen("str")
+async def test_strlen(decoded_r: redis.Redis):
+ await decoded_r.json().set("str", Path.root_path(), "foo")
+ assert 3 == await decoded_r.json().strlen("str", Path.root_path())
+ await decoded_r.json().strappend("str", "bar", Path.root_path())
+ assert 6 == await decoded_r.json().strlen("str", Path.root_path())
+ assert 6 == await decoded_r.json().strlen("str")
@pytest.mark.redismod
-async def test_arrappend(modclient: redis.Redis):
- await modclient.json().set("arr", Path.root_path(), [1])
- assert 2 == await modclient.json().arrappend("arr", Path.root_path(), 2)
- assert 4 == await modclient.json().arrappend("arr", Path.root_path(), 3, 4)
- assert 7 == await modclient.json().arrappend("arr", Path.root_path(), *[5, 6, 7])
+async def test_arrappend(decoded_r: redis.Redis):
+ await decoded_r.json().set("arr", Path.root_path(), [1])
+ assert 2 == await decoded_r.json().arrappend("arr", Path.root_path(), 2)
+ assert 4 == await decoded_r.json().arrappend("arr", Path.root_path(), 3, 4)
+ assert 7 == await decoded_r.json().arrappend("arr", Path.root_path(), *[5, 6, 7])
@pytest.mark.redismod
-async def test_arrindex(modclient: redis.Redis):
+async def test_arrindex(decoded_r: redis.Redis):
r_path = Path.root_path()
- await modclient.json().set("arr", r_path, [0, 1, 2, 3, 4])
- assert 1 == await modclient.json().arrindex("arr", r_path, 1)
- assert -1 == await modclient.json().arrindex("arr", r_path, 1, 2)
- assert 4 == await modclient.json().arrindex("arr", r_path, 4)
- assert 4 == await modclient.json().arrindex("arr", r_path, 4, start=0)
- assert 4 == await modclient.json().arrindex("arr", r_path, 4, start=0, stop=5000)
- assert -1 == await modclient.json().arrindex("arr", r_path, 4, start=0, stop=-1)
- assert -1 == await modclient.json().arrindex("arr", r_path, 4, start=1, stop=3)
+ await decoded_r.json().set("arr", r_path, [0, 1, 2, 3, 4])
+ assert 1 == await decoded_r.json().arrindex("arr", r_path, 1)
+ assert -1 == await decoded_r.json().arrindex("arr", r_path, 1, 2)
+ assert 4 == await decoded_r.json().arrindex("arr", r_path, 4)
+ assert 4 == await decoded_r.json().arrindex("arr", r_path, 4, start=0)
+ assert 4 == await decoded_r.json().arrindex("arr", r_path, 4, start=0, stop=5000)
+ assert -1 == await decoded_r.json().arrindex("arr", r_path, 4, start=0, stop=-1)
+ assert -1 == await decoded_r.json().arrindex("arr", r_path, 4, start=1, stop=3)
@pytest.mark.redismod
-async def test_arrinsert(modclient: redis.Redis):
- await modclient.json().set("arr", Path.root_path(), [0, 4])
- assert 5 == await modclient.json().arrinsert("arr", Path.root_path(), 1, *[1, 2, 3])
+async def test_arrinsert(decoded_r: redis.Redis):
+ await decoded_r.json().set("arr", Path.root_path(), [0, 4])
+ assert 5 == await decoded_r.json().arrinsert("arr", Path.root_path(), 1, *[1, 2, 3])
res = [0, 1, 2, 3, 4]
- assert_resp_response(modclient, await modclient.json().get("arr"), res, [[res]])
+ assert_resp_response(decoded_r, await decoded_r.json().get("arr"), res, [[res]])
# test prepends
- await modclient.json().set("val2", Path.root_path(), [5, 6, 7, 8, 9])
- await modclient.json().arrinsert("val2", Path.root_path(), 0, ["some", "thing"])
+ await decoded_r.json().set("val2", Path.root_path(), [5, 6, 7, 8, 9])
+ await decoded_r.json().arrinsert("val2", Path.root_path(), 0, ["some", "thing"])
res = [["some", "thing"], 5, 6, 7, 8, 9]
- assert_resp_response(modclient, await modclient.json().get("val2"), res, [[res]])
+ assert_resp_response(decoded_r, await decoded_r.json().get("val2"), res, [[res]])
@pytest.mark.redismod
-async def test_arrlen(modclient: redis.Redis):
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 5 == await modclient.json().arrlen("arr", Path.root_path())
- assert 5 == await modclient.json().arrlen("arr")
- assert await modclient.json().arrlen("fakekey") is None
+async def test_arrlen(decoded_r: redis.Redis):
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 5 == await decoded_r.json().arrlen("arr", Path.root_path())
+ assert 5 == await decoded_r.json().arrlen("arr")
+ assert await decoded_r.json().arrlen("fakekey") is None
@pytest.mark.redismod
-async def test_arrpop(modclient: redis.Redis):
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 4 == await modclient.json().arrpop("arr", Path.root_path(), 4)
- assert 3 == await modclient.json().arrpop("arr", Path.root_path(), -1)
- assert 2 == await modclient.json().arrpop("arr", Path.root_path())
- assert 0 == await modclient.json().arrpop("arr", Path.root_path(), 0)
- assert_resp_response(modclient, await modclient.json().get("arr"), [1], [[[1]]])
+async def test_arrpop(decoded_r: redis.Redis):
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 4 == await decoded_r.json().arrpop("arr", Path.root_path(), 4)
+ assert 3 == await decoded_r.json().arrpop("arr", Path.root_path(), -1)
+ assert 2 == await decoded_r.json().arrpop("arr", Path.root_path())
+ assert 0 == await decoded_r.json().arrpop("arr", Path.root_path(), 0)
+ assert_resp_response(decoded_r, await decoded_r.json().get("arr"), [1], [[[1]]])
# test out of bounds
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 4 == await modclient.json().arrpop("arr", Path.root_path(), 99)
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 4 == await decoded_r.json().arrpop("arr", Path.root_path(), 99)
# none test
- await modclient.json().set("arr", Path.root_path(), [])
- assert await modclient.json().arrpop("arr") is None
+ await decoded_r.json().set("arr", Path.root_path(), [])
+ assert await decoded_r.json().arrpop("arr") is None
@pytest.mark.redismod
-async def test_arrtrim(modclient: redis.Redis):
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 3 == await modclient.json().arrtrim("arr", Path.root_path(), 1, 3)
- res = await modclient.json().get("arr")
- assert_resp_response(modclient, res, [1, 2, 3], [[[1, 2, 3]]])
+async def test_arrtrim(decoded_r: redis.Redis):
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 3 == await decoded_r.json().arrtrim("arr", Path.root_path(), 1, 3)
+ res = await decoded_r.json().get("arr")
+ assert_resp_response(decoded_r, res, [1, 2, 3], [[[1, 2, 3]]])
# <0 test, should be 0 equivalent
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 0 == await modclient.json().arrtrim("arr", Path.root_path(), -1, 3)
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 0 == await decoded_r.json().arrtrim("arr", Path.root_path(), -1, 3)
# testing stop > end
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 2 == await modclient.json().arrtrim("arr", Path.root_path(), 3, 99)
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 2 == await decoded_r.json().arrtrim("arr", Path.root_path(), 3, 99)
# start > array size and stop
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 0 == await modclient.json().arrtrim("arr", Path.root_path(), 9, 1)
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 0 == await decoded_r.json().arrtrim("arr", Path.root_path(), 9, 1)
# all larger
- await modclient.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
- assert 0 == await modclient.json().arrtrim("arr", Path.root_path(), 9, 11)
+ await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
+ assert 0 == await decoded_r.json().arrtrim("arr", Path.root_path(), 9, 11)
@pytest.mark.redismod
-async def test_resp(modclient: redis.Redis):
+async def test_resp(decoded_r: redis.Redis):
obj = {"foo": "bar", "baz": 1, "qaz": True}
- await modclient.json().set("obj", Path.root_path(), obj)
- assert "bar" == await modclient.json().resp("obj", Path("foo"))
- assert 1 == await modclient.json().resp("obj", Path("baz"))
- assert await modclient.json().resp("obj", Path("qaz"))
- assert isinstance(await modclient.json().resp("obj"), list)
+ await decoded_r.json().set("obj", Path.root_path(), obj)
+ assert "bar" == await decoded_r.json().resp("obj", Path("foo"))
+ assert 1 == await decoded_r.json().resp("obj", Path("baz"))
+ assert await decoded_r.json().resp("obj", Path("qaz"))
+ assert isinstance(await decoded_r.json().resp("obj"), list)
@pytest.mark.redismod
-async def test_objkeys(modclient: redis.Redis):
+async def test_objkeys(decoded_r: redis.Redis):
obj = {"foo": "bar", "baz": "qaz"}
- await modclient.json().set("obj", Path.root_path(), obj)
- keys = await modclient.json().objkeys("obj", Path.root_path())
+ await decoded_r.json().set("obj", Path.root_path(), obj)
+ keys = await decoded_r.json().objkeys("obj", Path.root_path())
keys.sort()
exp = list(obj.keys())
exp.sort()
assert exp == keys
- await modclient.json().set("obj", Path.root_path(), obj)
- keys = await modclient.json().objkeys("obj")
+ await decoded_r.json().set("obj", Path.root_path(), obj)
+ keys = await decoded_r.json().objkeys("obj")
assert keys == list(obj.keys())
- assert await modclient.json().objkeys("fakekey") is None
+ assert await decoded_r.json().objkeys("fakekey") is None
@pytest.mark.redismod
-async def test_objlen(modclient: redis.Redis):
+async def test_objlen(decoded_r: redis.Redis):
obj = {"foo": "bar", "baz": "qaz"}
- await modclient.json().set("obj", Path.root_path(), obj)
- assert len(obj) == await modclient.json().objlen("obj", Path.root_path())
+ await decoded_r.json().set("obj", Path.root_path(), obj)
+ assert len(obj) == await decoded_r.json().objlen("obj", Path.root_path())
- await modclient.json().set("obj", Path.root_path(), obj)
- assert len(obj) == await modclient.json().objlen("obj")
+ await decoded_r.json().set("obj", Path.root_path(), obj)
+ assert len(obj) == await decoded_r.json().objlen("obj")
# @pytest.mark.redismod
-# async def test_json_commands_in_pipeline(modclient: redis.Redis):
-# async with modclient.json().pipeline() as p:
+# async def test_json_commands_in_pipeline(decoded_r: redis.Redis):
+# async with decoded_r.json().pipeline() as p:
# p.set("foo", Path.root_path(), "bar")
# p.get("foo")
# p.delete("foo")
# assert [True, "bar", 1] == await p.execute()
-# assert await modclient.keys() == []
-# assert await modclient.get("foo") is None
+# assert await decoded_r.keys() == []
+# assert await decoded_r.get("foo") is None
# # now with a true, json object
-# await modclient.flushdb()
-# p = await modclient.json().pipeline()
+# await decoded_r.flushdb()
+# p = await decoded_r.json().pipeline()
# d = {"hello": "world", "oh": "snap"}
# with pytest.deprecated_call():
# p.jsonset("foo", Path.root_path(), d)
@@ -295,24 +294,24 @@ async def test_objlen(modclient: redis.Redis):
# p.exists("notarealkey")
# p.delete("foo")
# assert [True, d, 0, 1] == p.execute()
-# assert await modclient.keys() == []
-# assert await modclient.get("foo") is None
+# assert await decoded_r.keys() == []
+# assert await decoded_r.get("foo") is None
@pytest.mark.redismod
-async def test_json_delete_with_dollar(modclient: redis.Redis):
+async def test_json_delete_with_dollar(decoded_r: redis.Redis):
doc1 = {"a": 1, "nested": {"a": 2, "b": 3}}
- assert await modclient.json().set("doc1", "$", doc1)
- assert await modclient.json().delete("doc1", "$..a") == 2
+ assert await decoded_r.json().set("doc1", "$", doc1)
+ assert await decoded_r.json().delete("doc1", "$..a") == 2
res = [{"nested": {"b": 3}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
- assert await modclient.json().set("doc2", "$", doc2)
- assert await modclient.json().delete("doc2", "$..a") == 1
- res = await modclient.json().get("doc2", "$")
+ assert await decoded_r.json().set("doc2", "$", doc2)
+ assert await decoded_r.json().delete("doc2", "$..a") == 1
+ res = await decoded_r.json().get("doc2", "$")
res = [{"nested": {"b": [True, "a", "b"]}, "b": ["a", "b"]}]
- assert_resp_response(modclient, await modclient.json().get("doc2", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc2", "$"), res, [res])
doc3 = [
{
@@ -326,8 +325,8 @@ async def test_json_delete_with_dollar(modclient: redis.Redis):
],
}
]
- assert await modclient.json().set("doc3", "$", doc3)
- assert await modclient.json().delete("doc3", '$.[0]["nested"]..ciao') == 3
+ assert await decoded_r.json().set("doc3", "$", doc3)
+ assert await decoded_r.json().delete("doc3", '$.[0]["nested"]..ciao') == 3
doc3val = [
[
@@ -343,29 +342,29 @@ async def test_json_delete_with_dollar(modclient: redis.Redis):
}
]
]
- res = await modclient.json().get("doc3", "$")
- assert_resp_response(modclient, res, doc3val, [doc3val])
+ res = await decoded_r.json().get("doc3", "$")
+ assert_resp_response(decoded_r, res, doc3val, [doc3val])
# Test async default path
- assert await modclient.json().delete("doc3") == 1
- assert await modclient.json().get("doc3", "$") is None
+ assert await decoded_r.json().delete("doc3") == 1
+ assert await decoded_r.json().get("doc3", "$") is None
- await modclient.json().delete("not_a_document", "..a")
+ await decoded_r.json().delete("not_a_document", "..a")
@pytest.mark.redismod
-async def test_json_forget_with_dollar(modclient: redis.Redis):
+async def test_json_forget_with_dollar(decoded_r: redis.Redis):
doc1 = {"a": 1, "nested": {"a": 2, "b": 3}}
- assert await modclient.json().set("doc1", "$", doc1)
- assert await modclient.json().forget("doc1", "$..a") == 2
+ assert await decoded_r.json().set("doc1", "$", doc1)
+ assert await decoded_r.json().forget("doc1", "$..a") == 2
res = [{"nested": {"b": 3}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
- assert await modclient.json().set("doc2", "$", doc2)
- assert await modclient.json().forget("doc2", "$..a") == 1
+ assert await decoded_r.json().set("doc2", "$", doc2)
+ assert await decoded_r.json().forget("doc2", "$..a") == 1
res = [{"nested": {"b": [True, "a", "b"]}, "b": ["a", "b"]}]
- assert_resp_response(modclient, await modclient.json().get("doc2", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc2", "$"), res, [res])
doc3 = [
{
@@ -379,8 +378,8 @@ async def test_json_forget_with_dollar(modclient: redis.Redis):
],
}
]
- assert await modclient.json().set("doc3", "$", doc3)
- assert await modclient.json().forget("doc3", '$.[0]["nested"]..ciao') == 3
+ assert await decoded_r.json().set("doc3", "$", doc3)
+ assert await decoded_r.json().forget("doc3", '$.[0]["nested"]..ciao') == 3
doc3val = [
[
@@ -396,25 +395,25 @@ async def test_json_forget_with_dollar(modclient: redis.Redis):
}
]
]
- res = await modclient.json().get("doc3", "$")
- assert_resp_response(modclient, res, doc3val, [doc3val])
+ res = await decoded_r.json().get("doc3", "$")
+ assert_resp_response(decoded_r, res, doc3val, [doc3val])
# Test async default path
- assert await modclient.json().forget("doc3") == 1
- assert await modclient.json().get("doc3", "$") is None
+ assert await decoded_r.json().forget("doc3") == 1
+ assert await decoded_r.json().get("doc3", "$") is None
- await modclient.json().forget("not_a_document", "..a")
+ await decoded_r.json().forget("not_a_document", "..a")
@pytest.mark.redismod
-async def test_json_mget_dollar(modclient: redis.Redis):
+async def test_json_mget_dollar(decoded_r: redis.Redis):
# Test mget with multi paths
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{"a": 1, "b": 2, "nested": {"a": 3}, "c": None, "nested2": {"a": None}},
)
- await modclient.json().set(
+ await decoded_r.json().set(
"doc2",
"$",
{"a": 4, "b": 5, "nested": {"a": 6}, "c": None, "nested2": {"a": [None]}},
@@ -422,139 +421,139 @@ async def test_json_mget_dollar(modclient: redis.Redis):
# Compare also to single JSON.GET
res = [1, 3, None]
assert_resp_response(
- modclient, await modclient.json().get("doc1", "$..a"), res, [res]
+ decoded_r, await decoded_r.json().get("doc1", "$..a"), res, [res]
)
res = [4, 6, [None]]
assert_resp_response(
- modclient, await modclient.json().get("doc2", "$..a"), res, [res]
+ decoded_r, await decoded_r.json().get("doc2", "$..a"), res, [res]
)
# Test mget with single path
- await modclient.json().mget("doc1", "$..a") == [1, 3, None]
+ await decoded_r.json().mget("doc1", "$..a") == [1, 3, None]
# Test mget with multi path
- res = await modclient.json().mget(["doc1", "doc2"], "$..a")
+ res = await decoded_r.json().mget(["doc1", "doc2"], "$..a")
assert res == [[1, 3, None], [4, 6, [None]]]
# Test missing key
- res = await modclient.json().mget(["doc1", "missing_doc"], "$..a")
+ res = await decoded_r.json().mget(["doc1", "missing_doc"], "$..a")
assert res == [[1, 3, None], None]
- res = await modclient.json().mget(["missing_doc1", "missing_doc2"], "$..a")
+ res = await decoded_r.json().mget(["missing_doc1", "missing_doc2"], "$..a")
assert res == [None, None]
@pytest.mark.redismod
-async def test_numby_commands_dollar(modclient: redis.Redis):
+async def test_numby_commands_dollar(decoded_r: redis.Redis):
# Test NUMINCRBY
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}
)
# Test multi
- assert await modclient.json().numincrby("doc1", "$..a", 2) == [None, 4, 7.0, None]
+ assert await decoded_r.json().numincrby("doc1", "$..a", 2) == [None, 4, 7.0, None]
- res = await modclient.json().numincrby("doc1", "$..a", 2.5)
+ res = await decoded_r.json().numincrby("doc1", "$..a", 2.5)
assert res == [None, 6.5, 9.5, None]
# Test single
- assert await modclient.json().numincrby("doc1", "$.b[1].a", 2) == [11.5]
+ assert await decoded_r.json().numincrby("doc1", "$.b[1].a", 2) == [11.5]
- assert await modclient.json().numincrby("doc1", "$.b[2].a", 2) == [None]
- assert await modclient.json().numincrby("doc1", "$.b[1].a", 3.5) == [15.0]
+ assert await decoded_r.json().numincrby("doc1", "$.b[2].a", 2) == [None]
+ assert await decoded_r.json().numincrby("doc1", "$.b[1].a", 3.5) == [15.0]
# Test NUMMULTBY
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}
)
# test list
with pytest.deprecated_call():
- res = await modclient.json().nummultby("doc1", "$..a", 2)
+ res = await decoded_r.json().nummultby("doc1", "$..a", 2)
assert res == [None, 4, 10, None]
- res = await modclient.json().nummultby("doc1", "$..a", 2.5)
+ res = await decoded_r.json().nummultby("doc1", "$..a", 2.5)
assert res == [None, 10.0, 25.0, None]
# Test single
with pytest.deprecated_call():
- assert await modclient.json().nummultby("doc1", "$.b[1].a", 2) == [50.0]
- assert await modclient.json().nummultby("doc1", "$.b[2].a", 2) == [None]
- assert await modclient.json().nummultby("doc1", "$.b[1].a", 3) == [150.0]
+ assert await decoded_r.json().nummultby("doc1", "$.b[1].a", 2) == [50.0]
+ assert await decoded_r.json().nummultby("doc1", "$.b[2].a", 2) == [None]
+ assert await decoded_r.json().nummultby("doc1", "$.b[1].a", 3) == [150.0]
# test missing keys
with pytest.raises(exceptions.ResponseError):
- await modclient.json().numincrby("non_existing_doc", "$..a", 2)
- await modclient.json().nummultby("non_existing_doc", "$..a", 2)
+ await decoded_r.json().numincrby("non_existing_doc", "$..a", 2)
+ await decoded_r.json().nummultby("non_existing_doc", "$..a", 2)
# Test legacy NUMINCRBY
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}
)
- await modclient.json().numincrby("doc1", ".b[0].a", 3) == 5
+ await decoded_r.json().numincrby("doc1", ".b[0].a", 3) == 5
# Test legacy NUMMULTBY
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}
)
with pytest.deprecated_call():
- await modclient.json().nummultby("doc1", ".b[0].a", 3) == 6
+ await decoded_r.json().nummultby("doc1", ".b[0].a", 3) == 6
@pytest.mark.redismod
-async def test_strappend_dollar(modclient: redis.Redis):
+async def test_strappend_dollar(decoded_r: redis.Redis):
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}
)
# Test multi
- await modclient.json().strappend("doc1", "bar", "$..a") == [6, 8, None]
+ await decoded_r.json().strappend("doc1", "bar", "$..a") == [6, 8, None]
res = [{"a": "foobar", "nested1": {"a": "hellobar"}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test single
- await modclient.json().strappend("doc1", "baz", "$.nested1.a") == [11]
+ await decoded_r.json().strappend("doc1", "baz", "$.nested1.a") == [11]
res = [{"a": "foobar", "nested1": {"a": "hellobarbaz"}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().strappend("non_existing_doc", "$..a", "err")
+ await decoded_r.json().strappend("non_existing_doc", "$..a", "err")
# Test multi
- await modclient.json().strappend("doc1", "bar", ".*.a") == 8
+ await decoded_r.json().strappend("doc1", "bar", ".*.a") == 8
res = [{"a": "foobar", "nested1": {"a": "hellobarbazbar"}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing path
with pytest.raises(exceptions.ResponseError):
- await modclient.json().strappend("doc1", "piu")
+ await decoded_r.json().strappend("doc1", "piu")
@pytest.mark.redismod
-async def test_strlen_dollar(modclient: redis.Redis):
+async def test_strlen_dollar(decoded_r: redis.Redis):
# Test multi
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}
)
- assert await modclient.json().strlen("doc1", "$..a") == [3, 5, None]
+ assert await decoded_r.json().strlen("doc1", "$..a") == [3, 5, None]
- res2 = await modclient.json().strappend("doc1", "bar", "$..a")
- res1 = await modclient.json().strlen("doc1", "$..a")
+ res2 = await decoded_r.json().strappend("doc1", "bar", "$..a")
+ res1 = await decoded_r.json().strlen("doc1", "$..a")
assert res1 == res2
# Test single
- await modclient.json().strlen("doc1", "$.nested1.a") == [8]
- await modclient.json().strlen("doc1", "$.nested2.a") == [None]
+ await decoded_r.json().strlen("doc1", "$.nested1.a") == [8]
+ await decoded_r.json().strlen("doc1", "$.nested2.a") == [None]
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().strlen("non_existing_doc", "$..a")
+ await decoded_r.json().strlen("non_existing_doc", "$..a")
@pytest.mark.redismod
-async def test_arrappend_dollar(modclient: redis.Redis):
- await modclient.json().set(
+async def test_arrappend_dollar(decoded_r: redis.Redis):
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -564,7 +563,7 @@ async def test_arrappend_dollar(modclient: redis.Redis):
},
)
# Test multi
- await modclient.json().arrappend("doc1", "$..a", "bar", "racuda") == [3, 5, None]
+ await decoded_r.json().arrappend("doc1", "$..a", "bar", "racuda") == [3, 5, None]
res = [
{
"a": ["foo", "bar", "racuda"],
@@ -572,10 +571,10 @@ async def test_arrappend_dollar(modclient: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test single
- assert await modclient.json().arrappend("doc1", "$.nested1.a", "baz") == [6]
+ assert await decoded_r.json().arrappend("doc1", "$.nested1.a", "baz") == [6]
res = [
{
"a": ["foo", "bar", "racuda"],
@@ -583,14 +582,14 @@ async def test_arrappend_dollar(modclient: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().arrappend("non_existing_doc", "$..a")
+ await decoded_r.json().arrappend("non_existing_doc", "$..a")
# Test legacy
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -600,7 +599,7 @@ async def test_arrappend_dollar(modclient: redis.Redis):
},
)
# Test multi (all paths are updated, but return result of last path)
- assert await modclient.json().arrappend("doc1", "..a", "bar", "racuda") == 5
+ assert await decoded_r.json().arrappend("doc1", "..a", "bar", "racuda") == 5
res = [
{
@@ -609,9 +608,9 @@ async def test_arrappend_dollar(modclient: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test single
- assert await modclient.json().arrappend("doc1", ".nested1.a", "baz") == 6
+ assert await decoded_r.json().arrappend("doc1", ".nested1.a", "baz") == 6
res = [
{
"a": ["foo", "bar", "racuda"],
@@ -619,16 +618,16 @@ async def test_arrappend_dollar(modclient: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().arrappend("non_existing_doc", "$..a")
+ await decoded_r.json().arrappend("non_existing_doc", "$..a")
@pytest.mark.redismod
-async def test_arrinsert_dollar(modclient: redis.Redis):
- await modclient.json().set(
+async def test_arrinsert_dollar(decoded_r: redis.Redis):
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -638,7 +637,7 @@ async def test_arrinsert_dollar(modclient: redis.Redis):
},
)
# Test multi
- res = await modclient.json().arrinsert("doc1", "$..a", "1", "bar", "racuda")
+ res = await decoded_r.json().arrinsert("doc1", "$..a", "1", "bar", "racuda")
assert res == [3, 5, None]
res = [
@@ -648,9 +647,9 @@ async def test_arrinsert_dollar(modclient: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test single
- assert await modclient.json().arrinsert("doc1", "$.nested1.a", -2, "baz") == [6]
+ assert await decoded_r.json().arrinsert("doc1", "$.nested1.a", -2, "baz") == [6]
res = [
{
"a": ["foo", "bar", "racuda"],
@@ -658,17 +657,17 @@ async def test_arrinsert_dollar(modclient: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().arrappend("non_existing_doc", "$..a")
+ await decoded_r.json().arrappend("non_existing_doc", "$..a")
@pytest.mark.redismod
-async def test_arrlen_dollar(modclient: redis.Redis):
+async def test_arrlen_dollar(decoded_r: redis.Redis):
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -679,20 +678,20 @@ async def test_arrlen_dollar(modclient: redis.Redis):
)
# Test multi
- assert await modclient.json().arrlen("doc1", "$..a") == [1, 3, None]
- res = await modclient.json().arrappend("doc1", "$..a", "non", "abba", "stanza")
+ assert await decoded_r.json().arrlen("doc1", "$..a") == [1, 3, None]
+ res = await decoded_r.json().arrappend("doc1", "$..a", "non", "abba", "stanza")
assert res == [4, 6, None]
- await modclient.json().clear("doc1", "$.a")
- assert await modclient.json().arrlen("doc1", "$..a") == [0, 6, None]
+ await decoded_r.json().clear("doc1", "$.a")
+ assert await decoded_r.json().arrlen("doc1", "$..a") == [0, 6, None]
# Test single
- assert await modclient.json().arrlen("doc1", "$.nested1.a") == [6]
+ assert await decoded_r.json().arrlen("doc1", "$.nested1.a") == [6]
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().arrappend("non_existing_doc", "$..a")
+ await decoded_r.json().arrappend("non_existing_doc", "$..a")
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -702,19 +701,19 @@ async def test_arrlen_dollar(modclient: redis.Redis):
},
)
# Test multi (return result of last path)
- assert await modclient.json().arrlen("doc1", "$..a") == [1, 3, None]
- assert await modclient.json().arrappend("doc1", "..a", "non", "abba", "stanza") == 6
+ assert await decoded_r.json().arrlen("doc1", "$..a") == [1, 3, None]
+ assert await decoded_r.json().arrappend("doc1", "..a", "non", "abba", "stanza") == 6
# Test single
- assert await modclient.json().arrlen("doc1", ".nested1.a") == 6
+ assert await decoded_r.json().arrlen("doc1", ".nested1.a") == 6
# Test missing key
- assert await modclient.json().arrlen("non_existing_doc", "..a") is None
+ assert await decoded_r.json().arrlen("non_existing_doc", "..a") is None
@pytest.mark.redismod
-async def test_arrpop_dollar(modclient: redis.Redis):
- await modclient.json().set(
+async def test_arrpop_dollar(decoded_r: redis.Redis):
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -725,17 +724,17 @@ async def test_arrpop_dollar(modclient: redis.Redis):
)
# Test multi
- assert await modclient.json().arrpop("doc1", "$..a", 1) == ['"foo"', None, None]
+ assert await decoded_r.json().arrpop("doc1", "$..a", 1) == ['"foo"', None, None]
res = [{"a": [], "nested1": {"a": ["hello", "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().arrpop("non_existing_doc", "..a")
+ await decoded_r.json().arrpop("non_existing_doc", "..a")
# # Test legacy
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -745,19 +744,19 @@ async def test_arrpop_dollar(modclient: redis.Redis):
},
)
# Test multi (all paths are updated, but return result of last path)
- await modclient.json().arrpop("doc1", "..a", "1") is None
+ await decoded_r.json().arrpop("doc1", "..a", "1") is None
res = [{"a": [], "nested1": {"a": ["hello", "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# # Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().arrpop("non_existing_doc", "..a")
+ await decoded_r.json().arrpop("non_existing_doc", "..a")
@pytest.mark.redismod
-async def test_arrtrim_dollar(modclient: redis.Redis):
+async def test_arrtrim_dollar(decoded_r: redis.Redis):
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -767,24 +766,24 @@ async def test_arrtrim_dollar(modclient: redis.Redis):
},
)
# Test multi
- assert await modclient.json().arrtrim("doc1", "$..a", "1", -1) == [0, 2, None]
+ assert await decoded_r.json().arrtrim("doc1", "$..a", "1", -1) == [0, 2, None]
res = [{"a": [], "nested1": {"a": [None, "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
- assert await modclient.json().arrtrim("doc1", "$..a", "1", "1") == [0, 1, None]
+ assert await decoded_r.json().arrtrim("doc1", "$..a", "1", "1") == [0, 1, None]
res = [{"a": [], "nested1": {"a": ["world"]}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test single
- assert await modclient.json().arrtrim("doc1", "$.nested1.a", 1, 0) == [0]
+ assert await decoded_r.json().arrtrim("doc1", "$.nested1.a", 1, 0) == [0]
res = [{"a": [], "nested1": {"a": []}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().arrtrim("non_existing_doc", "..a", "0", 1)
+ await decoded_r.json().arrtrim("non_existing_doc", "..a", "0", 1)
# Test legacy
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -795,21 +794,21 @@ async def test_arrtrim_dollar(modclient: redis.Redis):
)
# Test multi (all paths are updated, but return result of last path)
- assert await modclient.json().arrtrim("doc1", "..a", "1", "-1") == 2
+ assert await decoded_r.json().arrtrim("doc1", "..a", "1", "-1") == 2
# Test single
- assert await modclient.json().arrtrim("doc1", ".nested1.a", "1", "1") == 1
+ assert await decoded_r.json().arrtrim("doc1", ".nested1.a", "1", "1") == 1
res = [{"a": [], "nested1": {"a": ["world"]}, "nested2": {"a": 31}}]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().arrtrim("non_existing_doc", "..a", 1, 1)
+ await decoded_r.json().arrtrim("non_existing_doc", "..a", 1, 1)
@pytest.mark.redismod
-async def test_objkeys_dollar(modclient: redis.Redis):
- await modclient.json().set(
+async def test_objkeys_dollar(decoded_r: redis.Redis):
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -820,26 +819,26 @@ async def test_objkeys_dollar(modclient: redis.Redis):
)
# Test single
- assert await modclient.json().objkeys("doc1", "$.nested1.a") == [["foo", "bar"]]
+ assert await decoded_r.json().objkeys("doc1", "$.nested1.a") == [["foo", "bar"]]
# Test legacy
- assert await modclient.json().objkeys("doc1", ".*.a") == ["foo", "bar"]
+ assert await decoded_r.json().objkeys("doc1", ".*.a") == ["foo", "bar"]
# Test single
- assert await modclient.json().objkeys("doc1", ".nested2.a") == ["baz"]
+ assert await decoded_r.json().objkeys("doc1", ".nested2.a") == ["baz"]
# Test missing key
- assert await modclient.json().objkeys("non_existing_doc", "..a") is None
+ assert await decoded_r.json().objkeys("non_existing_doc", "..a") is None
# Test non existing doc
with pytest.raises(exceptions.ResponseError):
- assert await modclient.json().objkeys("non_existing_doc", "$..a") == []
+ assert await decoded_r.json().objkeys("non_existing_doc", "$..a") == []
- assert await modclient.json().objkeys("doc1", "$..nowhere") == []
+ assert await decoded_r.json().objkeys("doc1", "$..nowhere") == []
@pytest.mark.redismod
-async def test_objlen_dollar(modclient: redis.Redis):
- await modclient.json().set(
+async def test_objlen_dollar(decoded_r: redis.Redis):
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -849,28 +848,28 @@ async def test_objlen_dollar(modclient: redis.Redis):
},
)
# Test multi
- assert await modclient.json().objlen("doc1", "$..a") == [None, 2, 1]
+ assert await decoded_r.json().objlen("doc1", "$..a") == [None, 2, 1]
# Test single
- assert await modclient.json().objlen("doc1", "$.nested1.a") == [2]
+ assert await decoded_r.json().objlen("doc1", "$.nested1.a") == [2]
# Test missing key, and path
with pytest.raises(exceptions.ResponseError):
- await modclient.json().objlen("non_existing_doc", "$..a")
+ await decoded_r.json().objlen("non_existing_doc", "$..a")
- assert await modclient.json().objlen("doc1", "$.nowhere") == []
+ assert await decoded_r.json().objlen("doc1", "$.nowhere") == []
# Test legacy
- assert await modclient.json().objlen("doc1", ".*.a") == 2
+ assert await decoded_r.json().objlen("doc1", ".*.a") == 2
# Test single
- assert await modclient.json().objlen("doc1", ".nested2.a") == 1
+ assert await decoded_r.json().objlen("doc1", ".nested2.a") == 1
# Test missing key
- assert await modclient.json().objlen("non_existing_doc", "..a") is None
+ assert await decoded_r.json().objlen("non_existing_doc", "..a") is None
# Test missing path
# with pytest.raises(exceptions.ResponseError):
- await modclient.json().objlen("doc1", ".nowhere")
+ await decoded_r.json().objlen("doc1", ".nowhere")
@pytest.mark.redismod
@@ -894,28 +893,28 @@ def load_types_data(nested_key_name):
@pytest.mark.redismod
-async def test_type_dollar(modclient: redis.Redis):
+async def test_type_dollar(decoded_r: redis.Redis):
jdata, jtypes = load_types_data("a")
- await modclient.json().set("doc1", "$", jdata)
+ await decoded_r.json().set("doc1", "$", jdata)
# Test multi
assert_resp_response(
- modclient, await modclient.json().type("doc1", "$..a"), jtypes, [jtypes]
+ decoded_r, await decoded_r.json().type("doc1", "$..a"), jtypes, [jtypes]
)
# Test single
- res = await modclient.json().type("doc1", "$.nested2.a")
- assert_resp_response(modclient, res, [jtypes[1]], [[jtypes[1]]])
+ res = await decoded_r.json().type("doc1", "$.nested2.a")
+ assert_resp_response(decoded_r, res, [jtypes[1]], [[jtypes[1]]])
# Test missing key
assert_resp_response(
- modclient, await modclient.json().type("non_existing_doc", "..a"), None, [None]
+ decoded_r, await decoded_r.json().type("non_existing_doc", "..a"), None, [None]
)
@pytest.mark.redismod
-async def test_clear_dollar(modclient: redis.Redis):
+async def test_clear_dollar(decoded_r: redis.Redis):
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -927,15 +926,15 @@ async def test_clear_dollar(modclient: redis.Redis):
)
# Test multi
- assert await modclient.json().clear("doc1", "$..a") == 3
+ assert await decoded_r.json().clear("doc1", "$..a") == 3
res = [
{"nested1": {"a": {}}, "a": [], "nested2": {"a": "claro"}, "nested3": {"a": {}}}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test single
- await modclient.json().set(
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -945,7 +944,7 @@ async def test_clear_dollar(modclient: redis.Redis):
"nested3": {"a": {"baz": 50}},
},
)
- assert await modclient.json().clear("doc1", "$.nested1.a") == 1
+ assert await decoded_r.json().clear("doc1", "$.nested1.a") == 1
res = [
{
"nested1": {"a": {}},
@@ -954,22 +953,22 @@ async def test_clear_dollar(modclient: redis.Redis):
"nested3": {"a": {"baz": 50}},
}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing path (async defaults to root)
- assert await modclient.json().clear("doc1") == 1
+ assert await decoded_r.json().clear("doc1") == 1
assert_resp_response(
- modclient, await modclient.json().get("doc1", "$"), [{}], [[{}]]
+ decoded_r, await decoded_r.json().get("doc1", "$"), [{}], [[{}]]
)
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().clear("non_existing_doc", "$..a")
+ await decoded_r.json().clear("non_existing_doc", "$..a")
@pytest.mark.redismod
-async def test_toggle_dollar(modclient: redis.Redis):
- await modclient.json().set(
+async def test_toggle_dollar(decoded_r: redis.Redis):
+ await decoded_r.json().set(
"doc1",
"$",
{
@@ -980,7 +979,7 @@ async def test_toggle_dollar(modclient: redis.Redis):
},
)
# Test multi
- assert await modclient.json().toggle("doc1", "$..a") == [None, 1, None, 0]
+ assert await decoded_r.json().toggle("doc1", "$..a") == [None, 1, None, 0]
res = [
{
"a": ["foo"],
@@ -989,8 +988,8 @@ async def test_toggle_dollar(modclient: redis.Redis):
"nested3": {"a": False},
}
]
- assert_resp_response(modclient, await modclient.json().get("doc1", "$"), res, [res])
+ assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
# Test missing key
with pytest.raises(exceptions.ResponseError):
- await modclient.json().toggle("non_existing_doc", "$..a")
+ await decoded_r.json().toggle("non_existing_doc", "$..a")
diff --git a/tests/test_asyncio/test_lock.py b/tests/test_asyncio/test_lock.py
index d78f74164d..75484a2791 100644
--- a/tests/test_asyncio/test_lock.py
+++ b/tests/test_asyncio/test_lock.py
@@ -2,7 +2,6 @@
import pytest
import pytest_asyncio
-
from redis.asyncio.lock import Lock
from redis.exceptions import LockError, LockNotOwnedError
diff --git a/tests/test_asyncio/test_monitor.py b/tests/test_asyncio/test_monitor.py
index 3551579ec0..73ee3cf811 100644
--- a/tests/test_asyncio/test_monitor.py
+++ b/tests/test_asyncio/test_monitor.py
@@ -1,5 +1,4 @@
import pytest
-
from tests.conftest import skip_if_redis_enterprise, skip_ifnot_redis_enterprise
from .conftest import wait_for_command
diff --git a/tests/test_asyncio/test_pipeline.py b/tests/test_asyncio/test_pipeline.py
index b29aa53487..edd2f6d147 100644
--- a/tests/test_asyncio/test_pipeline.py
+++ b/tests/test_asyncio/test_pipeline.py
@@ -1,5 +1,4 @@
import pytest
-
import redis
from tests.conftest import skip_if_server_version_lt
diff --git a/tests/test_asyncio/test_pubsub.py b/tests/test_asyncio/test_pubsub.py
index 8160b3b0f1..8354abe45b 100644
--- a/tests/test_asyncio/test_pubsub.py
+++ b/tests/test_asyncio/test_pubsub.py
@@ -12,7 +12,6 @@
import pytest
import pytest_asyncio
-
import redis.asyncio as redis
from redis.exceptions import ConnectionError
from redis.typing import EncodableT
diff --git a/tests/test_asyncio/test_retry.py b/tests/test_asyncio/test_retry.py
index 86e6ddfa0d..2912ca786c 100644
--- a/tests/test_asyncio/test_retry.py
+++ b/tests/test_asyncio/test_retry.py
@@ -1,5 +1,4 @@
import pytest
-
from redis.asyncio import Redis
from redis.asyncio.connection import Connection, UnixDomainSocketConnection
from redis.asyncio.retry import Retry
diff --git a/tests/test_asyncio/test_scripting.py b/tests/test_asyncio/test_scripting.py
index 3776d12cb7..8375ecd787 100644
--- a/tests/test_asyncio/test_scripting.py
+++ b/tests/test_asyncio/test_scripting.py
@@ -1,6 +1,5 @@
import pytest
import pytest_asyncio
-
from redis import exceptions
from tests.conftest import skip_if_server_version_lt
diff --git a/tests/test_asyncio/test_search.py b/tests/test_asyncio/test_search.py
index 599631bfc9..149b26d958 100644
--- a/tests/test_asyncio/test_search.py
+++ b/tests/test_asyncio/test_search.py
@@ -5,7 +5,6 @@
from io import TextIOWrapper
import pytest
-
import redis.asyncio as redis
import redis.commands.search
import redis.commands.search.aggregation as aggregations
@@ -55,23 +54,23 @@ async def waitForIndex(env, idx, timeout=None):
break
-def getClient(modclient: redis.Redis):
+def getClient(decoded_r: redis.Redis):
"""
Gets a client client attached to an index name which is ready to be
created
"""
- return modclient
+ return decoded_r
-async def createIndex(modclient, num_docs=100, definition=None):
+async def createIndex(decoded_r, num_docs=100, definition=None):
try:
- await modclient.create_index(
+ await decoded_r.create_index(
(TextField("play", weight=5.0), TextField("txt"), NumericField("chapter")),
definition=definition,
)
except redis.ResponseError:
- await modclient.dropindex(delete_documents=True)
- return createIndex(modclient, num_docs=num_docs, definition=definition)
+ await decoded_r.dropindex(delete_documents=True)
+ return createIndex(decoded_r, num_docs=num_docs, definition=definition)
chapters = {}
bzfp = TextIOWrapper(bz2.BZ2File(WILL_PLAY_TEXT), encoding="utf8")
@@ -89,7 +88,7 @@ async def createIndex(modclient, num_docs=100, definition=None):
if len(chapters) == num_docs:
break
- indexer = modclient.batch_indexer(chunk_size=50)
+ indexer = decoded_r.batch_indexer(chunk_size=50)
assert isinstance(indexer, AsyncSearch.BatchIndexer)
assert 50 == indexer.chunk_size
@@ -99,12 +98,12 @@ async def createIndex(modclient, num_docs=100, definition=None):
@pytest.mark.redismod
-async def test_client(modclient: redis.Redis):
+async def test_client(decoded_r: redis.Redis):
num_docs = 500
- await createIndex(modclient.ft(), num_docs=num_docs)
- await waitForIndex(modclient, "idx")
+ await createIndex(decoded_r.ft(), num_docs=num_docs)
+ await waitForIndex(decoded_r, "idx")
# verify info
- info = await modclient.ft().info()
+ info = await decoded_r.ft().info()
for k in [
"index_name",
"index_options",
@@ -124,11 +123,11 @@ async def test_client(modclient: redis.Redis):
]:
assert k in info
- assert modclient.ft().index_name == info["index_name"]
+ assert decoded_r.ft().index_name == info["index_name"]
assert num_docs == int(info["num_docs"])
- res = await modclient.ft().search("henry iv")
- if is_resp2_connection(modclient):
+ res = await decoded_r.ft().search("henry iv")
+ if is_resp2_connection(decoded_r):
assert isinstance(res, Result)
assert 225 == res.total
assert 10 == len(res.docs)
@@ -140,7 +139,7 @@ async def test_client(modclient: redis.Redis):
assert len(doc.txt) > 0
# test no content
- res = await modclient.ft().search(Query("king").no_content())
+ res = await decoded_r.ft().search(Query("king").no_content())
assert 194 == res.total
assert 10 == len(res.docs)
for doc in res.docs:
@@ -148,24 +147,24 @@ async def test_client(modclient: redis.Redis):
assert "play" not in doc.__dict__
# test verbatim vs no verbatim
- total = (await modclient.ft().search(Query("kings").no_content())).total
+ total = (await decoded_r.ft().search(Query("kings").no_content())).total
vtotal = (
- await modclient.ft().search(Query("kings").no_content().verbatim())
+ await decoded_r.ft().search(Query("kings").no_content().verbatim())
).total
assert total > vtotal
# test in fields
txt_total = (
- await modclient.ft().search(Query("henry").no_content().limit_fields("txt"))
+ await decoded_r.ft().search(Query("henry").no_content().limit_fields("txt"))
).total
play_total = (
- await modclient.ft().search(
+ await decoded_r.ft().search(
Query("henry").no_content().limit_fields("play")
)
).total
both_total = (
await (
- modclient.ft().search(
+ decoded_r.ft().search(
Query("henry").no_content().limit_fields("play", "txt")
)
)
@@ -175,52 +174,52 @@ async def test_client(modclient: redis.Redis):
assert 494 == both_total
# test load_document
- doc = await modclient.ft().load_document("henry vi part 3:62")
+ doc = await decoded_r.ft().load_document("henry vi part 3:62")
assert doc is not None
assert "henry vi part 3:62" == doc.id
assert doc.play == "Henry VI Part 3"
assert len(doc.txt) > 0
# test in-keys
- ids = [x.id for x in (await modclient.ft().search(Query("henry"))).docs]
+ ids = [x.id for x in (await decoded_r.ft().search(Query("henry"))).docs]
assert 10 == len(ids)
subset = ids[:5]
- docs = await modclient.ft().search(Query("henry").limit_ids(*subset))
+ docs = await decoded_r.ft().search(Query("henry").limit_ids(*subset))
assert len(subset) == docs.total
ids = [x.id for x in docs.docs]
assert set(ids) == set(subset)
# test slop and in order
- assert 193 == (await modclient.ft().search(Query("henry king"))).total
+ assert 193 == (await decoded_r.ft().search(Query("henry king"))).total
assert (
3
== (
- await modclient.ft().search(Query("henry king").slop(0).in_order())
+ await decoded_r.ft().search(Query("henry king").slop(0).in_order())
).total
)
assert (
52
== (
- await modclient.ft().search(Query("king henry").slop(0).in_order())
+ await decoded_r.ft().search(Query("king henry").slop(0).in_order())
).total
)
- assert 53 == (await modclient.ft().search(Query("henry king").slop(0))).total
- assert 167 == (await modclient.ft().search(Query("henry king").slop(100))).total
+ assert 53 == (await decoded_r.ft().search(Query("henry king").slop(0))).total
+ assert 167 == (await decoded_r.ft().search(Query("henry king").slop(100))).total
# test delete document
- await modclient.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
- res = await modclient.ft().search(Query("death of a salesman"))
+ await decoded_r.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
+ res = await decoded_r.ft().search(Query("death of a salesman"))
assert 1 == res.total
- assert 1 == await modclient.ft().delete_document("doc-5ghs2")
- res = await modclient.ft().search(Query("death of a salesman"))
+ assert 1 == await decoded_r.ft().delete_document("doc-5ghs2")
+ res = await decoded_r.ft().search(Query("death of a salesman"))
assert 0 == res.total
- assert 0 == await modclient.ft().delete_document("doc-5ghs2")
+ assert 0 == await decoded_r.ft().delete_document("doc-5ghs2")
- await modclient.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
- res = await modclient.ft().search(Query("death of a salesman"))
+ await decoded_r.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
+ res = await decoded_r.ft().search(Query("death of a salesman"))
assert 1 == res.total
- await modclient.ft().delete_document("doc-5ghs2")
+ await decoded_r.ft().delete_document("doc-5ghs2")
else:
assert isinstance(res, dict)
assert 225 == res["total_results"]
@@ -228,36 +227,36 @@ async def test_client(modclient: redis.Redis):
for doc in res["results"]:
assert doc["id"]
- assert doc["fields"]["play"] == "Henry IV"
- assert len(doc["fields"]["txt"]) > 0
+ assert doc["extra_attributes"]["play"] == "Henry IV"
+ assert len(doc["extra_attributes"]["txt"]) > 0
# test no content
- res = await modclient.ft().search(Query("king").no_content())
+ res = await decoded_r.ft().search(Query("king").no_content())
assert 194 == res["total_results"]
assert 10 == len(res["results"])
for doc in res["results"]:
- assert "fields" not in doc.keys()
+ assert "extra_attributes" not in doc.keys()
# test verbatim vs no verbatim
- total = (await modclient.ft().search(Query("kings").no_content()))[
+ total = (await decoded_r.ft().search(Query("kings").no_content()))[
"total_results"
]
- vtotal = (await modclient.ft().search(Query("kings").no_content().verbatim()))[
+ vtotal = (await decoded_r.ft().search(Query("kings").no_content().verbatim()))[
"total_results"
]
assert total > vtotal
# test in fields
txt_total = (
- await modclient.ft().search(Query("henry").no_content().limit_fields("txt"))
+ await decoded_r.ft().search(Query("henry").no_content().limit_fields("txt"))
)["total_results"]
play_total = (
- await modclient.ft().search(
+ await decoded_r.ft().search(
Query("henry").no_content().limit_fields("play")
)
)["total_results"]
both_total = (
- await modclient.ft().search(
+ await decoded_r.ft().search(
Query("henry").no_content().limit_fields("play", "txt")
)
)["total_results"]
@@ -266,7 +265,7 @@ async def test_client(modclient: redis.Redis):
assert 494 == both_total
# test load_document
- doc = await modclient.ft().load_document("henry vi part 3:62")
+ doc = await decoded_r.ft().load_document("henry vi part 3:62")
assert doc is not None
assert "henry vi part 3:62" == doc.id
assert doc.play == "Henry VI Part 3"
@@ -274,71 +273,71 @@ async def test_client(modclient: redis.Redis):
# test in-keys
ids = [
- x["id"] for x in (await modclient.ft().search(Query("henry")))["results"]
+ x["id"] for x in (await decoded_r.ft().search(Query("henry")))["results"]
]
assert 10 == len(ids)
subset = ids[:5]
- docs = await modclient.ft().search(Query("henry").limit_ids(*subset))
+ docs = await decoded_r.ft().search(Query("henry").limit_ids(*subset))
assert len(subset) == docs["total_results"]
ids = [x["id"] for x in docs["results"]]
assert set(ids) == set(subset)
# test slop and in order
assert (
- 193 == (await modclient.ft().search(Query("henry king")))["total_results"]
+ 193 == (await decoded_r.ft().search(Query("henry king")))["total_results"]
)
assert (
3
- == (await modclient.ft().search(Query("henry king").slop(0).in_order()))[
+ == (await decoded_r.ft().search(Query("henry king").slop(0).in_order()))[
"total_results"
]
)
assert (
52
- == (await modclient.ft().search(Query("king henry").slop(0).in_order()))[
+ == (await decoded_r.ft().search(Query("king henry").slop(0).in_order()))[
"total_results"
]
)
assert (
53
- == (await modclient.ft().search(Query("henry king").slop(0)))[
+ == (await decoded_r.ft().search(Query("henry king").slop(0)))[
"total_results"
]
)
assert (
167
- == (await modclient.ft().search(Query("henry king").slop(100)))[
+ == (await decoded_r.ft().search(Query("henry king").slop(100)))[
"total_results"
]
)
# test delete document
- await modclient.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
- res = await modclient.ft().search(Query("death of a salesman"))
+ await decoded_r.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
+ res = await decoded_r.ft().search(Query("death of a salesman"))
assert 1 == res["total_results"]
- assert 1 == await modclient.ft().delete_document("doc-5ghs2")
- res = await modclient.ft().search(Query("death of a salesman"))
+ assert 1 == await decoded_r.ft().delete_document("doc-5ghs2")
+ res = await decoded_r.ft().search(Query("death of a salesman"))
assert 0 == res["total_results"]
- assert 0 == await modclient.ft().delete_document("doc-5ghs2")
+ assert 0 == await decoded_r.ft().delete_document("doc-5ghs2")
- await modclient.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
- res = await modclient.ft().search(Query("death of a salesman"))
+ await decoded_r.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
+ res = await decoded_r.ft().search(Query("death of a salesman"))
assert 1 == res["total_results"]
- await modclient.ft().delete_document("doc-5ghs2")
+ await decoded_r.ft().delete_document("doc-5ghs2")
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_scores(modclient: redis.Redis):
- await modclient.ft().create_index((TextField("txt"),))
+async def test_scores(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index((TextField("txt"),))
- await modclient.hset("doc1", mapping={"txt": "foo baz"})
- await modclient.hset("doc2", mapping={"txt": "foo bar"})
+ await decoded_r.hset("doc1", mapping={"txt": "foo baz"})
+ await decoded_r.hset("doc2", mapping={"txt": "foo bar"})
q = Query("foo ~bar").with_scores()
- res = await modclient.ft().search(q)
- if is_resp2_connection(modclient):
+ res = await decoded_r.ft().search(q)
+ if is_resp2_connection(decoded_r):
assert 2 == res.total
assert "doc2" == res.docs[0].id
assert 3.0 == res.docs[0].score
@@ -351,17 +350,17 @@ async def test_scores(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_stopwords(modclient: redis.Redis):
+async def test_stopwords(decoded_r: redis.Redis):
stopwords = ["foo", "bar", "baz"]
- await modclient.ft().create_index((TextField("txt"),), stopwords=stopwords)
- await modclient.hset("doc1", mapping={"txt": "foo bar"})
- await modclient.hset("doc2", mapping={"txt": "hello world"})
- await waitForIndex(modclient, "idx")
+ await decoded_r.ft().create_index((TextField("txt"),), stopwords=stopwords)
+ await decoded_r.hset("doc1", mapping={"txt": "foo bar"})
+ await decoded_r.hset("doc2", mapping={"txt": "hello world"})
+ await waitForIndex(decoded_r, "idx")
q1 = Query("foo bar").no_content()
q2 = Query("foo bar hello world").no_content()
- res1, res2 = await modclient.ft().search(q1), await modclient.ft().search(q2)
- if is_resp2_connection(modclient):
+ res1, res2 = await decoded_r.ft().search(q1), await decoded_r.ft().search(q2)
+ if is_resp2_connection(decoded_r):
assert 0 == res1.total
assert 1 == res2.total
else:
@@ -370,22 +369,22 @@ async def test_stopwords(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_filters(modclient: redis.Redis):
+async def test_filters(decoded_r: redis.Redis):
await (
- modclient.ft().create_index(
+ decoded_r.ft().create_index(
(TextField("txt"), NumericField("num"), GeoField("loc"))
)
)
await (
- modclient.hset(
+ decoded_r.hset(
"doc1", mapping={"txt": "foo bar", "num": 3.141, "loc": "-0.441,51.458"}
)
)
await (
- modclient.hset("doc2", mapping={"txt": "foo baz", "num": 2, "loc": "-0.1,51.2"})
+ decoded_r.hset("doc2", mapping={"txt": "foo baz", "num": 2, "loc": "-0.1,51.2"})
)
- await waitForIndex(modclient, "idx")
+ await waitForIndex(decoded_r, "idx")
# Test numerical filter
q1 = Query("foo").add_filter(NumericFilter("num", 0, 2)).no_content()
q2 = (
@@ -393,9 +392,9 @@ async def test_filters(modclient: redis.Redis):
.add_filter(NumericFilter("num", 2, NumericFilter.INF, minExclusive=True))
.no_content()
)
- res1, res2 = await modclient.ft().search(q1), await modclient.ft().search(q2)
+ res1, res2 = await decoded_r.ft().search(q1), await decoded_r.ft().search(q2)
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
assert 1 == res1.total
assert 1 == res2.total
assert "doc2" == res1.docs[0].id
@@ -409,9 +408,9 @@ async def test_filters(modclient: redis.Redis):
# Test geo filter
q1 = Query("foo").add_filter(GeoFilter("loc", -0.44, 51.45, 10)).no_content()
q2 = Query("foo").add_filter(GeoFilter("loc", -0.44, 51.45, 100)).no_content()
- res1, res2 = await modclient.ft().search(q1), await modclient.ft().search(q2)
+ res1, res2 = await decoded_r.ft().search(q1), await decoded_r.ft().search(q2)
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
assert 1 == res1.total
assert 2 == res2.total
assert "doc1" == res1.docs[0].id
@@ -432,22 +431,22 @@ async def test_filters(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_sort_by(modclient: redis.Redis):
+async def test_sort_by(decoded_r: redis.Redis):
await (
- modclient.ft().create_index(
+ decoded_r.ft().create_index(
(TextField("txt"), NumericField("num", sortable=True))
)
)
- await modclient.hset("doc1", mapping={"txt": "foo bar", "num": 1})
- await modclient.hset("doc2", mapping={"txt": "foo baz", "num": 2})
- await modclient.hset("doc3", mapping={"txt": "foo qux", "num": 3})
+ await decoded_r.hset("doc1", mapping={"txt": "foo bar", "num": 1})
+ await decoded_r.hset("doc2", mapping={"txt": "foo baz", "num": 2})
+ await decoded_r.hset("doc3", mapping={"txt": "foo qux", "num": 3})
# Test sort
q1 = Query("foo").sort_by("num", asc=True).no_content()
q2 = Query("foo").sort_by("num", asc=False).no_content()
- res1, res2 = await modclient.ft().search(q1), await modclient.ft().search(q2)
+ res1, res2 = await decoded_r.ft().search(q1), await decoded_r.ft().search(q2)
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
assert 3 == res1.total
assert "doc1" == res1.docs[0].id
assert "doc2" == res1.docs[1].id
@@ -469,14 +468,14 @@ async def test_sort_by(modclient: redis.Redis):
@pytest.mark.redismod
@skip_ifmodversion_lt("2.0.0", "search")
-async def test_drop_index(modclient: redis.Redis):
+async def test_drop_index(decoded_r: redis.Redis):
"""
Ensure the index gets dropped by data remains by default
"""
for x in range(20):
for keep_docs in [[True, {}], [False, {"name": "haveit"}]]:
idx = "HaveIt"
- index = getClient(modclient)
+ index = getClient(decoded_r)
await index.hset("index:haveit", mapping={"name": "haveit"})
idef = IndexDefinition(prefix=["index:"])
await index.ft(idx).create_index((TextField("name"),), definition=idef)
@@ -487,14 +486,14 @@ async def test_drop_index(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_example(modclient: redis.Redis):
+async def test_example(decoded_r: redis.Redis):
# Creating the index definition and schema
await (
- modclient.ft().create_index((TextField("title", weight=5.0), TextField("body")))
+ decoded_r.ft().create_index((TextField("title", weight=5.0), TextField("body")))
)
# Indexing a document
- await modclient.hset(
+ await decoded_r.hset(
"doc1",
mapping={
"title": "RediSearch",
@@ -505,12 +504,12 @@ async def test_example(modclient: redis.Redis):
# Searching with complex parameters:
q = Query("search engine").verbatim().no_content().paging(0, 5)
- res = await modclient.ft().search(q)
+ res = await decoded_r.ft().search(q)
assert res is not None
@pytest.mark.redismod
-async def test_auto_complete(modclient: redis.Redis):
+async def test_auto_complete(decoded_r: redis.Redis):
n = 0
with open(TITLES_CSV) as f:
cr = csv.reader(f)
@@ -518,10 +517,10 @@ async def test_auto_complete(modclient: redis.Redis):
for row in cr:
n += 1
term, score = row[0], float(row[1])
- assert n == await modclient.ft().sugadd("ac", Suggestion(term, score=score))
+ assert n == await decoded_r.ft().sugadd("ac", Suggestion(term, score=score))
- assert n == await modclient.ft().suglen("ac")
- ret = await modclient.ft().sugget("ac", "bad", with_scores=True)
+ assert n == await decoded_r.ft().suglen("ac")
+ ret = await decoded_r.ft().sugget("ac", "bad", with_scores=True)
assert 2 == len(ret)
assert "badger" == ret[0].string
assert isinstance(ret[0].score, float)
@@ -530,29 +529,29 @@ async def test_auto_complete(modclient: redis.Redis):
assert isinstance(ret[1].score, float)
assert 1.0 != ret[1].score
- ret = await modclient.ft().sugget("ac", "bad", fuzzy=True, num=10)
+ ret = await decoded_r.ft().sugget("ac", "bad", fuzzy=True, num=10)
assert 10 == len(ret)
assert 1.0 == ret[0].score
strs = {x.string for x in ret}
for sug in strs:
- assert 1 == await modclient.ft().sugdel("ac", sug)
+ assert 1 == await decoded_r.ft().sugdel("ac", sug)
# make sure a second delete returns 0
for sug in strs:
- assert 0 == await modclient.ft().sugdel("ac", sug)
+ assert 0 == await decoded_r.ft().sugdel("ac", sug)
# make sure they were actually deleted
- ret2 = await modclient.ft().sugget("ac", "bad", fuzzy=True, num=10)
+ ret2 = await decoded_r.ft().sugget("ac", "bad", fuzzy=True, num=10)
for sug in ret2:
assert sug.string not in strs
# Test with payload
- await modclient.ft().sugadd("ac", Suggestion("pay1", payload="pl1"))
- await modclient.ft().sugadd("ac", Suggestion("pay2", payload="pl2"))
- await modclient.ft().sugadd("ac", Suggestion("pay3", payload="pl3"))
+ await decoded_r.ft().sugadd("ac", Suggestion("pay1", payload="pl1"))
+ await decoded_r.ft().sugadd("ac", Suggestion("pay2", payload="pl2"))
+ await decoded_r.ft().sugadd("ac", Suggestion("pay3", payload="pl3"))
sugs = await (
- modclient.ft().sugget("ac", "pay", with_payloads=True, with_scores=True)
+ decoded_r.ft().sugget("ac", "pay", with_payloads=True, with_scores=True)
)
assert 3 == len(sugs)
for sug in sugs:
@@ -561,8 +560,8 @@ async def test_auto_complete(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_no_index(modclient: redis.Redis):
- await modclient.ft().create_index(
+async def test_no_index(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index(
(
TextField("field"),
TextField("text", no_index=True, sortable=True),
@@ -572,59 +571,59 @@ async def test_no_index(modclient: redis.Redis):
)
)
- await modclient.hset(
+ await decoded_r.hset(
"doc1",
mapping={"field": "aaa", "text": "1", "numeric": "1", "geo": "1,1", "tag": "1"},
)
- await modclient.hset(
+ await decoded_r.hset(
"doc2",
mapping={"field": "aab", "text": "2", "numeric": "2", "geo": "2,2", "tag": "2"},
)
- await waitForIndex(modclient, "idx")
+ await waitForIndex(decoded_r, "idx")
- if is_resp2_connection(modclient):
- res = await modclient.ft().search(Query("@text:aa*"))
+ if is_resp2_connection(decoded_r):
+ res = await decoded_r.ft().search(Query("@text:aa*"))
assert 0 == res.total
- res = await modclient.ft().search(Query("@field:aa*"))
+ res = await decoded_r.ft().search(Query("@field:aa*"))
assert 2 == res.total
- res = await modclient.ft().search(Query("*").sort_by("text", asc=False))
+ res = await decoded_r.ft().search(Query("*").sort_by("text", asc=False))
assert 2 == res.total
assert "doc2" == res.docs[0].id
- res = await modclient.ft().search(Query("*").sort_by("text", asc=True))
+ res = await decoded_r.ft().search(Query("*").sort_by("text", asc=True))
assert "doc1" == res.docs[0].id
- res = await modclient.ft().search(Query("*").sort_by("numeric", asc=True))
+ res = await decoded_r.ft().search(Query("*").sort_by("numeric", asc=True))
assert "doc1" == res.docs[0].id
- res = await modclient.ft().search(Query("*").sort_by("geo", asc=True))
+ res = await decoded_r.ft().search(Query("*").sort_by("geo", asc=True))
assert "doc1" == res.docs[0].id
- res = await modclient.ft().search(Query("*").sort_by("tag", asc=True))
+ res = await decoded_r.ft().search(Query("*").sort_by("tag", asc=True))
assert "doc1" == res.docs[0].id
else:
- res = await modclient.ft().search(Query("@text:aa*"))
+ res = await decoded_r.ft().search(Query("@text:aa*"))
assert 0 == res["total_results"]
- res = await modclient.ft().search(Query("@field:aa*"))
+ res = await decoded_r.ft().search(Query("@field:aa*"))
assert 2 == res["total_results"]
- res = await modclient.ft().search(Query("*").sort_by("text", asc=False))
+ res = await decoded_r.ft().search(Query("*").sort_by("text", asc=False))
assert 2 == res["total_results"]
assert "doc2" == res["results"][0]["id"]
- res = await modclient.ft().search(Query("*").sort_by("text", asc=True))
+ res = await decoded_r.ft().search(Query("*").sort_by("text", asc=True))
assert "doc1" == res["results"][0]["id"]
- res = await modclient.ft().search(Query("*").sort_by("numeric", asc=True))
+ res = await decoded_r.ft().search(Query("*").sort_by("numeric", asc=True))
assert "doc1" == res["results"][0]["id"]
- res = await modclient.ft().search(Query("*").sort_by("geo", asc=True))
+ res = await decoded_r.ft().search(Query("*").sort_by("geo", asc=True))
assert "doc1" == res["results"][0]["id"]
- res = await modclient.ft().search(Query("*").sort_by("tag", asc=True))
+ res = await decoded_r.ft().search(Query("*").sort_by("tag", asc=True))
assert "doc1" == res["results"][0]["id"]
# Ensure exception is raised for non-indexable, non-sortable fields
@@ -639,31 +638,31 @@ async def test_no_index(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_explain(modclient: redis.Redis):
+async def test_explain(decoded_r: redis.Redis):
await (
- modclient.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
+ decoded_r.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
)
- res = await modclient.ft().explain("@f3:f3_val @f2:f2_val @f1:f1_val")
+ res = await decoded_r.ft().explain("@f3:f3_val @f2:f2_val @f1:f1_val")
assert res
@pytest.mark.redismod
-async def test_explaincli(modclient: redis.Redis):
+async def test_explaincli(decoded_r: redis.Redis):
with pytest.raises(NotImplementedError):
- await modclient.ft().explain_cli("foo")
+ await decoded_r.ft().explain_cli("foo")
@pytest.mark.redismod
-async def test_summarize(modclient: redis.Redis):
- await createIndex(modclient.ft())
- await waitForIndex(modclient, "idx")
+async def test_summarize(decoded_r: redis.Redis):
+ await createIndex(decoded_r.ft())
+ await waitForIndex(decoded_r, "idx")
q = Query("king henry").paging(0, 1)
q.highlight(fields=("play", "txt"), tags=("", ""))
q.summarize("txt")
- if is_resp2_connection(modclient):
- doc = sorted((await modclient.ft().search(q)).docs)[0]
+ if is_resp2_connection(decoded_r):
+ doc = sorted((await decoded_r.ft().search(q)).docs)[0]
assert "Henry IV" == doc.play
assert (
"ACT I SCENE I. London. The palace. Enter KING HENRY, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
@@ -672,35 +671,35 @@ async def test_summarize(modclient: redis.Redis):
q = Query("king henry").paging(0, 1).summarize().highlight()
- doc = sorted((await modclient.ft().search(q)).docs)[0]
+ doc = sorted((await decoded_r.ft().search(q)).docs)[0]
assert "Henry ... " == doc.play
assert (
"ACT I SCENE I. London. The palace. Enter KING HENRY, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
== doc.txt
)
else:
- doc = sorted((await modclient.ft().search(q))["results"])[0]
- assert "Henry IV" == doc["fields"]["play"]
+ doc = sorted((await decoded_r.ft().search(q))["results"])[0]
+ assert "Henry IV" == doc["extra_attributes"]["play"]
assert (
"ACT I SCENE I. London. The palace. Enter KING HENRY, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc["fields"]["txt"]
+ == doc["extra_attributes"]["txt"]
)
q = Query("king henry").paging(0, 1).summarize().highlight()
- doc = sorted((await modclient.ft().search(q))["results"])[0]
- assert "Henry ... " == doc["fields"]["play"]
+ doc = sorted((await decoded_r.ft().search(q))["results"])[0]
+ assert "Henry ... " == doc["extra_attributes"]["play"]
assert (
"ACT I SCENE I. London. The palace. Enter KING HENRY, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc["fields"]["txt"]
+ == doc["extra_attributes"]["txt"]
)
@pytest.mark.redismod
@skip_ifmodversion_lt("2.0.0", "search")
-async def test_alias(modclient: redis.Redis):
- index1 = getClient(modclient)
- index2 = getClient(modclient)
+async def test_alias(decoded_r: redis.Redis):
+ index1 = getClient(decoded_r)
+ index2 = getClient(decoded_r)
def1 = IndexDefinition(prefix=["index1:"])
def2 = IndexDefinition(prefix=["index2:"])
@@ -713,13 +712,13 @@ async def test_alias(modclient: redis.Redis):
await index1.hset("index1:lonestar", mapping={"name": "lonestar"})
await index2.hset("index2:yogurt", mapping={"name": "yogurt"})
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
res = (await ftindex1.search("*")).docs[0]
assert "index1:lonestar" == res.id
# create alias and check for results
await ftindex1.aliasadd("spaceballs")
- alias_client = getClient(modclient).ft("spaceballs")
+ alias_client = getClient(decoded_r).ft("spaceballs")
res = (await alias_client.search("*")).docs[0]
assert "index1:lonestar" == res.id
@@ -729,7 +728,7 @@ async def test_alias(modclient: redis.Redis):
# update alias and ensure new results
await ftindex2.aliasupdate("spaceballs")
- alias_client2 = getClient(modclient).ft("spaceballs")
+ alias_client2 = getClient(decoded_r).ft("spaceballs")
res = (await alias_client2.search("*")).docs[0]
assert "index2:yogurt" == res.id
@@ -739,7 +738,7 @@ async def test_alias(modclient: redis.Redis):
# create alias and check for results
await ftindex1.aliasadd("spaceballs")
- alias_client = getClient(await modclient).ft("spaceballs")
+ alias_client = getClient(decoded_r).ft("spaceballs")
res = (await alias_client.search("*"))["results"][0]
assert "index1:lonestar" == res["id"]
@@ -749,7 +748,7 @@ async def test_alias(modclient: redis.Redis):
# update alias and ensure new results
await ftindex2.aliasupdate("spaceballs")
- alias_client2 = getClient(await modclient).ft("spaceballs")
+ alias_client2 = getClient(decoded_r).ft("spaceballs")
res = (await alias_client2.search("*"))["results"][0]
assert "index2:yogurt" == res["id"]
@@ -760,23 +759,24 @@ async def test_alias(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_alias_basic(modclient: redis.Redis):
+@pytest.mark.xfail(strict=False)
+async def test_alias_basic(decoded_r: redis.Redis):
# Creating a client with one index
- client = getClient(modclient)
+ client = getClient(decoded_r)
await client.flushdb()
- index1 = getClient(modclient).ft("testAlias")
+ index1 = getClient(decoded_r).ft("testAlias")
await index1.create_index((TextField("txt"),))
await index1.client.hset("doc1", mapping={"txt": "text goes here"})
- index2 = getClient(modclient).ft("testAlias2")
+ index2 = getClient(decoded_r).ft("testAlias2")
await index2.create_index((TextField("txt"),))
await index2.client.hset("doc2", mapping={"txt": "text goes here"})
# add the actual alias and check
await index1.aliasadd("myalias")
- alias_client = getClient(modclient).ft("myalias")
- if is_resp2_connection(modclient):
+ alias_client = getClient(decoded_r).ft("myalias")
+ if is_resp2_connection(decoded_r):
res = sorted((await alias_client.search("*")).docs, key=lambda x: x.id)
assert "doc1" == res[0].id
@@ -786,7 +786,7 @@ async def test_alias_basic(modclient: redis.Redis):
# update the alias and ensure we get doc2
await index2.aliasupdate("myalias")
- alias_client2 = getClient(modclient).ft("myalias")
+ alias_client2 = getClient(decoded_r).ft("myalias")
res = sorted((await alias_client2.search("*")).docs, key=lambda x: x.id)
assert "doc1" == res[0].id
else:
@@ -811,44 +811,63 @@ async def test_alias_basic(modclient: redis.Redis):
_ = (await alias_client2.search("*")).docs[0]
-# @pytest.mark.redismod
-# async def test_tags(modclient: redis.Redis):
-# await modclient.ft().create_index((TextField("txt"), TagField("tags")))
-# tags = "foo,foo bar,hello;world"
-# tags2 = "soba,ramen"
+@pytest.mark.redismod
+async def test_tags(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index((TextField("txt"), TagField("tags")))
+ tags = "foo,foo bar,hello;world"
+ tags2 = "soba,ramen"
+
+ await decoded_r.hset("doc1", mapping={"txt": "fooz barz", "tags": tags})
+ await decoded_r.hset("doc2", mapping={"txt": "noodles", "tags": tags2})
+ await waitForIndex(decoded_r, "idx")
+
+ q = Query("@tags:{foo}")
+ if is_resp2_connection(decoded_r):
+ res = await decoded_r.ft().search(q)
+ assert 1 == res.total
-# await modclient.hset("doc1", mapping={"txt": "fooz barz", "tags": tags})
-# await modclient.hset("doc2", mapping={"txt": "noodles", "tags": tags2})
-# await waitForIndex(modclient, "idx")
+ q = Query("@tags:{foo bar}")
+ res = await decoded_r.ft().search(q)
+ assert 1 == res.total
-# q = Query("@tags:{foo}")
-# res = await modclient.ft().search(q)
-# assert 1 == res.total
+ q = Query("@tags:{foo\\ bar}")
+ res = await decoded_r.ft().search(q)
+ assert 1 == res.total
-# q = Query("@tags:{foo bar}")
-# res = await modclient.ft().search(q)
-# assert 1 == res.total
+ q = Query("@tags:{hello\\;world}")
+ res = await decoded_r.ft().search(q)
+ assert 1 == res.total
-# q = Query("@tags:{foo\\ bar}")
-# res = await modclient.ft().search(q)
-# assert 1 == res.total
+ q2 = await decoded_r.ft().tagvals("tags")
+ assert set(tags.split(",") + tags2.split(",")) == set(q2)
+ else:
+ res = await decoded_r.ft().search(q)
+ assert 1 == res["total_results"]
+
+ q = Query("@tags:{foo bar}")
+ res = await decoded_r.ft().search(q)
+ assert 1 == res["total_results"]
-# q = Query("@tags:{hello\\;world}")
-# res = await modclient.ft().search(q)
-# assert 1 == res.total
+ q = Query("@tags:{foo\\ bar}")
+ res = await decoded_r.ft().search(q)
+ assert 1 == res["total_results"]
+
+ q = Query("@tags:{hello\\;world}")
+ res = await decoded_r.ft().search(q)
+ assert 1 == res["total_results"]
-# q2 = await modclient.ft().tagvals("tags")
-# assert (tags.split(",") + tags2.split(",")).sort() == q2.sort()
+ q2 = await decoded_r.ft().tagvals("tags")
+ assert set(tags.split(",") + tags2.split(",")) == q2
@pytest.mark.redismod
-async def test_textfield_sortable_nostem(modclient: redis.Redis):
+async def test_textfield_sortable_nostem(decoded_r: redis.Redis):
# Creating the index definition with sortable and no_stem
- await modclient.ft().create_index((TextField("txt", sortable=True, no_stem=True),))
+ await decoded_r.ft().create_index((TextField("txt", sortable=True, no_stem=True),))
# Now get the index info to confirm its contents
- response = await modclient.ft().info()
- if is_resp2_connection(modclient):
+ response = await decoded_r.ft().info()
+ if is_resp2_connection(decoded_r):
assert "SORTABLE" in response["attributes"][0]
assert "NOSTEM" in response["attributes"][0]
else:
@@ -857,15 +876,15 @@ async def test_textfield_sortable_nostem(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_alter_schema_add(modclient: redis.Redis):
+async def test_alter_schema_add(decoded_r: redis.Redis):
# Creating the index definition and schema
- await modclient.ft().create_index(TextField("title"))
+ await decoded_r.ft().create_index(TextField("title"))
# Using alter to add a field
- await modclient.ft().alter_schema_add(TextField("body"))
+ await decoded_r.ft().alter_schema_add(TextField("body"))
# Indexing a document
- await modclient.hset(
+ await decoded_r.hset(
"doc1", mapping={"title": "MyTitle", "body": "Some content only in the body"}
)
@@ -873,42 +892,42 @@ async def test_alter_schema_add(modclient: redis.Redis):
q = Query("only in the body")
# Ensure we find the result searching on the added body field
- res = await modclient.ft().search(q)
- if is_resp2_connection(modclient):
+ res = await decoded_r.ft().search(q)
+ if is_resp2_connection(decoded_r):
assert 1 == res.total
else:
assert 1 == res["total_results"]
@pytest.mark.redismod
-async def test_spell_check(modclient: redis.Redis):
- await modclient.ft().create_index((TextField("f1"), TextField("f2")))
+async def test_spell_check(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index((TextField("f1"), TextField("f2")))
await (
- modclient.hset(
+ decoded_r.hset(
"doc1", mapping={"f1": "some valid content", "f2": "this is sample text"}
)
)
- await modclient.hset("doc2", mapping={"f1": "very important", "f2": "lorem ipsum"})
- await waitForIndex(modclient, "idx")
+ await decoded_r.hset("doc2", mapping={"f1": "very important", "f2": "lorem ipsum"})
+ await waitForIndex(decoded_r, "idx")
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
# test spellcheck
- res = await modclient.ft().spellcheck("impornant")
+ res = await decoded_r.ft().spellcheck("impornant")
assert "important" == res["impornant"][0]["suggestion"]
- res = await modclient.ft().spellcheck("contnt")
+ res = await decoded_r.ft().spellcheck("contnt")
assert "content" == res["contnt"][0]["suggestion"]
# test spellcheck with Levenshtein distance
- res = await modclient.ft().spellcheck("vlis")
+ res = await decoded_r.ft().spellcheck("vlis")
assert res == {}
- res = await modclient.ft().spellcheck("vlis", distance=2)
+ res = await decoded_r.ft().spellcheck("vlis", distance=2)
assert "valid" == res["vlis"][0]["suggestion"]
# test spellcheck include
- await modclient.ft().dict_add("dict", "lore", "lorem", "lorm")
- res = await modclient.ft().spellcheck("lorm", include="dict")
+ await decoded_r.ft().dict_add("dict", "lore", "lorem", "lorm")
+ res = await decoded_r.ft().spellcheck("lorm", include="dict")
assert len(res["lorm"]) == 3
assert (
res["lorm"][0]["suggestion"],
@@ -918,186 +937,191 @@ async def test_spell_check(modclient: redis.Redis):
assert (res["lorm"][0]["score"], res["lorm"][1]["score"]) == ("0.5", "0")
# test spellcheck exclude
- res = await modclient.ft().spellcheck("lorm", exclude="dict")
+ res = await decoded_r.ft().spellcheck("lorm", exclude="dict")
assert res == {}
else:
# test spellcheck
- res = await modclient.ft().spellcheck("impornant")
- assert "important" in res["impornant"][0].keys()
+ res = await decoded_r.ft().spellcheck("impornant")
+ assert "important" in res["results"]["impornant"][0].keys()
- res = await modclient.ft().spellcheck("contnt")
- assert "content" in res["contnt"][0].keys()
+ res = await decoded_r.ft().spellcheck("contnt")
+ assert "content" in res["results"]["contnt"][0].keys()
# test spellcheck with Levenshtein distance
- res = await modclient.ft().spellcheck("vlis")
- assert res == {"vlis": []}
- res = await modclient.ft().spellcheck("vlis", distance=2)
- assert "valid" in res["vlis"][0].keys()
+ res = await decoded_r.ft().spellcheck("vlis")
+ assert res == {"results": {"vlis": []}}
+ res = await decoded_r.ft().spellcheck("vlis", distance=2)
+ assert "valid" in res["results"]["vlis"][0].keys()
# test spellcheck include
- await modclient.ft().dict_add("dict", "lore", "lorem", "lorm")
- res = await modclient.ft().spellcheck("lorm", include="dict")
- assert len(res["lorm"]) == 3
- assert "lorem" in res["lorm"][0].keys()
- assert "lore" in res["lorm"][1].keys()
- assert "lorm" in res["lorm"][2].keys()
- assert (res["lorm"][0]["lorem"], res["lorm"][1]["lore"]) == (0.5, 0)
+ await decoded_r.ft().dict_add("dict", "lore", "lorem", "lorm")
+ res = await decoded_r.ft().spellcheck("lorm", include="dict")
+ assert len(res["results"]["lorm"]) == 3
+ assert "lorem" in res["results"]["lorm"][0].keys()
+ assert "lore" in res["results"]["lorm"][1].keys()
+ assert "lorm" in res["results"]["lorm"][2].keys()
+ assert (
+ res["results"]["lorm"][0]["lorem"],
+ res["results"]["lorm"][1]["lore"],
+ ) == (0.5, 0)
# test spellcheck exclude
- res = await modclient.ft().spellcheck("lorm", exclude="dict")
- assert res == {}
+ res = await decoded_r.ft().spellcheck("lorm", exclude="dict")
+ assert res == {"results": {}}
@pytest.mark.redismod
-async def test_dict_operations(modclient: redis.Redis):
- await modclient.ft().create_index((TextField("f1"), TextField("f2")))
+async def test_dict_operations(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index((TextField("f1"), TextField("f2")))
# Add three items
- res = await modclient.ft().dict_add("custom_dict", "item1", "item2", "item3")
+ res = await decoded_r.ft().dict_add("custom_dict", "item1", "item2", "item3")
assert 3 == res
# Remove one item
- res = await modclient.ft().dict_del("custom_dict", "item2")
+ res = await decoded_r.ft().dict_del("custom_dict", "item2")
assert 1 == res
# Dump dict and inspect content
- res = await modclient.ft().dict_dump("custom_dict")
- assert_resp_response(modclient, res, ["item1", "item3"], {"item1", "item3"})
+ res = await decoded_r.ft().dict_dump("custom_dict")
+ assert_resp_response(decoded_r, res, ["item1", "item3"], {"item1", "item3"})
# Remove rest of the items before reload
- await modclient.ft().dict_del("custom_dict", *res)
+ await decoded_r.ft().dict_del("custom_dict", *res)
@pytest.mark.redismod
-async def test_phonetic_matcher(modclient: redis.Redis):
- await modclient.ft().create_index((TextField("name"),))
- await modclient.hset("doc1", mapping={"name": "Jon"})
- await modclient.hset("doc2", mapping={"name": "John"})
+async def test_phonetic_matcher(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index((TextField("name"),))
+ await decoded_r.hset("doc1", mapping={"name": "Jon"})
+ await decoded_r.hset("doc2", mapping={"name": "John"})
- res = await modclient.ft().search(Query("Jon"))
- if is_resp2_connection(modclient):
+ res = await decoded_r.ft().search(Query("Jon"))
+ if is_resp2_connection(decoded_r):
assert 1 == len(res.docs)
assert "Jon" == res.docs[0].name
else:
assert 1 == res["total_results"]
- assert "Jon" == res["results"][0]["fields"]["name"]
+ assert "Jon" == res["results"][0]["extra_attributes"]["name"]
# Drop and create index with phonetic matcher
- await modclient.flushdb()
+ await decoded_r.flushdb()
- await modclient.ft().create_index((TextField("name", phonetic_matcher="dm:en"),))
- await modclient.hset("doc1", mapping={"name": "Jon"})
- await modclient.hset("doc2", mapping={"name": "John"})
+ await decoded_r.ft().create_index((TextField("name", phonetic_matcher="dm:en"),))
+ await decoded_r.hset("doc1", mapping={"name": "Jon"})
+ await decoded_r.hset("doc2", mapping={"name": "John"})
- res = await modclient.ft().search(Query("Jon"))
- if is_resp2_connection(modclient):
+ res = await decoded_r.ft().search(Query("Jon"))
+ if is_resp2_connection(decoded_r):
assert 2 == len(res.docs)
assert ["John", "Jon"] == sorted(d.name for d in res.docs)
else:
assert 2 == res["total_results"]
- assert ["John", "Jon"] == sorted(d["fields"]["name"] for d in res["results"])
+ assert ["John", "Jon"] == sorted(
+ d["extra_attributes"]["name"] for d in res["results"]
+ )
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_scorer(modclient: redis.Redis):
- await modclient.ft().create_index((TextField("description"),))
+async def test_scorer(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index((TextField("description"),))
- await modclient.hset(
+ await decoded_r.hset(
"doc1", mapping={"description": "The quick brown fox jumps over the lazy dog"}
)
- await modclient.hset(
+ await decoded_r.hset(
"doc2",
mapping={
"description": "Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do." # noqa
},
)
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
# default scorer is TFIDF
- res = await modclient.ft().search(Query("quick").with_scores())
+ res = await decoded_r.ft().search(Query("quick").with_scores())
assert 1.0 == res.docs[0].score
- res = await modclient.ft().search(Query("quick").scorer("TFIDF").with_scores())
+ res = await decoded_r.ft().search(Query("quick").scorer("TFIDF").with_scores())
assert 1.0 == res.docs[0].score
res = await (
- modclient.ft().search(Query("quick").scorer("TFIDF.DOCNORM").with_scores())
+ decoded_r.ft().search(Query("quick").scorer("TFIDF.DOCNORM").with_scores())
)
assert 0.1111111111111111 == res.docs[0].score
- res = await modclient.ft().search(Query("quick").scorer("BM25").with_scores())
+ res = await decoded_r.ft().search(Query("quick").scorer("BM25").with_scores())
assert 0.17699114465425977 == res.docs[0].score
- res = await modclient.ft().search(Query("quick").scorer("DISMAX").with_scores())
+ res = await decoded_r.ft().search(Query("quick").scorer("DISMAX").with_scores())
assert 2.0 == res.docs[0].score
- res = await modclient.ft().search(
+ res = await decoded_r.ft().search(
Query("quick").scorer("DOCSCORE").with_scores()
)
assert 1.0 == res.docs[0].score
- res = await modclient.ft().search(
+ res = await decoded_r.ft().search(
Query("quick").scorer("HAMMING").with_scores()
)
assert 0.0 == res.docs[0].score
else:
- res = await modclient.ft().search(Query("quick").with_scores())
+ res = await decoded_r.ft().search(Query("quick").with_scores())
assert 1.0 == res["results"][0]["score"]
- res = await modclient.ft().search(Query("quick").scorer("TFIDF").with_scores())
+ res = await decoded_r.ft().search(Query("quick").scorer("TFIDF").with_scores())
assert 1.0 == res["results"][0]["score"]
- res = await modclient.ft().search(
+ res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF.DOCNORM").with_scores()
)
assert 0.1111111111111111 == res["results"][0]["score"]
- res = await modclient.ft().search(Query("quick").scorer("BM25").with_scores())
+ res = await decoded_r.ft().search(Query("quick").scorer("BM25").with_scores())
assert 0.17699114465425977 == res["results"][0]["score"]
- res = await modclient.ft().search(Query("quick").scorer("DISMAX").with_scores())
+ res = await decoded_r.ft().search(Query("quick").scorer("DISMAX").with_scores())
assert 2.0 == res["results"][0]["score"]
- res = await modclient.ft().search(
+ res = await decoded_r.ft().search(
Query("quick").scorer("DOCSCORE").with_scores()
)
assert 1.0 == res["results"][0]["score"]
- res = await modclient.ft().search(
+ res = await decoded_r.ft().search(
Query("quick").scorer("HAMMING").with_scores()
)
assert 0.0 == res["results"][0]["score"]
@pytest.mark.redismod
-async def test_get(modclient: redis.Redis):
- await modclient.ft().create_index((TextField("f1"), TextField("f2")))
+async def test_get(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index((TextField("f1"), TextField("f2")))
- assert [None] == await modclient.ft().get("doc1")
- assert [None, None] == await modclient.ft().get("doc2", "doc1")
+ assert [None] == await decoded_r.ft().get("doc1")
+ assert [None, None] == await decoded_r.ft().get("doc2", "doc1")
- await modclient.hset(
+ await decoded_r.hset(
"doc1", mapping={"f1": "some valid content dd1", "f2": "this is sample text f1"}
)
- await modclient.hset(
+ await decoded_r.hset(
"doc2", mapping={"f1": "some valid content dd2", "f2": "this is sample text f2"}
)
assert [
["f1", "some valid content dd2", "f2", "this is sample text f2"]
- ] == await modclient.ft().get("doc2")
+ ] == await decoded_r.ft().get("doc2")
assert [
["f1", "some valid content dd1", "f2", "this is sample text f1"],
["f1", "some valid content dd2", "f2", "this is sample text f2"],
- ] == await modclient.ft().get("doc1", "doc2")
+ ] == await decoded_r.ft().get("doc1", "doc2")
@pytest.mark.redismod
@pytest.mark.onlynoncluster
@skip_ifmodversion_lt("2.2.0", "search")
-async def test_config(modclient: redis.Redis):
- assert await modclient.ft().config_set("TIMEOUT", "100")
+async def test_config(decoded_r: redis.Redis):
+ assert await decoded_r.ft().config_set("TIMEOUT", "100")
with pytest.raises(redis.ResponseError):
- await modclient.ft().config_set("TIMEOUT", "null")
- res = await modclient.ft().config_get("*")
+ await decoded_r.ft().config_set("TIMEOUT", "null")
+ res = await decoded_r.ft().config_get("*")
assert "100" == res["TIMEOUT"]
- res = await modclient.ft().config_get("TIMEOUT")
+ res = await decoded_r.ft().config_get("TIMEOUT")
assert "100" == res["TIMEOUT"]
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_aggregations_groupby(modclient: redis.Redis):
+async def test_aggregations_groupby(decoded_r: redis.Redis):
# Creating the index definition and schema
- await modclient.ft().create_index(
+ await decoded_r.ft().create_index(
(
NumericField("random_num"),
TextField("title"),
@@ -1107,7 +1131,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
)
# Indexing a document
- await modclient.hset(
+ await decoded_r.hset(
"search",
mapping={
"title": "RediSearch",
@@ -1116,7 +1140,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
"random_num": 10,
},
)
- await modclient.hset(
+ await decoded_r.hset(
"ai",
mapping={
"title": "RedisAI",
@@ -1125,7 +1149,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
"random_num": 3,
},
)
- await modclient.hset(
+ await decoded_r.hset(
"json",
mapping={
"title": "RedisJson",
@@ -1136,14 +1160,14 @@ async def test_aggregations_groupby(modclient: redis.Redis):
)
for dialect in [1, 2]:
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
req = (
aggregations.AggregateRequest("redis")
.group_by("@parent", reducers.count())
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "3"
@@ -1153,7 +1177,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "3"
@@ -1163,7 +1187,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "3"
@@ -1173,7 +1197,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "21" # 10+8+3
@@ -1183,7 +1207,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "3" # min(10,8,3)
@@ -1193,7 +1217,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "10" # max(10,8,3)
@@ -1203,7 +1227,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "7" # (10+3+8)/3
@@ -1213,7 +1237,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "3.60555127546"
@@ -1223,7 +1247,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[3] == "8" # median of 3,8,10
@@ -1233,7 +1257,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert set(res[3]) == {"RediSearch", "RedisAI", "RedisJson"}
@@ -1243,7 +1267,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res == ["parent", "redis", "first", "RediSearch"]
req = (
@@ -1254,7 +1278,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req)).rows[0]
+ res = (await decoded_r.ft().aggregate(req)).rows[0]
assert res[1] == "redis"
assert res[2] == "random"
assert len(res[3]) == 2
@@ -1266,9 +1290,9 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliascount"] == "3"
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliascount"] == "3"
req = (
aggregations.AggregateRequest("redis")
@@ -1276,9 +1300,11 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliascount_distincttitle"] == "3"
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert (
+ res["extra_attributes"]["__generated_aliascount_distincttitle"] == "3"
+ )
req = (
aggregations.AggregateRequest("redis")
@@ -1286,9 +1312,12 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliascount_distinctishtitle"] == "3"
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert (
+ res["extra_attributes"]["__generated_aliascount_distinctishtitle"]
+ == "3"
+ )
req = (
aggregations.AggregateRequest("redis")
@@ -1296,9 +1325,9 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliassumrandom_num"] == "21" # 10+8+3
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliassumrandom_num"] == "21"
req = (
aggregations.AggregateRequest("redis")
@@ -1306,9 +1335,9 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasminrandom_num"] == "3" # min(10,8,3)
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliasminrandom_num"] == "3"
req = (
aggregations.AggregateRequest("redis")
@@ -1316,9 +1345,9 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasmaxrandom_num"] == "10"
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliasmaxrandom_num"] == "10"
req = (
aggregations.AggregateRequest("redis")
@@ -1326,9 +1355,9 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasavgrandom_num"] == "7" # (10+3+8)/3
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliasavgrandom_num"] == "7"
req = (
aggregations.AggregateRequest("redis")
@@ -1336,9 +1365,12 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasstddevrandom_num"] == "3.60555127546"
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert (
+ res["extra_attributes"]["__generated_aliasstddevrandom_num"]
+ == "3.60555127546"
+ )
req = (
aggregations.AggregateRequest("redis")
@@ -1346,9 +1378,12 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasquantilerandom_num,0.5"] == "8"
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert (
+ res["extra_attributes"]["__generated_aliasquantilerandom_num,0.5"]
+ == "8"
+ )
req = (
aggregations.AggregateRequest("redis")
@@ -1356,9 +1391,9 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert set(res["fields"]["__generated_aliastolisttitle"]) == {
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert set(res["extra_attributes"]["__generated_aliastolisttitle"]) == {
"RediSearch",
"RedisAI",
"RedisJson",
@@ -1370,8 +1405,8 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"] == {"parent": "redis", "first": "RediSearch"}
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"] == {"parent": "redis", "first": "RediSearch"}
req = (
aggregations.AggregateRequest("redis")
@@ -1381,43 +1416,47 @@ async def test_aggregations_groupby(modclient: redis.Redis):
.dialect(dialect)
)
- res = (await modclient.ft().aggregate(req))["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert "random" in res["fields"].keys()
- assert len(res["fields"]["random"]) == 2
- assert res["fields"]["random"][0] in ["RediSearch", "RedisAI", "RedisJson"]
+ res = (await decoded_r.ft().aggregate(req))["results"][0]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert "random" in res["extra_attributes"].keys()
+ assert len(res["extra_attributes"]["random"]) == 2
+ assert res["extra_attributes"]["random"][0] in [
+ "RediSearch",
+ "RedisAI",
+ "RedisJson",
+ ]
@pytest.mark.redismod
-async def test_aggregations_sort_by_and_limit(modclient: redis.Redis):
- await modclient.ft().create_index((TextField("t1"), TextField("t2")))
+async def test_aggregations_sort_by_and_limit(decoded_r: redis.Redis):
+ await decoded_r.ft().create_index((TextField("t1"), TextField("t2")))
- await modclient.ft().client.hset("doc1", mapping={"t1": "a", "t2": "b"})
- await modclient.ft().client.hset("doc2", mapping={"t1": "b", "t2": "a"})
+ await decoded_r.ft().client.hset("doc1", mapping={"t1": "a", "t2": "b"})
+ await decoded_r.ft().client.hset("doc2", mapping={"t1": "b", "t2": "a"})
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
# test sort_by using SortDirection
req = aggregations.AggregateRequest("*").sort_by(
aggregations.Asc("@t2"), aggregations.Desc("@t1")
)
- res = await modclient.ft().aggregate(req)
+ res = await decoded_r.ft().aggregate(req)
assert res.rows[0] == ["t2", "a", "t1", "b"]
assert res.rows[1] == ["t2", "b", "t1", "a"]
# test sort_by without SortDirection
req = aggregations.AggregateRequest("*").sort_by("@t1")
- res = await modclient.ft().aggregate(req)
+ res = await decoded_r.ft().aggregate(req)
assert res.rows[0] == ["t1", "a"]
assert res.rows[1] == ["t1", "b"]
# test sort_by with max
req = aggregations.AggregateRequest("*").sort_by("@t1", max=1)
- res = await modclient.ft().aggregate(req)
+ res = await decoded_r.ft().aggregate(req)
assert len(res.rows) == 1
# test limit
req = aggregations.AggregateRequest("*").sort_by("@t1").limit(1, 1)
- res = await modclient.ft().aggregate(req)
+ res = await decoded_r.ft().aggregate(req)
assert len(res.rows) == 1
assert res.rows[0] == ["t1", "b"]
else:
@@ -1425,81 +1464,81 @@ async def test_aggregations_sort_by_and_limit(modclient: redis.Redis):
req = aggregations.AggregateRequest("*").sort_by(
aggregations.Asc("@t2"), aggregations.Desc("@t1")
)
- res = (await modclient.ft().aggregate(req))["results"]
- assert res[0]["fields"] == {"t2": "a", "t1": "b"}
- assert res[1]["fields"] == {"t2": "b", "t1": "a"}
+ res = (await decoded_r.ft().aggregate(req))["results"]
+ assert res[0]["extra_attributes"] == {"t2": "a", "t1": "b"}
+ assert res[1]["extra_attributes"] == {"t2": "b", "t1": "a"}
# test sort_by without SortDirection
req = aggregations.AggregateRequest("*").sort_by("@t1")
- res = (await modclient.ft().aggregate(req))["results"]
- assert res[0]["fields"] == {"t1": "a"}
- assert res[1]["fields"] == {"t1": "b"}
+ res = (await decoded_r.ft().aggregate(req))["results"]
+ assert res[0]["extra_attributes"] == {"t1": "a"}
+ assert res[1]["extra_attributes"] == {"t1": "b"}
# test sort_by with max
req = aggregations.AggregateRequest("*").sort_by("@t1", max=1)
- res = await modclient.ft().aggregate(req)
+ res = await decoded_r.ft().aggregate(req)
assert len(res["results"]) == 1
# test limit
req = aggregations.AggregateRequest("*").sort_by("@t1").limit(1, 1)
- res = await modclient.ft().aggregate(req)
+ res = await decoded_r.ft().aggregate(req)
assert len(res["results"]) == 1
- assert res["results"][0]["fields"] == {"t1": "b"}
+ assert res["results"][0]["extra_attributes"] == {"t1": "b"}
@pytest.mark.redismod
@pytest.mark.experimental
-async def test_withsuffixtrie(modclient: redis.Redis):
+async def test_withsuffixtrie(decoded_r: redis.Redis):
# create index
- assert await modclient.ft().create_index((TextField("txt"),))
- await waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- if is_resp2_connection(modclient):
- info = await modclient.ft().info()
+ assert await decoded_r.ft().create_index((TextField("txt"),))
+ await waitForIndex(decoded_r, getattr(decoded_r.ft(), "index_name", "idx"))
+ if is_resp2_connection(decoded_r):
+ info = await decoded_r.ft().info()
assert "WITHSUFFIXTRIE" not in info["attributes"][0]
- assert await modclient.ft().dropindex("idx")
+ assert await decoded_r.ft().dropindex("idx")
# create withsuffixtrie index (text field)
- assert await modclient.ft().create_index((TextField("t", withsuffixtrie=True)))
- await waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- info = await modclient.ft().info()
+ assert await decoded_r.ft().create_index((TextField("t", withsuffixtrie=True)))
+ await waitForIndex(decoded_r, getattr(decoded_r.ft(), "index_name", "idx"))
+ info = await decoded_r.ft().info()
assert "WITHSUFFIXTRIE" in info["attributes"][0]
- assert await modclient.ft().dropindex("idx")
+ assert await decoded_r.ft().dropindex("idx")
# create withsuffixtrie index (tag field)
- assert await modclient.ft().create_index((TagField("t", withsuffixtrie=True)))
- await waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- info = await modclient.ft().info()
+ assert await decoded_r.ft().create_index((TagField("t", withsuffixtrie=True)))
+ await waitForIndex(decoded_r, getattr(decoded_r.ft(), "index_name", "idx"))
+ info = await decoded_r.ft().info()
assert "WITHSUFFIXTRIE" in info["attributes"][0]
else:
- info = await modclient.ft().info()
+ info = await decoded_r.ft().info()
assert "WITHSUFFIXTRIE" not in info["attributes"][0]["flags"]
- assert await modclient.ft().dropindex("idx")
+ assert await decoded_r.ft().dropindex("idx")
# create withsuffixtrie index (text fiels)
- assert await modclient.ft().create_index((TextField("t", withsuffixtrie=True)))
- waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- info = await modclient.ft().info()
+ assert await decoded_r.ft().create_index((TextField("t", withsuffixtrie=True)))
+ waitForIndex(decoded_r, getattr(decoded_r.ft(), "index_name", "idx"))
+ info = await decoded_r.ft().info()
assert "WITHSUFFIXTRIE" in info["attributes"][0]["flags"]
- assert await modclient.ft().dropindex("idx")
+ assert await decoded_r.ft().dropindex("idx")
# create withsuffixtrie index (tag field)
- assert await modclient.ft().create_index((TagField("t", withsuffixtrie=True)))
- waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- info = await modclient.ft().info()
+ assert await decoded_r.ft().create_index((TagField("t", withsuffixtrie=True)))
+ waitForIndex(decoded_r, getattr(decoded_r.ft(), "index_name", "idx"))
+ info = await decoded_r.ft().info()
assert "WITHSUFFIXTRIE" in info["attributes"][0]["flags"]
@pytest.mark.redismod
@skip_if_redis_enterprise()
-async def test_search_commands_in_pipeline(modclient: redis.Redis):
- p = await modclient.ft().pipeline()
+async def test_search_commands_in_pipeline(decoded_r: redis.Redis):
+ p = await decoded_r.ft().pipeline()
p.create_index((TextField("txt"),))
p.hset("doc1", mapping={"txt": "foo bar"})
p.hset("doc2", mapping={"txt": "foo bar"})
q = Query("foo bar").with_payloads()
await p.search(q)
res = await p.execute()
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
assert res[:3] == ["OK", True, True]
assert 2 == res[3][0]
assert "doc1" == res[3][1]
@@ -1513,16 +1552,16 @@ async def test_search_commands_in_pipeline(modclient: redis.Redis):
assert "doc2" == res[3]["results"][1]["id"]
assert res[3]["results"][0]["payload"] is None
assert (
- res[3]["results"][0]["fields"]
- == res[3]["results"][1]["fields"]
+ res[3]["results"][0]["extra_attributes"]
+ == res[3]["results"][1]["extra_attributes"]
== {"txt": "foo bar"}
)
@pytest.mark.redismod
-async def test_query_timeout(modclient: redis.Redis):
+async def test_query_timeout(decoded_r: redis.Redis):
q1 = Query("foo").timeout(5000)
assert q1.get_args() == ["foo", "TIMEOUT", 5000, "LIMIT", 0, 10]
q2 = Query("foo").timeout("not_a_number")
with pytest.raises(redis.ResponseError):
- await modclient.ft().search(q2)
+ await decoded_r.ft().search(q2)
diff --git a/tests/test_asyncio/test_sentinel.py b/tests/test_asyncio/test_sentinel.py
index 5a0533ba05..4f32ecdc08 100644
--- a/tests/test_asyncio/test_sentinel.py
+++ b/tests/test_asyncio/test_sentinel.py
@@ -2,7 +2,6 @@
import pytest
import pytest_asyncio
-
import redis.asyncio.sentinel
from redis import exceptions
from redis.asyncio.sentinel import (
diff --git a/tests/test_asyncio/test_sentinel_managed_connection.py b/tests/test_asyncio/test_sentinel_managed_connection.py
index a6e9f37a63..e784690c77 100644
--- a/tests/test_asyncio/test_sentinel_managed_connection.py
+++ b/tests/test_asyncio/test_sentinel_managed_connection.py
@@ -1,7 +1,6 @@
import socket
import pytest
-
from redis.asyncio.retry import Retry
from redis.asyncio.sentinel import SentinelManagedConnection
from redis.backoff import NoBackoff
diff --git a/tests/test_asyncio/test_timeseries.py b/tests/test_asyncio/test_timeseries.py
index d09e992a7b..48ffdfd889 100644
--- a/tests/test_asyncio/test_timeseries.py
+++ b/tests/test_asyncio/test_timeseries.py
@@ -2,7 +2,6 @@
from time import sleep
import pytest
-
import redis.asyncio as redis
from tests.conftest import (
assert_resp_response,
@@ -12,33 +11,33 @@
@pytest.mark.redismod
-async def test_create(modclient: redis.Redis):
- assert await modclient.ts().create(1)
- assert await modclient.ts().create(2, retention_msecs=5)
- assert await modclient.ts().create(3, labels={"Redis": "Labs"})
- assert await modclient.ts().create(4, retention_msecs=20, labels={"Time": "Series"})
- info = await modclient.ts().info(4)
+async def test_create(decoded_r: redis.Redis):
+ assert await decoded_r.ts().create(1)
+ assert await decoded_r.ts().create(2, retention_msecs=5)
+ assert await decoded_r.ts().create(3, labels={"Redis": "Labs"})
+ assert await decoded_r.ts().create(4, retention_msecs=20, labels={"Time": "Series"})
+ info = await decoded_r.ts().info(4)
assert_resp_response(
- modclient, 20, info.get("retention_msecs"), info.get("retentionTime")
+ decoded_r, 20, info.get("retention_msecs"), info.get("retentionTime")
)
assert "Series" == info["labels"]["Time"]
# Test for a chunk size of 128 Bytes
- assert await modclient.ts().create("time-serie-1", chunk_size=128)
- info = await modclient.ts().info("time-serie-1")
- assert_resp_response(modclient, 128, info.get("chunk_size"), info.get("chunkSize"))
+ assert await decoded_r.ts().create("time-serie-1", chunk_size=128)
+ info = await decoded_r.ts().info("time-serie-1")
+ assert_resp_response(decoded_r, 128, info.get("chunk_size"), info.get("chunkSize"))
@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-async def test_create_duplicate_policy(modclient: redis.Redis):
+async def test_create_duplicate_policy(decoded_r: redis.Redis):
# Test for duplicate policy
for duplicate_policy in ["block", "last", "first", "min", "max"]:
ts_name = f"time-serie-ooo-{duplicate_policy}"
- assert await modclient.ts().create(ts_name, duplicate_policy=duplicate_policy)
- info = await modclient.ts().info(ts_name)
+ assert await decoded_r.ts().create(ts_name, duplicate_policy=duplicate_policy)
+ info = await decoded_r.ts().info(ts_name)
assert_resp_response(
- modclient,
+ decoded_r,
duplicate_policy,
info.get("duplicate_policy"),
info.get("duplicatePolicy"),
@@ -46,214 +45,210 @@ async def test_create_duplicate_policy(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_alter(modclient: redis.Redis):
- assert await modclient.ts().create(1)
- res = await modclient.ts().info(1)
+async def test_alter(decoded_r: redis.Redis):
+ assert await decoded_r.ts().create(1)
+ res = await decoded_r.ts().info(1)
assert_resp_response(
- modclient, 0, res.get("retention_msecs"), res.get("retentionTime")
+ decoded_r, 0, res.get("retention_msecs"), res.get("retentionTime")
)
- assert await modclient.ts().alter(1, retention_msecs=10)
- res = await modclient.ts().info(1)
- assert {} == (await modclient.ts().info(1))["labels"]
- info = await modclient.ts().info(1)
+ assert await decoded_r.ts().alter(1, retention_msecs=10)
+ res = await decoded_r.ts().info(1)
+ assert {} == (await decoded_r.ts().info(1))["labels"]
+ info = await decoded_r.ts().info(1)
assert_resp_response(
- modclient, 10, info.get("retention_msecs"), info.get("retentionTime")
+ decoded_r, 10, info.get("retention_msecs"), info.get("retentionTime")
)
- assert await modclient.ts().alter(1, labels={"Time": "Series"})
- res = await modclient.ts().info(1)
- assert "Series" == (await modclient.ts().info(1))["labels"]["Time"]
- info = await modclient.ts().info(1)
+ assert await decoded_r.ts().alter(1, labels={"Time": "Series"})
+ res = await decoded_r.ts().info(1)
+ assert "Series" == (await decoded_r.ts().info(1))["labels"]["Time"]
+ info = await decoded_r.ts().info(1)
assert_resp_response(
- modclient, 10, info.get("retention_msecs"), info.get("retentionTime")
+ decoded_r, 10, info.get("retention_msecs"), info.get("retentionTime")
)
@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-async def test_alter_diplicate_policy(modclient: redis.Redis):
- assert await modclient.ts().create(1)
- info = await modclient.ts().info(1)
+async def test_alter_diplicate_policy(decoded_r: redis.Redis):
+ assert await decoded_r.ts().create(1)
+ info = await decoded_r.ts().info(1)
assert_resp_response(
- modclient, None, info.get("duplicate_policy"), info.get("duplicatePolicy")
+ decoded_r, None, info.get("duplicate_policy"), info.get("duplicatePolicy")
)
- assert await modclient.ts().alter(1, duplicate_policy="min")
- info = await modclient.ts().info(1)
+ assert await decoded_r.ts().alter(1, duplicate_policy="min")
+ info = await decoded_r.ts().info(1)
assert_resp_response(
- modclient, "min", info.get("duplicate_policy"), info.get("duplicatePolicy")
+ decoded_r, "min", info.get("duplicate_policy"), info.get("duplicatePolicy")
)
@pytest.mark.redismod
-async def test_add(modclient: redis.Redis):
- assert 1 == await modclient.ts().add(1, 1, 1)
- assert 2 == await modclient.ts().add(2, 2, 3, retention_msecs=10)
- assert 3 == await modclient.ts().add(3, 3, 2, labels={"Redis": "Labs"})
- assert 4 == await modclient.ts().add(
+async def test_add(decoded_r: redis.Redis):
+ assert 1 == await decoded_r.ts().add(1, 1, 1)
+ assert 2 == await decoded_r.ts().add(2, 2, 3, retention_msecs=10)
+ assert 3 == await decoded_r.ts().add(3, 3, 2, labels={"Redis": "Labs"})
+ assert 4 == await decoded_r.ts().add(
4, 4, 2, retention_msecs=10, labels={"Redis": "Labs", "Time": "Series"}
)
- res = await modclient.ts().add(5, "*", 1)
+ res = await decoded_r.ts().add(5, "*", 1)
assert abs(time.time() - round(float(res) / 1000)) < 1.0
- info = await modclient.ts().info(4)
+ info = await decoded_r.ts().info(4)
assert_resp_response(
- modclient, 10, info.get("retention_msecs"), info.get("retentionTime")
+ decoded_r, 10, info.get("retention_msecs"), info.get("retentionTime")
)
assert "Labs" == info["labels"]["Redis"]
# Test for a chunk size of 128 Bytes on TS.ADD
- assert await modclient.ts().add("time-serie-1", 1, 10.0, chunk_size=128)
- info = await modclient.ts().info("time-serie-1")
- assert_resp_response(modclient, 128, info.get("chunk_size"), info.get("chunkSize"))
+ assert await decoded_r.ts().add("time-serie-1", 1, 10.0, chunk_size=128)
+ info = await decoded_r.ts().info("time-serie-1")
+ assert_resp_response(decoded_r, 128, info.get("chunk_size"), info.get("chunkSize"))
@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-async def test_add_duplicate_policy(modclient: redis.Redis):
+async def test_add_duplicate_policy(r: redis.Redis):
# Test for duplicate policy BLOCK
- assert 1 == await modclient.ts().add("time-serie-add-ooo-block", 1, 5.0)
+ assert 1 == await r.ts().add("time-serie-add-ooo-block", 1, 5.0)
with pytest.raises(Exception):
- await modclient.ts().add(
- "time-serie-add-ooo-block", 1, 5.0, duplicate_policy="block"
- )
+ await r.ts().add("time-serie-add-ooo-block", 1, 5.0, duplicate_policy="block")
# Test for duplicate policy LAST
- assert 1 == await modclient.ts().add("time-serie-add-ooo-last", 1, 5.0)
- assert 1 == await modclient.ts().add(
+ assert 1 == await r.ts().add("time-serie-add-ooo-last", 1, 5.0)
+ assert 1 == await r.ts().add(
"time-serie-add-ooo-last", 1, 10.0, duplicate_policy="last"
)
- res = await modclient.ts().get("time-serie-add-ooo-last")
+ res = await r.ts().get("time-serie-add-ooo-last")
assert 10.0 == res[1]
# Test for duplicate policy FIRST
- assert 1 == await modclient.ts().add("time-serie-add-ooo-first", 1, 5.0)
- assert 1 == await modclient.ts().add(
+ assert 1 == await r.ts().add("time-serie-add-ooo-first", 1, 5.0)
+ assert 1 == await r.ts().add(
"time-serie-add-ooo-first", 1, 10.0, duplicate_policy="first"
)
- res = await modclient.ts().get("time-serie-add-ooo-first")
+ res = await r.ts().get("time-serie-add-ooo-first")
assert 5.0 == res[1]
# Test for duplicate policy MAX
- assert 1 == await modclient.ts().add("time-serie-add-ooo-max", 1, 5.0)
- assert 1 == await modclient.ts().add(
+ assert 1 == await r.ts().add("time-serie-add-ooo-max", 1, 5.0)
+ assert 1 == await r.ts().add(
"time-serie-add-ooo-max", 1, 10.0, duplicate_policy="max"
)
- res = await modclient.ts().get("time-serie-add-ooo-max")
+ res = await r.ts().get("time-serie-add-ooo-max")
assert 10.0 == res[1]
# Test for duplicate policy MIN
- assert 1 == await modclient.ts().add("time-serie-add-ooo-min", 1, 5.0)
- assert 1 == await modclient.ts().add(
+ assert 1 == await r.ts().add("time-serie-add-ooo-min", 1, 5.0)
+ assert 1 == await r.ts().add(
"time-serie-add-ooo-min", 1, 10.0, duplicate_policy="min"
)
- res = await modclient.ts().get("time-serie-add-ooo-min")
+ res = await r.ts().get("time-serie-add-ooo-min")
assert 5.0 == res[1]
@pytest.mark.redismod
-async def test_madd(modclient: redis.Redis):
- await modclient.ts().create("a")
- assert [1, 2, 3] == await modclient.ts().madd(
+async def test_madd(decoded_r: redis.Redis):
+ await decoded_r.ts().create("a")
+ assert [1, 2, 3] == await decoded_r.ts().madd(
[("a", 1, 5), ("a", 2, 10), ("a", 3, 15)]
)
@pytest.mark.redismod
-async def test_incrby_decrby(modclient: redis.Redis):
+async def test_incrby_decrby(decoded_r: redis.Redis):
for _ in range(100):
- assert await modclient.ts().incrby(1, 1)
+ assert await decoded_r.ts().incrby(1, 1)
sleep(0.001)
- assert 100 == (await modclient.ts().get(1))[1]
+ assert 100 == (await decoded_r.ts().get(1))[1]
for _ in range(100):
- assert await modclient.ts().decrby(1, 1)
+ assert await decoded_r.ts().decrby(1, 1)
sleep(0.001)
- assert 0 == (await modclient.ts().get(1))[1]
+ assert 0 == (await decoded_r.ts().get(1))[1]
- assert await modclient.ts().incrby(2, 1.5, timestamp=5)
- assert_resp_response(modclient, await modclient.ts().get(2), (5, 1.5), [5, 1.5])
- assert await modclient.ts().incrby(2, 2.25, timestamp=7)
- assert_resp_response(modclient, await modclient.ts().get(2), (7, 3.75), [7, 3.75])
- assert await modclient.ts().decrby(2, 1.5, timestamp=15)
- assert_resp_response(modclient, await modclient.ts().get(2), (15, 2.25), [15, 2.25])
+ assert await decoded_r.ts().incrby(2, 1.5, timestamp=5)
+ assert_resp_response(decoded_r, await decoded_r.ts().get(2), (5, 1.5), [5, 1.5])
+ assert await decoded_r.ts().incrby(2, 2.25, timestamp=7)
+ assert_resp_response(decoded_r, await decoded_r.ts().get(2), (7, 3.75), [7, 3.75])
+ assert await decoded_r.ts().decrby(2, 1.5, timestamp=15)
+ assert_resp_response(decoded_r, await decoded_r.ts().get(2), (15, 2.25), [15, 2.25])
# Test for a chunk size of 128 Bytes on TS.INCRBY
- assert await modclient.ts().incrby("time-serie-1", 10, chunk_size=128)
- info = await modclient.ts().info("time-serie-1")
- assert_resp_response(modclient, 128, info.get("chunk_size"), info.get("chunkSize"))
+ assert await decoded_r.ts().incrby("time-serie-1", 10, chunk_size=128)
+ info = await decoded_r.ts().info("time-serie-1")
+ assert_resp_response(decoded_r, 128, info.get("chunk_size"), info.get("chunkSize"))
# Test for a chunk size of 128 Bytes on TS.DECRBY
- assert await modclient.ts().decrby("time-serie-2", 10, chunk_size=128)
- info = await modclient.ts().info("time-serie-2")
- assert_resp_response(modclient, 128, info.get("chunk_size"), info.get("chunkSize"))
+ assert await decoded_r.ts().decrby("time-serie-2", 10, chunk_size=128)
+ info = await decoded_r.ts().info("time-serie-2")
+ assert_resp_response(decoded_r, 128, info.get("chunk_size"), info.get("chunkSize"))
@pytest.mark.redismod
-async def test_create_and_delete_rule(modclient: redis.Redis):
+async def test_create_and_delete_rule(decoded_r: redis.Redis):
# test rule creation
time = 100
- await modclient.ts().create(1)
- await modclient.ts().create(2)
- await modclient.ts().createrule(1, 2, "avg", 100)
+ await decoded_r.ts().create(1)
+ await decoded_r.ts().create(2)
+ await decoded_r.ts().createrule(1, 2, "avg", 100)
for i in range(50):
- await modclient.ts().add(1, time + i * 2, 1)
- await modclient.ts().add(1, time + i * 2 + 1, 2)
- await modclient.ts().add(1, time * 2, 1.5)
- assert round((await modclient.ts().get(2))[1], 5) == 1.5
- info = await modclient.ts().info(1)
- if is_resp2_connection(modclient):
+ await decoded_r.ts().add(1, time + i * 2, 1)
+ await decoded_r.ts().add(1, time + i * 2 + 1, 2)
+ await decoded_r.ts().add(1, time * 2, 1.5)
+ assert round((await decoded_r.ts().get(2))[1], 5) == 1.5
+ info = await decoded_r.ts().info(1)
+ if is_resp2_connection(decoded_r):
assert info.rules[0][1] == 100
else:
assert info["rules"]["2"][0] == 100
# test rule deletion
- await modclient.ts().deleterule(1, 2)
- info = await modclient.ts().info(1)
+ await decoded_r.ts().deleterule(1, 2)
+ info = await decoded_r.ts().info(1)
assert not info["rules"]
@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "timeseries")
-async def test_del_range(modclient: redis.Redis):
+async def test_del_range(decoded_r: redis.Redis):
try:
- await modclient.ts().delete("test", 0, 100)
+ await decoded_r.ts().delete("test", 0, 100)
except Exception as e:
assert e.__str__() != ""
for i in range(100):
- await modclient.ts().add(1, i, i % 7)
- assert 22 == await modclient.ts().delete(1, 0, 21)
- assert [] == await modclient.ts().range(1, 0, 21)
+ await decoded_r.ts().add(1, i, i % 7)
+ assert 22 == await decoded_r.ts().delete(1, 0, 21)
+ assert [] == await decoded_r.ts().range(1, 0, 21)
assert_resp_response(
- modclient, await modclient.ts().range(1, 22, 22), [(22, 1.0)], [[22, 1.0]]
+ decoded_r, await decoded_r.ts().range(1, 22, 22), [(22, 1.0)], [[22, 1.0]]
)
@pytest.mark.redismod
-async def test_range(modclient: redis.Redis):
+async def test_range(r: redis.Redis):
for i in range(100):
- await modclient.ts().add(1, i, i % 7)
- assert 100 == len(await modclient.ts().range(1, 0, 200))
+ await r.ts().add(1, i, i % 7)
+ assert 100 == len(await r.ts().range(1, 0, 200))
for i in range(100):
- await modclient.ts().add(1, i + 200, i % 7)
- assert 200 == len(await modclient.ts().range(1, 0, 500))
+ await r.ts().add(1, i + 200, i % 7)
+ assert 200 == len(await r.ts().range(1, 0, 500))
# last sample isn't returned
assert 20 == len(
- await modclient.ts().range(
- 1, 0, 500, aggregation_type="avg", bucket_size_msec=10
- )
+ await r.ts().range(1, 0, 500, aggregation_type="avg", bucket_size_msec=10)
)
- assert 10 == len(await modclient.ts().range(1, 0, 500, count=10))
+ assert 10 == len(await r.ts().range(1, 0, 500, count=10))
@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "timeseries")
-async def test_range_advanced(modclient: redis.Redis):
+async def test_range_advanced(decoded_r: redis.Redis):
for i in range(100):
- await modclient.ts().add(1, i, i % 7)
- await modclient.ts().add(1, i + 200, i % 7)
+ await decoded_r.ts().add(1, i, i % 7)
+ await decoded_r.ts().add(1, i + 200, i % 7)
assert 2 == len(
- await modclient.ts().range(
+ await decoded_r.ts().range(
1,
0,
500,
@@ -262,38 +257,38 @@ async def test_range_advanced(modclient: redis.Redis):
filter_by_max_value=2,
)
)
- res = await modclient.ts().range(
+ res = await decoded_r.ts().range(
1, 0, 10, aggregation_type="count", bucket_size_msec=10, align="+"
)
- assert_resp_response(modclient, res, [(0, 10.0), (10, 1.0)], [[0, 10.0], [10, 1.0]])
- res = await modclient.ts().range(
+ assert_resp_response(decoded_r, res, [(0, 10.0), (10, 1.0)], [[0, 10.0], [10, 1.0]])
+ res = await decoded_r.ts().range(
1, 0, 10, aggregation_type="count", bucket_size_msec=10, align=5
)
- assert_resp_response(modclient, res, [(0, 5.0), (5, 6.0)], [[0, 5.0], [5, 6.0]])
- res = await modclient.ts().range(
+ assert_resp_response(decoded_r, res, [(0, 5.0), (5, 6.0)], [[0, 5.0], [5, 6.0]])
+ res = await decoded_r.ts().range(
1, 0, 10, aggregation_type="twa", bucket_size_msec=10
)
- assert_resp_response(modclient, res, [(0, 2.55), (10, 3.0)], [[0, 2.55], [10, 3.0]])
+ assert_resp_response(decoded_r, res, [(0, 2.55), (10, 3.0)], [[0, 2.55], [10, 3.0]])
@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "timeseries")
-async def test_rev_range(modclient: redis.Redis):
+async def test_rev_range(decoded_r: redis.Redis):
for i in range(100):
- await modclient.ts().add(1, i, i % 7)
- assert 100 == len(await modclient.ts().range(1, 0, 200))
+ await decoded_r.ts().add(1, i, i % 7)
+ assert 100 == len(await decoded_r.ts().range(1, 0, 200))
for i in range(100):
- await modclient.ts().add(1, i + 200, i % 7)
- assert 200 == len(await modclient.ts().range(1, 0, 500))
+ await decoded_r.ts().add(1, i + 200, i % 7)
+ assert 200 == len(await decoded_r.ts().range(1, 0, 500))
# first sample isn't returned
assert 20 == len(
- await modclient.ts().revrange(
+ await decoded_r.ts().revrange(
1, 0, 500, aggregation_type="avg", bucket_size_msec=10
)
)
- assert 10 == len(await modclient.ts().revrange(1, 0, 500, count=10))
+ assert 10 == len(await decoded_r.ts().revrange(1, 0, 500, count=10))
assert 2 == len(
- await modclient.ts().revrange(
+ await decoded_r.ts().revrange(
1,
0,
500,
@@ -303,16 +298,16 @@ async def test_rev_range(modclient: redis.Redis):
)
)
assert_resp_response(
- modclient,
- await modclient.ts().revrange(
+ decoded_r,
+ await decoded_r.ts().revrange(
1, 0, 10, aggregation_type="count", bucket_size_msec=10, align="+"
),
[(10, 1.0), (0, 10.0)],
[[10, 1.0], [0, 10.0]],
)
assert_resp_response(
- modclient,
- await modclient.ts().revrange(
+ decoded_r,
+ await decoded_r.ts().revrange(
1, 0, 10, aggregation_type="count", bucket_size_msec=10, align=1
),
[(1, 10.0), (0, 1.0)],
@@ -322,26 +317,26 @@ async def test_rev_range(modclient: redis.Redis):
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_multi_range(modclient: redis.Redis):
- await modclient.ts().create(1, labels={"Test": "This", "team": "ny"})
- await modclient.ts().create(
+async def test_multi_range(decoded_r: redis.Redis):
+ await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"})
+ await decoded_r.ts().create(
2, labels={"Test": "This", "Taste": "That", "team": "sf"}
)
for i in range(100):
- await modclient.ts().add(1, i, i % 7)
- await modclient.ts().add(2, i, i % 11)
+ await decoded_r.ts().add(1, i, i % 7)
+ await decoded_r.ts().add(2, i, i % 11)
- res = await modclient.ts().mrange(0, 200, filters=["Test=This"])
+ res = await decoded_r.ts().mrange(0, 200, filters=["Test=This"])
assert 2 == len(res)
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
assert 100 == len(res[0]["1"][1])
- res = await modclient.ts().mrange(0, 200, filters=["Test=This"], count=10)
+ res = await decoded_r.ts().mrange(0, 200, filters=["Test=This"], count=10)
assert 10 == len(res[0]["1"][1])
for i in range(100):
- await modclient.ts().add(1, i + 200, i % 7)
- res = await modclient.ts().mrange(
+ await decoded_r.ts().add(1, i + 200, i % 7)
+ res = await decoded_r.ts().mrange(
0, 500, filters=["Test=This"], aggregation_type="avg", bucket_size_msec=10
)
assert 2 == len(res)
@@ -349,19 +344,19 @@ async def test_multi_range(modclient: redis.Redis):
# test withlabels
assert {} == res[0]["1"][0]
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 200, filters=["Test=This"], with_labels=True
)
assert {"Test": "This", "team": "ny"} == res[0]["1"][0]
else:
assert 100 == len(res["1"][2])
- res = await modclient.ts().mrange(0, 200, filters=["Test=This"], count=10)
+ res = await decoded_r.ts().mrange(0, 200, filters=["Test=This"], count=10)
assert 10 == len(res["1"][2])
for i in range(100):
- await modclient.ts().add(1, i + 200, i % 7)
- res = await modclient.ts().mrange(
+ await decoded_r.ts().add(1, i + 200, i % 7)
+ res = await decoded_r.ts().mrange(
0, 500, filters=["Test=This"], aggregation_type="avg", bucket_size_msec=10
)
assert 2 == len(res)
@@ -369,7 +364,7 @@ async def test_multi_range(modclient: redis.Redis):
# test withlabels
assert {} == res["1"][0]
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 200, filters=["Test=This"], with_labels=True
)
assert {"Test": "This", "team": "ny"} == res["1"][0]
@@ -378,25 +373,25 @@ async def test_multi_range(modclient: redis.Redis):
@pytest.mark.redismod
@pytest.mark.onlynoncluster
@skip_ifmodversion_lt("99.99.99", "timeseries")
-async def test_multi_range_advanced(modclient: redis.Redis):
- await modclient.ts().create(1, labels={"Test": "This", "team": "ny"})
- await modclient.ts().create(
+async def test_multi_range_advanced(decoded_r: redis.Redis):
+ await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"})
+ await decoded_r.ts().create(
2, labels={"Test": "This", "Taste": "That", "team": "sf"}
)
for i in range(100):
- await modclient.ts().add(1, i, i % 7)
- await modclient.ts().add(2, i, i % 11)
+ await decoded_r.ts().add(1, i, i % 7)
+ await decoded_r.ts().add(2, i, i % 11)
# test with selected labels
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 200, filters=["Test=This"], select_labels=["team"]
)
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
assert {"team": "ny"} == res[0]["1"][0]
assert {"team": "sf"} == res[1]["2"][0]
# test with filterby
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0,
200,
filters=["Test=This"],
@@ -407,15 +402,15 @@ async def test_multi_range_advanced(modclient: redis.Redis):
assert [(15, 1.0), (16, 2.0)] == res[0]["1"][1]
# test groupby
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 3, filters=["Test=This"], groupby="Test", reduce="sum"
)
assert [(0, 0.0), (1, 2.0), (2, 4.0), (3, 6.0)] == res[0]["Test=This"][1]
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 3, filters=["Test=This"], groupby="Test", reduce="max"
)
assert [(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)] == res[0]["Test=This"][1]
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 3, filters=["Test=This"], groupby="team", reduce="min"
)
assert 2 == len(res)
@@ -423,7 +418,7 @@ async def test_multi_range_advanced(modclient: redis.Redis):
assert [(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)] == res[1]["team=sf"][1]
# test align
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0,
10,
filters=["team=ny"],
@@ -432,7 +427,7 @@ async def test_multi_range_advanced(modclient: redis.Redis):
align="-",
)
assert [(0, 10.0), (10, 1.0)] == res[0]["1"][1]
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0,
10,
filters=["team=ny"],
@@ -446,7 +441,7 @@ async def test_multi_range_advanced(modclient: redis.Redis):
assert {"team": "sf"} == res["2"][0]
# test with filterby
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0,
200,
filters=["Test=This"],
@@ -457,15 +452,15 @@ async def test_multi_range_advanced(modclient: redis.Redis):
assert [[15, 1.0], [16, 2.0]] == res["1"][2]
# test groupby
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 3, filters=["Test=This"], groupby="Test", reduce="sum"
)
assert [[0, 0.0], [1, 2.0], [2, 4.0], [3, 6.0]] == res["Test=This"][3]
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 3, filters=["Test=This"], groupby="Test", reduce="max"
)
assert [[0, 0.0], [1, 1.0], [2, 2.0], [3, 3.0]] == res["Test=This"][3]
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0, 3, filters=["Test=This"], groupby="team", reduce="min"
)
assert 2 == len(res)
@@ -473,7 +468,7 @@ async def test_multi_range_advanced(modclient: redis.Redis):
assert [[0, 0.0], [1, 1.0], [2, 2.0], [3, 3.0]] == res["team=sf"][3]
# test align
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0,
10,
filters=["team=ny"],
@@ -482,7 +477,7 @@ async def test_multi_range_advanced(modclient: redis.Redis):
align="-",
)
assert [[0, 10.0], [10, 1.0]] == res["1"][2]
- res = await modclient.ts().mrange(
+ res = await decoded_r.ts().mrange(
0,
10,
filters=["team=ny"],
@@ -496,26 +491,26 @@ async def test_multi_range_advanced(modclient: redis.Redis):
@pytest.mark.redismod
@pytest.mark.onlynoncluster
@skip_ifmodversion_lt("99.99.99", "timeseries")
-async def test_multi_reverse_range(modclient: redis.Redis):
- await modclient.ts().create(1, labels={"Test": "This", "team": "ny"})
- await modclient.ts().create(
+async def test_multi_reverse_range(decoded_r: redis.Redis):
+ await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"})
+ await decoded_r.ts().create(
2, labels={"Test": "This", "Taste": "That", "team": "sf"}
)
for i in range(100):
- await modclient.ts().add(1, i, i % 7)
- await modclient.ts().add(2, i, i % 11)
+ await decoded_r.ts().add(1, i, i % 7)
+ await decoded_r.ts().add(2, i, i % 11)
- res = await modclient.ts().mrange(0, 200, filters=["Test=This"])
+ res = await decoded_r.ts().mrange(0, 200, filters=["Test=This"])
assert 2 == len(res)
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
assert 100 == len(res[0]["1"][1])
- res = await modclient.ts().mrange(0, 200, filters=["Test=This"], count=10)
+ res = await decoded_r.ts().mrange(0, 200, filters=["Test=This"], count=10)
assert 10 == len(res[0]["1"][1])
for i in range(100):
- await modclient.ts().add(1, i + 200, i % 7)
- res = await modclient.ts().mrevrange(
+ await decoded_r.ts().add(1, i + 200, i % 7)
+ res = await decoded_r.ts().mrevrange(
0, 500, filters=["Test=This"], aggregation_type="avg", bucket_size_msec=10
)
assert 2 == len(res)
@@ -523,20 +518,20 @@ async def test_multi_reverse_range(modclient: redis.Redis):
assert {} == res[0]["1"][0]
# test withlabels
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 200, filters=["Test=This"], with_labels=True
)
assert {"Test": "This", "team": "ny"} == res[0]["1"][0]
# test with selected labels
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 200, filters=["Test=This"], select_labels=["team"]
)
assert {"team": "ny"} == res[0]["1"][0]
assert {"team": "sf"} == res[1]["2"][0]
# test filterby
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0,
200,
filters=["Test=This"],
@@ -547,15 +542,15 @@ async def test_multi_reverse_range(modclient: redis.Redis):
assert [(16, 2.0), (15, 1.0)] == res[0]["1"][1]
# test groupby
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 3, filters=["Test=This"], groupby="Test", reduce="sum"
)
assert [(3, 6.0), (2, 4.0), (1, 2.0), (0, 0.0)] == res[0]["Test=This"][1]
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 3, filters=["Test=This"], groupby="Test", reduce="max"
)
assert [(3, 3.0), (2, 2.0), (1, 1.0), (0, 0.0)] == res[0]["Test=This"][1]
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 3, filters=["Test=This"], groupby="team", reduce="min"
)
assert 2 == len(res)
@@ -563,7 +558,7 @@ async def test_multi_reverse_range(modclient: redis.Redis):
assert [(3, 3.0), (2, 2.0), (1, 1.0), (0, 0.0)] == res[1]["team=sf"][1]
# test align
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0,
10,
filters=["team=ny"],
@@ -572,7 +567,7 @@ async def test_multi_reverse_range(modclient: redis.Redis):
align="-",
)
assert [(10, 1.0), (0, 10.0)] == res[0]["1"][1]
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0,
10,
filters=["team=ny"],
@@ -584,12 +579,12 @@ async def test_multi_reverse_range(modclient: redis.Redis):
else:
assert 100 == len(res["1"][2])
- res = await modclient.ts().mrange(0, 200, filters=["Test=This"], count=10)
+ res = await decoded_r.ts().mrange(0, 200, filters=["Test=This"], count=10)
assert 10 == len(res["1"][2])
for i in range(100):
- await modclient.ts().add(1, i + 200, i % 7)
- res = await modclient.ts().mrevrange(
+ await decoded_r.ts().add(1, i + 200, i % 7)
+ res = await decoded_r.ts().mrevrange(
0, 500, filters=["Test=This"], aggregation_type="avg", bucket_size_msec=10
)
assert 2 == len(res)
@@ -597,20 +592,20 @@ async def test_multi_reverse_range(modclient: redis.Redis):
assert {} == res["1"][0]
# test withlabels
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 200, filters=["Test=This"], with_labels=True
)
assert {"Test": "This", "team": "ny"} == res["1"][0]
# test with selected labels
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 200, filters=["Test=This"], select_labels=["team"]
)
assert {"team": "ny"} == res["1"][0]
assert {"team": "sf"} == res["2"][0]
# test filterby
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0,
200,
filters=["Test=This"],
@@ -621,15 +616,15 @@ async def test_multi_reverse_range(modclient: redis.Redis):
assert [[16, 2.0], [15, 1.0]] == res["1"][2]
# test groupby
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 3, filters=["Test=This"], groupby="Test", reduce="sum"
)
assert [[3, 6.0], [2, 4.0], [1, 2.0], [0, 0.0]] == res["Test=This"][3]
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 3, filters=["Test=This"], groupby="Test", reduce="max"
)
assert [[3, 3.0], [2, 2.0], [1, 1.0], [0, 0.0]] == res["Test=This"][3]
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0, 3, filters=["Test=This"], groupby="team", reduce="min"
)
assert 2 == len(res)
@@ -637,7 +632,7 @@ async def test_multi_reverse_range(modclient: redis.Redis):
assert [[3, 3.0], [2, 2.0], [1, 1.0], [0, 0.0]] == res["team=sf"][3]
# test align
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0,
10,
filters=["team=ny"],
@@ -646,7 +641,7 @@ async def test_multi_reverse_range(modclient: redis.Redis):
align="-",
)
assert [[10, 1.0], [0, 10.0]] == res["1"][2]
- res = await modclient.ts().mrevrange(
+ res = await decoded_r.ts().mrevrange(
0,
10,
filters=["team=ny"],
@@ -658,115 +653,115 @@ async def test_multi_reverse_range(modclient: redis.Redis):
@pytest.mark.redismod
-async def test_get(modclient: redis.Redis):
+async def test_get(decoded_r: redis.Redis):
name = "test"
- await modclient.ts().create(name)
- assert not await modclient.ts().get(name)
- await modclient.ts().add(name, 2, 3)
- assert 2 == (await modclient.ts().get(name))[0]
- await modclient.ts().add(name, 3, 4)
- assert 4 == (await modclient.ts().get(name))[1]
+ await decoded_r.ts().create(name)
+ assert not await decoded_r.ts().get(name)
+ await decoded_r.ts().add(name, 2, 3)
+ assert 2 == (await decoded_r.ts().get(name))[0]
+ await decoded_r.ts().add(name, 3, 4)
+ assert 4 == (await decoded_r.ts().get(name))[1]
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_mget(modclient: redis.Redis):
- await modclient.ts().create(1, labels={"Test": "This"})
- await modclient.ts().create(2, labels={"Test": "This", "Taste": "That"})
- act_res = await modclient.ts().mget(["Test=This"])
+async def test_mget(decoded_r: redis.Redis):
+ await decoded_r.ts().create(1, labels={"Test": "This"})
+ await decoded_r.ts().create(2, labels={"Test": "This", "Taste": "That"})
+ act_res = await decoded_r.ts().mget(["Test=This"])
exp_res = [{"1": [{}, None, None]}, {"2": [{}, None, None]}]
exp_res_resp3 = {"1": [{}, []], "2": [{}, []]}
- assert_resp_response(modclient, act_res, exp_res, exp_res_resp3)
- await modclient.ts().add(1, "*", 15)
- await modclient.ts().add(2, "*", 25)
- res = await modclient.ts().mget(["Test=This"])
- if is_resp2_connection(modclient):
+ assert_resp_response(decoded_r, act_res, exp_res, exp_res_resp3)
+ await decoded_r.ts().add(1, "*", 15)
+ await decoded_r.ts().add(2, "*", 25)
+ res = await decoded_r.ts().mget(["Test=This"])
+ if is_resp2_connection(decoded_r):
assert 15 == res[0]["1"][2]
assert 25 == res[1]["2"][2]
else:
assert 15 == res["1"][1][1]
assert 25 == res["2"][1][1]
- res = await modclient.ts().mget(["Taste=That"])
- if is_resp2_connection(modclient):
+ res = await decoded_r.ts().mget(["Taste=That"])
+ if is_resp2_connection(decoded_r):
assert 25 == res[0]["2"][2]
else:
assert 25 == res["2"][1][1]
# test with_labels
- if is_resp2_connection(modclient):
+ if is_resp2_connection(decoded_r):
assert {} == res[0]["2"][0]
else:
assert {} == res["2"][0]
- res = await modclient.ts().mget(["Taste=That"], with_labels=True)
- if is_resp2_connection(modclient):
+ res = await decoded_r.ts().mget(["Taste=That"], with_labels=True)
+ if is_resp2_connection(decoded_r):
assert {"Taste": "That", "Test": "This"} == res[0]["2"][0]
else:
assert {"Taste": "That", "Test": "This"} == res["2"][0]
@pytest.mark.redismod
-async def test_info(modclient: redis.Redis):
- await modclient.ts().create(
+async def test_info(decoded_r: redis.Redis):
+ await decoded_r.ts().create(
1, retention_msecs=5, labels={"currentLabel": "currentData"}
)
- info = await modclient.ts().info(1)
+ info = await decoded_r.ts().info(1)
assert_resp_response(
- modclient, 5, info.get("retention_msecs"), info.get("retentionTime")
+ decoded_r, 5, info.get("retention_msecs"), info.get("retentionTime")
)
assert info["labels"]["currentLabel"] == "currentData"
@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-async def testInfoDuplicatePolicy(modclient: redis.Redis):
- await modclient.ts().create(
+async def testInfoDuplicatePolicy(decoded_r: redis.Redis):
+ await decoded_r.ts().create(
1, retention_msecs=5, labels={"currentLabel": "currentData"}
)
- info = await modclient.ts().info(1)
+ info = await decoded_r.ts().info(1)
assert_resp_response(
- modclient, None, info.get("duplicate_policy"), info.get("duplicatePolicy")
+ decoded_r, None, info.get("duplicate_policy"), info.get("duplicatePolicy")
)
- await modclient.ts().create("time-serie-2", duplicate_policy="min")
- info = await modclient.ts().info("time-serie-2")
+ await decoded_r.ts().create("time-serie-2", duplicate_policy="min")
+ info = await decoded_r.ts().info("time-serie-2")
assert_resp_response(
- modclient, "min", info.get("duplicate_policy"), info.get("duplicatePolicy")
+ decoded_r, "min", info.get("duplicate_policy"), info.get("duplicatePolicy")
)
@pytest.mark.redismod
@pytest.mark.onlynoncluster
-async def test_query_index(modclient: redis.Redis):
- await modclient.ts().create(1, labels={"Test": "This"})
- await modclient.ts().create(2, labels={"Test": "This", "Taste": "That"})
- assert 2 == len(await modclient.ts().queryindex(["Test=This"]))
- assert 1 == len(await modclient.ts().queryindex(["Taste=That"]))
+async def test_query_index(decoded_r: redis.Redis):
+ await decoded_r.ts().create(1, labels={"Test": "This"})
+ await decoded_r.ts().create(2, labels={"Test": "This", "Taste": "That"})
+ assert 2 == len(await decoded_r.ts().queryindex(["Test=This"]))
+ assert 1 == len(await decoded_r.ts().queryindex(["Taste=That"]))
assert_resp_response(
- modclient, await modclient.ts().queryindex(["Taste=That"]), [2], {"2"}
+ decoded_r, await decoded_r.ts().queryindex(["Taste=That"]), [2], {"2"}
)
# @pytest.mark.redismod
-# async def test_pipeline(modclient: redis.Redis):
-# pipeline = await modclient.ts().pipeline()
+# async def test_pipeline(r: redis.Redis):
+# pipeline = await r.ts().pipeline()
# pipeline.create("with_pipeline")
# for i in range(100):
# pipeline.add("with_pipeline", i, 1.1 * i)
# pipeline.execute()
-# info = await modclient.ts().info("with_pipeline")
+# info = await r.ts().info("with_pipeline")
# assert info.lastTimeStamp == 99
# assert info.total_samples == 100
-# assert await modclient.ts().get("with_pipeline")[1] == 99 * 1.1
+# assert await r.ts().get("with_pipeline")[1] == 99 * 1.1
@pytest.mark.redismod
-async def test_uncompressed(modclient: redis.Redis):
- await modclient.ts().create("compressed")
- await modclient.ts().create("uncompressed", uncompressed=True)
- compressed_info = await modclient.ts().info("compressed")
- uncompressed_info = await modclient.ts().info("uncompressed")
- if is_resp2_connection(modclient):
+async def test_uncompressed(decoded_r: redis.Redis):
+ await decoded_r.ts().create("compressed")
+ await decoded_r.ts().create("uncompressed", uncompressed=True)
+ compressed_info = await decoded_r.ts().info("compressed")
+ uncompressed_info = await decoded_r.ts().info("uncompressed")
+ if is_resp2_connection(decoded_r):
assert compressed_info.memory_usage != uncompressed_info.memory_usage
else:
assert compressed_info["memoryUsage"] != uncompressed_info["memoryUsage"]
diff --git a/tests/test_bloom.py b/tests/test_bloom.py
index 4ee8ba29d2..a82fece470 100644
--- a/tests/test_bloom.py
+++ b/tests/test_bloom.py
@@ -1,7 +1,6 @@
from math import inf
import pytest
-
import redis.commands.bf
from redis.exceptions import ModuleError, RedisError
from redis.utils import HIREDIS_AVAILABLE
@@ -14,15 +13,15 @@ def intlist(obj):
@pytest.fixture
-def client(modclient):
- assert isinstance(modclient.bf(), redis.commands.bf.BFBloom)
- assert isinstance(modclient.cf(), redis.commands.bf.CFBloom)
- assert isinstance(modclient.cms(), redis.commands.bf.CMSBloom)
- assert isinstance(modclient.tdigest(), redis.commands.bf.TDigestBloom)
- assert isinstance(modclient.topk(), redis.commands.bf.TOPKBloom)
-
- modclient.flushdb()
- return modclient
+def client(decoded_r):
+ assert isinstance(decoded_r.bf(), redis.commands.bf.BFBloom)
+ assert isinstance(decoded_r.cf(), redis.commands.bf.CFBloom)
+ assert isinstance(decoded_r.cms(), redis.commands.bf.CMSBloom)
+ assert isinstance(decoded_r.tdigest(), redis.commands.bf.TDigestBloom)
+ assert isinstance(decoded_r.topk(), redis.commands.bf.TOPKBloom)
+
+ decoded_r.flushdb()
+ return decoded_r
@pytest.mark.redismod
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
index 2ca323eaf5..834831fabd 100644
--- a/tests/test_cluster.py
+++ b/tests/test_cluster.py
@@ -6,7 +6,6 @@
from unittest.mock import DEFAULT, Mock, call, patch
import pytest
-
from redis import Redis
from redis.backoff import ExponentialBackoff, NoBackoff, default_backoff
from redis.cluster import (
diff --git a/tests/test_command_parser.py b/tests/test_command_parser.py
index b2a2268f85..c89a2ab0e5 100644
--- a/tests/test_command_parser.py
+++ b/tests/test_command_parser.py
@@ -1,5 +1,4 @@
import pytest
-
from redis.parsers import CommandsParser
from .conftest import skip_if_redis_enterprise, skip_if_server_version_lt
diff --git a/tests/test_commands.py b/tests/test_commands.py
index 9849e7d64e..a024167877 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -6,7 +6,6 @@
from unittest import mock
import pytest
-
import redis
from redis import exceptions
from redis.client import EMPTY_RESPONSE, NEVER_DECODE, parse_info
@@ -199,6 +198,7 @@ def test_acl_genpass(self, r):
@skip_if_server_version_lt("7.0.0")
@skip_if_redis_enterprise()
def test_acl_getuser_setuser(self, r, request):
+ r.flushall()
username = "redis-py-user"
def teardown():
@@ -238,14 +238,14 @@ def teardown():
keys=["cache:*", "objects:*"],
)
acl = r.acl_getuser(username)
- assert set(acl["categories"]) == {"-@all", "+@set", "+@hash", "-@geo"}
+ assert set(acl["categories"]) == {"+@hash", "+@set", "-@all", "-@geo"}
assert set(acl["commands"]) == {"+get", "+mget", "-hset"}
assert acl["enabled"] is True
assert "on" in acl["flags"]
assert set(acl["keys"]) == {"~cache:*", "~objects:*"}
assert len(acl["passwords"]) == 2
- # test reset=False keeps existing ACL and applies new ACL on top
+ # test reset=False keeps existing ACL and applies new ACL on top
assert r.acl_setuser(
username,
enabled=True,
@@ -264,14 +264,13 @@ def teardown():
keys=["objects:*"],
)
acl = r.acl_getuser(username)
- assert set(acl["categories"]) == {"-@all", "+@set", "+@hash"}
assert set(acl["commands"]) == {"+get", "+mget"}
assert acl["enabled"] is True
assert "on" in acl["flags"]
assert set(acl["keys"]) == {"~cache:*", "~objects:*"}
assert len(acl["passwords"]) == 2
- # test removal of passwords
+ # test removal of passwords
assert r.acl_setuser(
username, enabled=True, reset=True, passwords=["+pass1", "+pass2"]
)
@@ -279,7 +278,7 @@ def teardown():
assert r.acl_setuser(username, enabled=True, passwords=["-pass2"])
assert len(r.acl_getuser(username)["passwords"]) == 1
- # Resets and tests that hashed passwords are set properly.
+ # Resets and tests that hashed passwords are set properly.
hashed_password = (
"5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8"
)
@@ -303,7 +302,7 @@ def teardown():
)
assert len(r.acl_getuser(username)["passwords"]) == 1
- # test selectors
+ # test selectors
assert r.acl_setuser(
username,
enabled=True,
@@ -316,7 +315,7 @@ def teardown():
selectors=[("+set", "%W~app*")],
)
acl = r.acl_getuser(username)
- assert set(acl["categories"]) == {"-@all", "+@set", "+@hash", "-@geo"}
+ assert set(acl["categories"]) == {"+@hash", "+@set", "-@all", "-@geo"}
assert set(acl["commands"]) == {"+get", "+mget", "-hset"}
assert acl["enabled"] is True
assert "on" in acl["flags"]
@@ -340,6 +339,7 @@ def test_acl_help(self, r):
@skip_if_redis_enterprise()
def test_acl_list(self, r, request):
username = "redis-py-user"
+ start = r.acl_list()
def teardown():
r.acl_deluser(username)
@@ -348,7 +348,7 @@ def teardown():
assert r.acl_setuser(username, enabled=False, reset=True)
users = r.acl_list()
- assert len(users) == 2
+ assert len(users) == len(start) + 1
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise()
@@ -712,7 +712,7 @@ def test_client_no_evict(self, r):
@skip_if_server_version_lt("3.2.0")
def test_client_reply(self, r, r_timeout):
assert r_timeout.client_reply("ON") == b"OK"
- with pytest.raises(exceptions.TimeoutError):
+ with pytest.raises(exceptions.RedisError):
r_timeout.client_reply("OFF")
r_timeout.client_reply("SKIP")
@@ -4914,6 +4914,8 @@ def test_shutdown_with_params(self, r: redis.Redis):
@skip_if_server_version_lt("2.8.0")
@skip_if_redis_enterprise()
def test_sync(self, r):
+ r.flushdb()
+ time.sleep(1)
r2 = redis.Redis(port=6380, decode_responses=False)
res = r2.sync()
assert b"REDIS" in res
diff --git a/tests/test_connection.py b/tests/test_connection.py
index facd425061..1ae3d73ede 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -4,7 +4,6 @@
from unittest.mock import patch
import pytest
-
import redis
from redis.backoff import NoBackoff
from redis.connection import Connection, SSLConnection, UnixDomainSocketConnection
@@ -30,22 +29,22 @@ def test_invalid_response(r):
@skip_if_server_version_lt("4.0.0")
@pytest.mark.redismod
-def test_loading_external_modules(modclient):
+def test_loading_external_modules(r):
def inner():
pass
- modclient.load_external_module("myfuncname", inner)
- assert getattr(modclient, "myfuncname") == inner
- assert isinstance(getattr(modclient, "myfuncname"), types.FunctionType)
+ r.load_external_module("myfuncname", inner)
+ assert getattr(r, "myfuncname") == inner
+ assert isinstance(getattr(r, "myfuncname"), types.FunctionType)
# and call it
from redis.commands import RedisModuleCommands
j = RedisModuleCommands.json
- modclient.load_external_module("sometestfuncname", j)
+ r.load_external_module("sometestfuncname", j)
# d = {'hello': 'world!'}
- # mod = j(modclient)
+ # mod = j(r)
# mod.set("fookey", ".", d)
# assert mod.get('fookey') == d
diff --git a/tests/test_connection_pool.py b/tests/test_connection_pool.py
index ba9fef3089..888e0226eb 100644
--- a/tests/test_connection_pool.py
+++ b/tests/test_connection_pool.py
@@ -5,7 +5,6 @@
from unittest import mock
import pytest
-
import redis
from redis.connection import to_bool
from redis.utils import SSL_AVAILABLE
diff --git a/tests/test_credentials.py b/tests/test_credentials.py
index 9c0ff1bcea..aade04e082 100644
--- a/tests/test_credentials.py
+++ b/tests/test_credentials.py
@@ -4,7 +4,6 @@
from typing import Optional, Tuple, Union
import pytest
-
import redis
from redis import AuthenticationError, DataError, ResponseError
from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
diff --git a/tests/test_encoding.py b/tests/test_encoding.py
index cb9c4e20be..331cd5108c 100644
--- a/tests/test_encoding.py
+++ b/tests/test_encoding.py
@@ -1,5 +1,4 @@
import pytest
-
import redis
from redis.connection import Connection
from redis.utils import HIREDIS_PACK_AVAILABLE
diff --git a/tests/test_function.py b/tests/test_function.py
index bb32fdf27c..22db904273 100644
--- a/tests/test_function.py
+++ b/tests/test_function.py
@@ -1,5 +1,4 @@
import pytest
-
from redis.exceptions import ResponseError
from .conftest import assert_resp_response, skip_if_server_version_lt
diff --git a/tests/test_graph.py b/tests/test_graph.py
index 4721b2f4e2..42f1d9e5df 100644
--- a/tests/test_graph.py
+++ b/tests/test_graph.py
@@ -1,7 +1,7 @@
from unittest.mock import patch
import pytest
-
+from redis import Redis
from redis.commands.graph import Edge, Node, Path
from redis.commands.graph.execution_plan import Operation
from redis.commands.graph.query_result import (
@@ -20,13 +20,14 @@
QueryResult,
)
from redis.exceptions import ResponseError
-from tests.conftest import skip_if_redis_enterprise
+from tests.conftest import _get_client, skip_if_redis_enterprise
@pytest.fixture
-def client(modclient):
- modclient.flushdb()
- return modclient
+def client(request):
+ r = _get_client(Redis, request, decode_responses=True)
+ r.flushdb()
+ return r
@pytest.mark.redismod
@@ -292,6 +293,7 @@ def test_slowlog(client):
@pytest.mark.redismod
+@pytest.mark.xfail(strict=False)
def test_query_timeout(client):
# Build a sample graph with 1000 nodes.
client.graph().query("UNWIND range(0,1000) as val CREATE ({v: val})")
diff --git a/tests/test_graph_utils/test_edge.py b/tests/test_graph_utils/test_edge.py
index b5b7362389..581ebfab5d 100644
--- a/tests/test_graph_utils/test_edge.py
+++ b/tests/test_graph_utils/test_edge.py
@@ -1,5 +1,4 @@
import pytest
-
from redis.commands.graph import edge, node
diff --git a/tests/test_graph_utils/test_node.py b/tests/test_graph_utils/test_node.py
index cd4e936719..c3b34ac6ff 100644
--- a/tests/test_graph_utils/test_node.py
+++ b/tests/test_graph_utils/test_node.py
@@ -1,5 +1,4 @@
import pytest
-
from redis.commands.graph import node
diff --git a/tests/test_graph_utils/test_path.py b/tests/test_graph_utils/test_path.py
index d581269307..1bd38efab4 100644
--- a/tests/test_graph_utils/test_path.py
+++ b/tests/test_graph_utils/test_path.py
@@ -1,5 +1,4 @@
import pytest
-
from redis.commands.graph import edge, node, path
diff --git a/tests/test_json.py b/tests/test_json.py
index 84232b20d1..a1271386d9 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -1,17 +1,17 @@
import pytest
-
import redis
-from redis import exceptions
+from redis import Redis, exceptions
from redis.commands.json.decoders import decode_list, unstring
from redis.commands.json.path import Path
-from .conftest import assert_resp_response, skip_ifmodversion_lt
+from .conftest import _get_client, assert_resp_response, skip_ifmodversion_lt
@pytest.fixture
-def client(modclient):
- modclient.flushdb()
- return modclient
+def client(request):
+ r = _get_client(Redis, request, decode_responses=True)
+ r.flushdb()
+ return r
@pytest.mark.redismod
diff --git a/tests/test_lock.py b/tests/test_lock.py
index 10ad7e1539..b4b9b32917 100644
--- a/tests/test_lock.py
+++ b/tests/test_lock.py
@@ -1,7 +1,6 @@
import time
import pytest
-
from redis.client import Redis
from redis.exceptions import LockError, LockNotOwnedError
from redis.lock import Lock
diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py
index 32f5e23d53..5cda3190a6 100644
--- a/tests/test_multiprocessing.py
+++ b/tests/test_multiprocessing.py
@@ -2,7 +2,6 @@
import multiprocessing
import pytest
-
import redis
from redis.connection import Connection, ConnectionPool
from redis.exceptions import ConnectionError
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 7b98ece692..7b048eec01 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -1,5 +1,4 @@
import pytest
-
import redis
from .conftest import skip_if_server_version_lt, wait_for_command
diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py
index fc98966d74..9c10740ae8 100644
--- a/tests/test_pubsub.py
+++ b/tests/test_pubsub.py
@@ -8,7 +8,6 @@
from unittest.mock import patch
import pytest
-
import redis
from redis.exceptions import ConnectionError
from redis.utils import HIREDIS_AVAILABLE
diff --git a/tests/test_retry.py b/tests/test_retry.py
index 3cfea5c09e..e9d3015897 100644
--- a/tests/test_retry.py
+++ b/tests/test_retry.py
@@ -1,7 +1,6 @@
from unittest.mock import patch
import pytest
-
from redis.backoff import ExponentialBackoff, NoBackoff
from redis.client import Redis
from redis.connection import Connection, UnixDomainSocketConnection
diff --git a/tests/test_scripting.py b/tests/test_scripting.py
index b6b5f9fb70..899dc69482 100644
--- a/tests/test_scripting.py
+++ b/tests/test_scripting.py
@@ -1,5 +1,4 @@
import pytest
-
import redis
from redis import exceptions
from redis.commands.core import Script
diff --git a/tests/test_search.py b/tests/test_search.py
index fc63bcc1d2..2e42aaba57 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -5,7 +5,6 @@
from io import TextIOWrapper
import pytest
-
import redis
import redis.commands.search
import redis.commands.search.aggregation as aggregations
@@ -25,6 +24,7 @@
from redis.commands.search.suggestion import Suggestion
from .conftest import (
+ _get_client,
assert_resp_response,
is_resp2_connection,
skip_if_redis_enterprise,
@@ -107,9 +107,10 @@ def createIndex(client, num_docs=100, definition=None):
@pytest.fixture
-def client(modclient):
- modclient.flushdb()
- return modclient
+def client(request):
+ r = _get_client(redis.Redis, request, decode_responses=True)
+ r.flushdb()
+ return r
@pytest.mark.redismod
@@ -228,15 +229,15 @@ def test_client(client):
for doc in res["results"]:
assert doc["id"]
- assert doc["fields"]["play"] == "Henry IV"
- assert len(doc["fields"]["txt"]) > 0
+ assert doc["extra_attributes"]["play"] == "Henry IV"
+ assert len(doc["extra_attributes"]["txt"]) > 0
# test no content
res = client.ft().search(Query("king").no_content())
assert 194 == res["total_results"]
assert 10 == len(res["results"])
for doc in res["results"]:
- assert "fields" not in doc.keys()
+ assert "extra_attributes" not in doc.keys()
# test verbatim vs no verbatim
total = client.ft().search(Query("kings").no_content())["total_results"]
@@ -641,19 +642,19 @@ def test_summarize(client):
)
else:
doc = sorted(client.ft().search(q)["results"])[0]
- assert "Henry IV" == doc["fields"]["play"]
+ assert "Henry IV" == doc["extra_attributes"]["play"]
assert (
"ACT I SCENE I. London. The palace. Enter KING HENRY, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc["fields"]["txt"]
+ == doc["extra_attributes"]["txt"]
)
q = Query("king henry").paging(0, 1).summarize().highlight()
doc = sorted(client.ft().search(q)["results"])[0]
- assert "Henry ... " == doc["fields"]["play"]
+ assert "Henry ... " == doc["extra_attributes"]["play"]
assert (
"ACT I SCENE I. London. The palace. Enter KING HENRY, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc["fields"]["txt"]
+ == doc["extra_attributes"]["txt"]
)
@@ -721,9 +722,9 @@ def test_alias(client):
@pytest.mark.redismod
+@pytest.mark.xfail(strict=False)
def test_alias_basic(client):
# Creating a client with one index
- getClient(client).flushdb()
index1 = getClient(client).ft("testAlias")
index1.create_index((TextField("txt"),))
@@ -850,29 +851,32 @@ def test_spell_check(client):
else:
# test spellcheck
res = client.ft().spellcheck("impornant")
- assert "important" in res["impornant"][0].keys()
+ assert "important" in res["results"]["impornant"][0].keys()
res = client.ft().spellcheck("contnt")
- assert "content" in res["contnt"][0].keys()
+ assert "content" in res["results"]["contnt"][0].keys()
# test spellcheck with Levenshtein distance
res = client.ft().spellcheck("vlis")
- assert res == {"vlis": []}
+ assert res == {"results": {"vlis": []}}
res = client.ft().spellcheck("vlis", distance=2)
- assert "valid" in res["vlis"][0].keys()
+ assert "valid" in res["results"]["vlis"][0].keys()
# test spellcheck include
client.ft().dict_add("dict", "lore", "lorem", "lorm")
res = client.ft().spellcheck("lorm", include="dict")
- assert len(res["lorm"]) == 3
- assert "lorem" in res["lorm"][0].keys()
- assert "lore" in res["lorm"][1].keys()
- assert "lorm" in res["lorm"][2].keys()
- assert (res["lorm"][0]["lorem"], res["lorm"][1]["lore"]) == (0.5, 0)
+ assert len(res["results"]["lorm"]) == 3
+ assert "lorem" in res["results"]["lorm"][0].keys()
+ assert "lore" in res["results"]["lorm"][1].keys()
+ assert "lorm" in res["results"]["lorm"][2].keys()
+ assert (
+ res["results"]["lorm"][0]["lorem"],
+ res["results"]["lorm"][1]["lore"],
+ ) == (0.5, 0)
# test spellcheck exclude
res = client.ft().spellcheck("lorm", exclude="dict")
- assert res == {}
+ assert res == {"results": {}}
@pytest.mark.redismod
@@ -906,7 +910,7 @@ def test_phonetic_matcher(client):
assert "Jon" == res.docs[0].name
else:
assert 1 == res["total_results"]
- assert "Jon" == res["results"][0]["fields"]["name"]
+ assert "Jon" == res["results"][0]["extra_attributes"]["name"]
# Drop and create index with phonetic matcher
client.flushdb()
@@ -921,7 +925,9 @@ def test_phonetic_matcher(client):
assert ["John", "Jon"] == sorted(d.name for d in res.docs)
else:
assert 2 == res["total_results"]
- assert ["John", "Jon"] == sorted(d["fields"]["name"] for d in res["results"])
+ assert ["John", "Jon"] == sorted(
+ d["extra_attributes"]["name"] for d in res["results"]
+ )
@pytest.mark.redismod
@@ -1154,80 +1160,83 @@ def test_aggregations_groupby(client):
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliascount"] == "3"
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliascount"] == "3"
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.count_distinct("@title")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliascount_distincttitle"] == "3"
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliascount_distincttitle"] == "3"
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.count_distinctish("@title")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliascount_distinctishtitle"] == "3"
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliascount_distinctishtitle"] == "3"
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.sum("@random_num")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliassumrandom_num"] == "21" # 10+8+3
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliassumrandom_num"] == "21"
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.min("@random_num")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasminrandom_num"] == "3" # min(10,8,3)
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliasminrandom_num"] == "3"
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.max("@random_num")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasmaxrandom_num"] == "10" # max(10,8,3)
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliasmaxrandom_num"] == "10"
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.avg("@random_num")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasavgrandom_num"] == "7" # (10+3+8)/3
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliasavgrandom_num"] == "7"
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.stddev("random_num")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasstddevrandom_num"] == "3.60555127546"
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert (
+ res["extra_attributes"]["__generated_aliasstddevrandom_num"]
+ == "3.60555127546"
+ )
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.quantile("@random_num", 0.5)
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert res["fields"]["__generated_aliasquantilerandom_num,0.5"] == "8"
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert res["extra_attributes"]["__generated_aliasquantilerandom_num,0.5"] == "8"
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.tolist("@title")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert set(res["fields"]["__generated_aliastolisttitle"]) == {
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert set(res["extra_attributes"]["__generated_aliastolisttitle"]) == {
"RediSearch",
"RedisAI",
"RedisJson",
@@ -1238,17 +1247,21 @@ def test_aggregations_groupby(client):
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"] == {"parent": "redis", "first": "RediSearch"}
+ assert res["extra_attributes"] == {"parent": "redis", "first": "RediSearch"}
req = aggregations.AggregateRequest("redis").group_by(
"@parent", reducers.random_sample("@title", 2).alias("random")
)
res = client.ft().aggregate(req)["results"][0]
- assert res["fields"]["parent"] == "redis"
- assert "random" in res["fields"].keys()
- assert len(res["fields"]["random"]) == 2
- assert res["fields"]["random"][0] in ["RediSearch", "RedisAI", "RedisJson"]
+ assert res["extra_attributes"]["parent"] == "redis"
+ assert "random" in res["extra_attributes"].keys()
+ assert len(res["extra_attributes"]["random"]) == 2
+ assert res["extra_attributes"]["random"][0] in [
+ "RediSearch",
+ "RedisAI",
+ "RedisJson",
+ ]
@pytest.mark.redismod
@@ -1289,14 +1302,14 @@ def test_aggregations_sort_by_and_limit(client):
aggregations.Asc("@t2"), aggregations.Desc("@t1")
)
res = client.ft().aggregate(req)["results"]
- assert res[0]["fields"] == {"t2": "a", "t1": "b"}
- assert res[1]["fields"] == {"t2": "b", "t1": "a"}
+ assert res[0]["extra_attributes"] == {"t2": "a", "t1": "b"}
+ assert res[1]["extra_attributes"] == {"t2": "b", "t1": "a"}
# test sort_by without SortDirection
req = aggregations.AggregateRequest("*").sort_by("@t1")
res = client.ft().aggregate(req)["results"]
- assert res[0]["fields"] == {"t1": "a"}
- assert res[1]["fields"] == {"t1": "b"}
+ assert res[0]["extra_attributes"] == {"t1": "a"}
+ assert res[1]["extra_attributes"] == {"t1": "b"}
# test sort_by with max
req = aggregations.AggregateRequest("*").sort_by("@t1", max=1)
@@ -1307,7 +1320,7 @@ def test_aggregations_sort_by_and_limit(client):
req = aggregations.AggregateRequest("*").sort_by("@t1").limit(1, 1)
res = client.ft().aggregate(req)
assert len(res["results"]) == 1
- assert res["results"][0]["fields"] == {"t1": "b"}
+ assert res["results"][0]["extra_attributes"] == {"t1": "b"}
@pytest.mark.redismod
@@ -1335,17 +1348,17 @@ def test_aggregations_load(client):
# load t1
req = aggregations.AggregateRequest("*").load("t1")
res = client.ft().aggregate(req)
- assert res["results"][0]["fields"] == {"t1": "hello"}
+ assert res["results"][0]["extra_attributes"] == {"t1": "hello"}
# load t2
req = aggregations.AggregateRequest("*").load("t2")
res = client.ft().aggregate(req)
- assert res["results"][0]["fields"] == {"t2": "world"}
+ assert res["results"][0]["extra_attributes"] == {"t2": "world"}
# load all
req = aggregations.AggregateRequest("*").load()
res = client.ft().aggregate(req)
- assert res["results"][0]["fields"] == {"t1": "hello", "t2": "world"}
+ assert res["results"][0]["extra_attributes"] == {"t1": "hello", "t2": "world"}
@pytest.mark.redismod
@@ -1376,8 +1389,8 @@ def test_aggregations_apply(client):
else:
res_set = set(
[
- res["results"][0]["fields"]["CreatedDateTimeUTC"],
- res["results"][1]["fields"]["CreatedDateTimeUTC"],
+ res["results"][0]["extra_attributes"]["CreatedDateTimeUTC"],
+ res["results"][1]["extra_attributes"]["CreatedDateTimeUTC"],
],
)
assert res_set == set(["6373878785249699840", "6373878758592700416"])
@@ -1415,7 +1428,7 @@ def test_aggregations_filter(client):
assert res.rows[1] == ["age", "25"]
else:
assert len(res["results"]) == 1
- assert res["results"][0]["fields"] == {"name": "foo", "age": "19"}
+ assert res["results"][0]["extra_attributes"] == {"name": "foo", "age": "19"}
req = (
aggregations.AggregateRequest("*")
@@ -1425,8 +1438,8 @@ def test_aggregations_filter(client):
)
res = client.ft().aggregate(req)
assert len(res["results"]) == 2
- assert res["results"][0]["fields"] == {"age": "19"}
- assert res["results"][1]["fields"] == {"age": "25"}
+ assert res["results"][0]["extra_attributes"] == {"age": "19"}
+ assert res["results"][1]["extra_attributes"] == {"age": "25"}
@pytest.mark.redismod
@@ -1591,7 +1604,7 @@ def test_create_client_definition_json(client):
assert res.total == 1
else:
assert res["results"][0]["id"] == "king:1"
- assert res["results"][0]["fields"]["$"] == '{"name":"henry"}'
+ assert res["results"][0]["extra_attributes"]["$"] == '{"name":"henry"}'
assert res["total_results"] == 1
@@ -1619,8 +1632,8 @@ def test_fields_as_name(client):
else:
assert 1 == len(res["results"])
assert "doc:1" == res["results"][0]["id"]
- assert "Jon" == res["results"][0]["fields"]["name"]
- assert "25" == res["results"][0]["fields"]["just_a_number"]
+ assert "Jon" == res["results"][0]["extra_attributes"]["name"]
+ assert "25" == res["results"][0]["extra_attributes"]["just_a_number"]
@pytest.mark.redismod
@@ -1687,12 +1700,12 @@ def test_search_return_fields(client):
total = client.ft().search(Query("*").return_field("$.t", as_field="txt"))
assert 1 == len(total["results"])
assert "doc:1" == total["results"][0]["id"]
- assert "riceratops" == total["results"][0]["fields"]["txt"]
+ assert "riceratops" == total["results"][0]["extra_attributes"]["txt"]
total = client.ft().search(Query("*").return_field("$.t2", as_field="txt"))
assert 1 == len(total["results"])
assert "doc:1" == total["results"][0]["id"]
- assert "telmatosaurus" == total["results"][0]["fields"]["txt"]
+ assert "telmatosaurus" == total["results"][0]["extra_attributes"]["txt"]
@pytest.mark.redismod
@@ -1715,8 +1728,8 @@ def test_synupdate(client):
assert res.docs[0].body == "another test"
else:
assert res["results"][0]["id"] == "doc2"
- assert res["results"][0]["fields"]["title"] == "he is another baby"
- assert res["results"][0]["fields"]["body"] == "another test"
+ assert res["results"][0]["extra_attributes"]["title"] == "he is another baby"
+ assert res["results"][0]["extra_attributes"]["body"] == "another test"
@pytest.mark.redismod
@@ -1769,12 +1782,14 @@ def test_create_json_with_alias(client):
else:
res = client.ft().search("@name:henry")
assert res["results"][0]["id"] == "king:1"
- assert res["results"][0]["fields"]["$"] == '{"name":"henry","num":42}'
+ assert res["results"][0]["extra_attributes"]["$"] == '{"name":"henry","num":42}'
assert res["total_results"] == 1
res = client.ft().search("@num:[0 10]")
assert res["results"][0]["id"] == "king:2"
- assert res["results"][0]["fields"]["$"] == '{"name":"james","num":3.14}'
+ assert (
+ res["results"][0]["extra_attributes"]["$"] == '{"name":"james","num":3.14}'
+ )
assert res["total_results"] == 1
# Tests returns an error if path contain special characters (user should
@@ -1813,7 +1828,7 @@ def test_json_with_multipath(client):
res = client.ft().search("@name:{henry}")
assert res["results"][0]["id"] == "king:1"
assert (
- res["results"][0]["fields"]["$"]
+ res["results"][0]["extra_attributes"]["$"]
== '{"name":"henry","country":{"name":"england"}}'
)
assert res["total_results"] == 1
@@ -1821,7 +1836,7 @@ def test_json_with_multipath(client):
res = client.ft().search("@name:{england}")
assert res["results"][0]["id"] == "king:1"
assert (
- res["results"][0]["fields"]["$"]
+ res["results"][0]["extra_attributes"]["$"]
== '{"name":"henry","country":{"name":"england"}}'
)
assert res["total_results"] == 1
@@ -1862,7 +1877,9 @@ def test_json_with_jsonpath(client):
res = client.ft().search(Query("@name:RediSearch"))
assert res["total_results"] == 1
assert res["results"][0]["id"] == "doc:1"
- assert res["results"][0]["fields"]["$"] == '{"prod:name":"RediSearch"}'
+ assert (
+ res["results"][0]["extra_attributes"]["$"] == '{"prod:name":"RediSearch"}'
+ )
# query for an unsupported field
res = client.ft().search("@name_unsupported:RediSearch")
@@ -1872,141 +1889,181 @@ def test_json_with_jsonpath(client):
res = client.ft().search(Query("@name:RediSearch").return_field("name"))
assert res["total_results"] == 1
assert res["results"][0]["id"] == "doc:1"
- assert res["results"][0]["fields"]["name"] == "RediSearch"
-
-
-# @pytest.mark.redismod
-# @pytest.mark.onlynoncluster
-# @skip_if_redis_enterprise()
-# def test_profile(client):
-# client.ft().create_index((TextField("t"),))
-# client.ft().client.hset("1", "t", "hello")
-# client.ft().client.hset("2", "t", "world")
-
-# # check using Query
-# q = Query("hello|world").no_content()
-# res, det = client.ft().profile(q)
-# assert det["Iterators profile"]["Counter"] == 2.0
-# assert len(det["Iterators profile"]["Child iterators"]) == 2
-# assert det["Iterators profile"]["Type"] == "UNION"
-# assert det["Parsing time"] < 0.5
-# assert len(res.docs) == 2 # check also the search result
-
-# # check using AggregateRequest
-# req = (
-# aggregations.AggregateRequest("*")
-# .load("t")
-# .apply(prefix="startswith(@t, 'hel')")
-# )
-# res, det = client.ft().profile(req)
-# assert det["Iterators profile"]["Counter"] == 2.0
-# assert det["Iterators profile"]["Type"] == "WILDCARD"
-# assert isinstance(det["Parsing time"], float)
-# assert len(res.rows) == 2 # check also the search result
-
-
-# @pytest.mark.redismod
-# @pytest.mark.onlynoncluster
-# def test_profile_limited(client):
-# client.ft().create_index((TextField("t"),))
-# client.ft().client.hset("1", "t", "hello")
-# client.ft().client.hset("2", "t", "hell")
-# client.ft().client.hset("3", "t", "help")
-# client.ft().client.hset("4", "t", "helowa")
-
-# q = Query("%hell% hel*")
-# res, det = client.ft().profile(q, limited=True)
-# assert (
-# det["Iterators profile"]["Child iterators"][0]["Child iterators"]
-# == "The number of iterators in the union is 3"
-# )
-# assert (
-# det["Iterators profile"]["Child iterators"][1]["Child iterators"]
-# == "The number of iterators in the union is 4"
-# )
-# assert det["Iterators profile"]["Type"] == "INTERSECT"
-# assert len(res.docs) == 3 # check also the search result
-
-
-# @pytest.mark.redismod
-# @skip_ifmodversion_lt("2.4.3", "search")
-# def test_profile_query_params(modclient: redis.Redis):
-# modclient.flushdb()
-# modclient.ft().create_index(
-# (
-# VectorField(
-# "v", "HNSW", {"TYPE": "FLOAT32", "DIM": 2, "DISTANCE_METRIC": "L2"}
-# ),
-# )
-# )
-# modclient.hset("a", "v", "aaaaaaaa")
-# modclient.hset("b", "v", "aaaabaaa")
-# modclient.hset("c", "v", "aaaaabaa")
-# query = "*=>[KNN 2 @v $vec]"
-# q = Query(query).return_field("__v_score").sort_by("__v_score", True).dialect(2)
-# res, det = modclient.ft().profile(q, query_params={"vec": "aaaaaaaa"})
-# assert det["Iterators profile"]["Counter"] == 2.0
-# assert det["Iterators profile"]["Type"] == "VECTOR"
-# assert res.total == 2
-# assert "a" == res.docs[0].id
-# assert "0" == res.docs[0].__getattribute__("__v_score")
+ assert res["results"][0]["extra_attributes"]["name"] == "RediSearch"
+
+
+@pytest.mark.redismod
+@pytest.mark.onlynoncluster
+@skip_if_redis_enterprise()
+def test_profile(client):
+ client.ft().create_index((TextField("t"),))
+ client.ft().client.hset("1", "t", "hello")
+ client.ft().client.hset("2", "t", "world")
+
+ # check using Query
+ q = Query("hello|world").no_content()
+ if is_resp2_connection(client):
+ res, det = client.ft().profile(q)
+ assert det["Iterators profile"]["Counter"] == 2.0
+ assert len(det["Iterators profile"]["Child iterators"]) == 2
+ assert det["Iterators profile"]["Type"] == "UNION"
+ assert det["Parsing time"] < 0.5
+ assert len(res.docs) == 2 # check also the search result
+
+ # check using AggregateRequest
+ req = (
+ aggregations.AggregateRequest("*")
+ .load("t")
+ .apply(prefix="startswith(@t, 'hel')")
+ )
+ res, det = client.ft().profile(req)
+ assert det["Iterators profile"]["Counter"] == 2
+ assert det["Iterators profile"]["Type"] == "WILDCARD"
+ assert isinstance(det["Parsing time"], float)
+ assert len(res.rows) == 2 # check also the search result
+ else:
+ res = client.ft().profile(q)
+ assert res["profile"]["Iterators profile"][0]["Counter"] == 2.0
+ assert res["profile"]["Iterators profile"][0]["Type"] == "UNION"
+ assert res["profile"]["Parsing time"] < 0.5
+ assert len(res["results"]) == 2 # check also the search result
+
+ # check using AggregateRequest
+ req = (
+ aggregations.AggregateRequest("*")
+ .load("t")
+ .apply(prefix="startswith(@t, 'hel')")
+ )
+ res = client.ft().profile(req)
+ assert res["profile"]["Iterators profile"][0]["Counter"] == 2
+ assert res["profile"]["Iterators profile"][0]["Type"] == "WILDCARD"
+ assert isinstance(res["profile"]["Parsing time"], float)
+ assert len(res["results"]) == 2 # check also the search result
+
+
+@pytest.mark.redismod
+@pytest.mark.onlynoncluster
+def test_profile_limited(client):
+ client.ft().create_index((TextField("t"),))
+ client.ft().client.hset("1", "t", "hello")
+ client.ft().client.hset("2", "t", "hell")
+ client.ft().client.hset("3", "t", "help")
+ client.ft().client.hset("4", "t", "helowa")
+
+ q = Query("%hell% hel*")
+ if is_resp2_connection(client):
+ res, det = client.ft().profile(q, limited=True)
+ assert (
+ det["Iterators profile"]["Child iterators"][0]["Child iterators"]
+ == "The number of iterators in the union is 3"
+ )
+ assert (
+ det["Iterators profile"]["Child iterators"][1]["Child iterators"]
+ == "The number of iterators in the union is 4"
+ )
+ assert det["Iterators profile"]["Type"] == "INTERSECT"
+ assert len(res.docs) == 3 # check also the search result
+ else:
+ res = client.ft().profile(q, limited=True)
+ iterators_profile = res["profile"]["Iterators profile"]
+ assert (
+ iterators_profile[0]["Child iterators"][0]["Child iterators"]
+ == "The number of iterators in the union is 3"
+ )
+ assert (
+ iterators_profile[0]["Child iterators"][1]["Child iterators"]
+ == "The number of iterators in the union is 4"
+ )
+ assert iterators_profile[0]["Type"] == "INTERSECT"
+ assert len(res["results"]) == 3 # check also the search result
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("2.4.3", "search")
+def test_profile_query_params(client):
+ client.ft().create_index(
+ (
+ VectorField(
+ "v", "HNSW", {"TYPE": "FLOAT32", "DIM": 2, "DISTANCE_METRIC": "L2"}
+ ),
+ )
+ )
+ client.hset("a", "v", "aaaaaaaa")
+ client.hset("b", "v", "aaaabaaa")
+ client.hset("c", "v", "aaaaabaa")
+ query = "*=>[KNN 2 @v $vec]"
+ q = Query(query).return_field("__v_score").sort_by("__v_score", True).dialect(2)
+ if is_resp2_connection(client):
+ res, det = client.ft().profile(q, query_params={"vec": "aaaaaaaa"})
+ assert det["Iterators profile"]["Counter"] == 2.0
+ assert det["Iterators profile"]["Type"] == "VECTOR"
+ assert res.total == 2
+ assert "a" == res.docs[0].id
+ assert "0" == res.docs[0].__getattribute__("__v_score")
+ else:
+ res = client.ft().profile(q, query_params={"vec": "aaaaaaaa"})
+ assert res["profile"]["Iterators profile"][0]["Counter"] == 2
+ assert res["profile"]["Iterators profile"][0]["Type"] == "VECTOR"
+ assert res["total_results"] == 2
+ assert "a" == res["results"][0]["id"]
+ assert "0" == res["results"][0]["extra_attributes"]["__v_score"]
@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.3", "search")
-def test_vector_field(modclient):
- modclient.flushdb()
- modclient.ft().create_index(
+def test_vector_field(client):
+ client.flushdb()
+ client.ft().create_index(
(
VectorField(
"v", "HNSW", {"TYPE": "FLOAT32", "DIM": 2, "DISTANCE_METRIC": "L2"}
),
)
)
- modclient.hset("a", "v", "aaaaaaaa")
- modclient.hset("b", "v", "aaaabaaa")
- modclient.hset("c", "v", "aaaaabaa")
+ client.hset("a", "v", "aaaaaaaa")
+ client.hset("b", "v", "aaaabaaa")
+ client.hset("c", "v", "aaaaabaa")
query = "*=>[KNN 2 @v $vec]"
q = Query(query).return_field("__v_score").sort_by("__v_score", True).dialect(2)
- res = modclient.ft().search(q, query_params={"vec": "aaaaaaaa"})
+ res = client.ft().search(q, query_params={"vec": "aaaaaaaa"})
- if is_resp2_connection(modclient):
+ if is_resp2_connection(client):
assert "a" == res.docs[0].id
assert "0" == res.docs[0].__getattribute__("__v_score")
else:
assert "a" == res["results"][0]["id"]
- assert "0" == res["results"][0]["fields"]["__v_score"]
+ assert "0" == res["results"][0]["extra_attributes"]["__v_score"]
@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.3", "search")
-def test_vector_field_error(modclient):
- modclient.flushdb()
+def test_vector_field_error(r):
+ r.flushdb()
# sortable tag
with pytest.raises(Exception):
- modclient.ft().create_index((VectorField("v", "HNSW", {}, sortable=True),))
+ r.ft().create_index((VectorField("v", "HNSW", {}, sortable=True),))
# not supported algorithm
with pytest.raises(Exception):
- modclient.ft().create_index((VectorField("v", "SORT", {}),))
+ r.ft().create_index((VectorField("v", "SORT", {}),))
@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.3", "search")
-def test_text_params(modclient):
- modclient.flushdb()
- modclient.ft().create_index((TextField("name"),))
+def test_text_params(client):
+ client.flushdb()
+ client.ft().create_index((TextField("name"),))
- modclient.hset("doc1", mapping={"name": "Alice"})
- modclient.hset("doc2", mapping={"name": "Bob"})
- modclient.hset("doc3", mapping={"name": "Carol"})
+ client.hset("doc1", mapping={"name": "Alice"})
+ client.hset("doc2", mapping={"name": "Bob"})
+ client.hset("doc3", mapping={"name": "Carol"})
params_dict = {"name1": "Alice", "name2": "Bob"}
q = Query("@name:($name1 | $name2 )").dialect(2)
- res = modclient.ft().search(q, query_params=params_dict)
- if is_resp2_connection(modclient):
+ res = client.ft().search(q, query_params=params_dict)
+ if is_resp2_connection(client):
assert 2 == res.total
assert "doc1" == res.docs[0].id
assert "doc2" == res.docs[1].id
@@ -2018,19 +2075,19 @@ def test_text_params(modclient):
@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.3", "search")
-def test_numeric_params(modclient):
- modclient.flushdb()
- modclient.ft().create_index((NumericField("numval"),))
+def test_numeric_params(client):
+ client.flushdb()
+ client.ft().create_index((NumericField("numval"),))
- modclient.hset("doc1", mapping={"numval": 101})
- modclient.hset("doc2", mapping={"numval": 102})
- modclient.hset("doc3", mapping={"numval": 103})
+ client.hset("doc1", mapping={"numval": 101})
+ client.hset("doc2", mapping={"numval": 102})
+ client.hset("doc3", mapping={"numval": 103})
params_dict = {"min": 101, "max": 102}
q = Query("@numval:[$min $max]").dialect(2)
- res = modclient.ft().search(q, query_params=params_dict)
+ res = client.ft().search(q, query_params=params_dict)
- if is_resp2_connection(modclient):
+ if is_resp2_connection(client):
assert 2 == res.total
assert "doc1" == res.docs[0].id
assert "doc2" == res.docs[1].id
@@ -2042,18 +2099,17 @@ def test_numeric_params(modclient):
@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.3", "search")
-def test_geo_params(modclient):
+def test_geo_params(client):
- modclient.flushdb()
- modclient.ft().create_index((GeoField("g")))
- modclient.hset("doc1", mapping={"g": "29.69465, 34.95126"})
- modclient.hset("doc2", mapping={"g": "29.69350, 34.94737"})
- modclient.hset("doc3", mapping={"g": "29.68746, 34.94882"})
+ client.ft().create_index((GeoField("g")))
+ client.hset("doc1", mapping={"g": "29.69465, 34.95126"})
+ client.hset("doc2", mapping={"g": "29.69350, 34.94737"})
+ client.hset("doc3", mapping={"g": "29.68746, 34.94882"})
params_dict = {"lat": "34.95126", "lon": "29.69465", "radius": 1000, "units": "km"}
q = Query("@g:[$lon $lat $radius $units]").dialect(2)
- res = modclient.ft().search(q, query_params=params_dict)
- if is_resp2_connection(modclient):
+ res = client.ft().search(q, query_params=params_dict)
+ if is_resp2_connection(client):
assert 3 == res.total
assert "doc1" == res.docs[0].id
assert "doc2" == res.docs[1].id
@@ -2089,8 +2145,8 @@ def test_search_commands_in_pipeline(client):
assert "doc2" == res[3]["results"][1]["id"]
assert res[3]["results"][0]["payload"] is None
assert (
- res[3]["results"][0]["fields"]
- == res[3]["results"][1]["fields"]
+ res[3]["results"][0]["extra_attributes"]
+ == res[3]["results"][1]["extra_attributes"]
== {"txt": "foo bar"}
)
@@ -2098,19 +2154,18 @@ def test_search_commands_in_pipeline(client):
@pytest.mark.redismod
@pytest.mark.onlynoncluster
@skip_ifmodversion_lt("2.4.3", "search")
-def test_dialect_config(modclient: redis.Redis):
- assert modclient.ft().config_get("DEFAULT_DIALECT") == {"DEFAULT_DIALECT": "1"}
- assert modclient.ft().config_set("DEFAULT_DIALECT", 2)
- assert modclient.ft().config_get("DEFAULT_DIALECT") == {"DEFAULT_DIALECT": "2"}
- assert modclient.ft().config_set("DEFAULT_DIALECT", 1)
+def test_dialect_config(client):
+ assert client.ft().config_get("DEFAULT_DIALECT")
+ client.ft().config_set("DEFAULT_DIALECT", 2)
+ assert client.ft().config_get("DEFAULT_DIALECT") == {"DEFAULT_DIALECT": "2"}
with pytest.raises(redis.ResponseError):
- modclient.ft().config_set("DEFAULT_DIALECT", 0)
+ client.ft().config_set("DEFAULT_DIALECT", 0)
@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.3", "search")
-def test_dialect(modclient: redis.Redis):
- modclient.ft().create_index(
+def test_dialect(client):
+ client.ft().create_index(
(
TagField("title"),
TextField("t1"),
@@ -2121,94 +2176,94 @@ def test_dialect(modclient: redis.Redis):
),
)
)
- modclient.hset("h", "t1", "hello")
+ client.hset("h", "t1", "hello")
with pytest.raises(redis.ResponseError) as err:
- modclient.ft().explain(Query("(*)").dialect(1))
+ client.ft().explain(Query("(*)").dialect(1))
assert "Syntax error" in str(err)
- assert "WILDCARD" in modclient.ft().explain(Query("(*)").dialect(2))
+ assert "WILDCARD" in client.ft().explain(Query("(*)").dialect(2))
with pytest.raises(redis.ResponseError) as err:
- modclient.ft().explain(Query("$hello").dialect(1))
+ client.ft().explain(Query("$hello").dialect(1))
assert "Syntax error" in str(err)
q = Query("$hello").dialect(2)
expected = "UNION {\n hello\n +hello(expanded)\n}\n"
- assert expected in modclient.ft().explain(q, query_params={"hello": "hello"})
+ assert expected in client.ft().explain(q, query_params={"hello": "hello"})
expected = "NUMERIC {0.000000 <= @num <= 10.000000}\n"
- assert expected in modclient.ft().explain(Query("@title:(@num:[0 10])").dialect(1))
+ assert expected in client.ft().explain(Query("@title:(@num:[0 10])").dialect(1))
with pytest.raises(redis.ResponseError) as err:
- modclient.ft().explain(Query("@title:(@num:[0 10])").dialect(2))
+ client.ft().explain(Query("@title:(@num:[0 10])").dialect(2))
assert "Syntax error" in str(err)
@pytest.mark.redismod
-def test_expire_while_search(modclient: redis.Redis):
- modclient.ft().create_index((TextField("txt"),))
- modclient.hset("hset:1", "txt", "a")
- modclient.hset("hset:2", "txt", "b")
- modclient.hset("hset:3", "txt", "c")
- if is_resp2_connection(modclient):
- assert 3 == modclient.ft().search(Query("*")).total
- modclient.pexpire("hset:2", 300)
+def test_expire_while_search(client: redis.Redis):
+ client.ft().create_index((TextField("txt"),))
+ client.hset("hset:1", "txt", "a")
+ client.hset("hset:2", "txt", "b")
+ client.hset("hset:3", "txt", "c")
+ if is_resp2_connection(client):
+ assert 3 == client.ft().search(Query("*")).total
+ client.pexpire("hset:2", 300)
for _ in range(500):
- modclient.ft().search(Query("*")).docs[1]
+ client.ft().search(Query("*")).docs[1]
time.sleep(1)
- assert 2 == modclient.ft().search(Query("*")).total
+ assert 2 == client.ft().search(Query("*")).total
else:
- assert 3 == modclient.ft().search(Query("*"))["total_results"]
- modclient.pexpire("hset:2", 300)
+ assert 3 == client.ft().search(Query("*"))["total_results"]
+ client.pexpire("hset:2", 300)
for _ in range(500):
- modclient.ft().search(Query("*"))["results"][1]
+ client.ft().search(Query("*"))["results"][1]
time.sleep(1)
- assert 2 == modclient.ft().search(Query("*"))["total_results"]
+ assert 2 == client.ft().search(Query("*"))["total_results"]
@pytest.mark.redismod
@pytest.mark.experimental
-def test_withsuffixtrie(modclient: redis.Redis):
+def test_withsuffixtrie(client: redis.Redis):
# create index
- assert modclient.ft().create_index((TextField("txt"),))
- waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- if is_resp2_connection(modclient):
- info = modclient.ft().info()
+ assert client.ft().create_index((TextField("txt"),))
+ waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
+ if is_resp2_connection(client):
+ info = client.ft().info()
assert "WITHSUFFIXTRIE" not in info["attributes"][0]
- assert modclient.ft().dropindex("idx")
+ assert client.ft().dropindex("idx")
# create withsuffixtrie index (text fiels)
- assert modclient.ft().create_index((TextField("t", withsuffixtrie=True)))
- waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- info = modclient.ft().info()
+ assert client.ft().create_index((TextField("t", withsuffixtrie=True)))
+ waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
+ info = client.ft().info()
assert "WITHSUFFIXTRIE" in info["attributes"][0]
- assert modclient.ft().dropindex("idx")
+ assert client.ft().dropindex("idx")
# create withsuffixtrie index (tag field)
- assert modclient.ft().create_index((TagField("t", withsuffixtrie=True)))
- waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- info = modclient.ft().info()
+ assert client.ft().create_index((TagField("t", withsuffixtrie=True)))
+ waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
+ info = client.ft().info()
assert "WITHSUFFIXTRIE" in info["attributes"][0]
else:
- info = modclient.ft().info()
+ info = client.ft().info()
assert "WITHSUFFIXTRIE" not in info["attributes"][0]["flags"]
- assert modclient.ft().dropindex("idx")
+ assert client.ft().dropindex("idx")
# create withsuffixtrie index (text fiels)
- assert modclient.ft().create_index((TextField("t", withsuffixtrie=True)))
- waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- info = modclient.ft().info()
+ assert client.ft().create_index((TextField("t", withsuffixtrie=True)))
+ waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
+ info = client.ft().info()
assert "WITHSUFFIXTRIE" in info["attributes"][0]["flags"]
- assert modclient.ft().dropindex("idx")
+ assert client.ft().dropindex("idx")
# create withsuffixtrie index (tag field)
- assert modclient.ft().create_index((TagField("t", withsuffixtrie=True)))
- waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
- info = modclient.ft().info()
+ assert client.ft().create_index((TagField("t", withsuffixtrie=True)))
+ waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
+ info = client.ft().info()
assert "WITHSUFFIXTRIE" in info["attributes"][0]["flags"]
@pytest.mark.redismod
-def test_query_timeout(modclient: redis.Redis):
+def test_query_timeout(r: redis.Redis):
q1 = Query("foo").timeout(5000)
assert q1.get_args() == ["foo", "TIMEOUT", 5000, "LIMIT", 0, 10]
q2 = Query("foo").timeout("not_a_number")
with pytest.raises(redis.ResponseError):
- modclient.ft().search(q2)
+ r.ft().search(q2)
diff --git a/tests/test_sentinel.py b/tests/test_sentinel.py
index 8542a0bfc3..d797a0467b 100644
--- a/tests/test_sentinel.py
+++ b/tests/test_sentinel.py
@@ -1,7 +1,6 @@
import socket
import pytest
-
import redis.sentinel
from redis import exceptions
from redis.sentinel import (
diff --git a/tests/test_ssl.py b/tests/test_ssl.py
index ed38a3166b..f33e45a60b 100644
--- a/tests/test_ssl.py
+++ b/tests/test_ssl.py
@@ -4,7 +4,6 @@
from urllib.parse import urlparse
import pytest
-
import redis
from redis.exceptions import ConnectionError, RedisError
@@ -20,10 +19,10 @@ class TestSSL:
"""
ROOT = os.path.join(os.path.dirname(__file__), "..")
- CERT_DIR = os.path.abspath(os.path.join(ROOT, "docker", "stunnel", "keys"))
+ CERT_DIR = os.path.abspath(os.path.join(ROOT, "dockers", "stunnel", "keys"))
if not os.path.isdir(CERT_DIR): # github actions package validation case
CERT_DIR = os.path.abspath(
- os.path.join(ROOT, "..", "docker", "stunnel", "keys")
+ os.path.join(ROOT, "..", "dockers", "stunnel", "keys")
)
if not os.path.isdir(CERT_DIR):
raise IOError(f"No SSL certificates found. They should be in {CERT_DIR}")
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
index 31e753c158..80490af4ef 100644
--- a/tests/test_timeseries.py
+++ b/tests/test_timeseries.py
@@ -3,16 +3,15 @@
from time import sleep
import pytest
-
import redis
from .conftest import assert_resp_response, is_resp2_connection, skip_ifmodversion_lt
@pytest.fixture
-def client(modclient):
- modclient.flushdb()
- return modclient
+def client(decoded_r):
+ decoded_r.flushdb()
+ return decoded_r
@pytest.mark.redismod
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 553c77b3c6..0000000000
--- a/tox.ini
+++ /dev/null
@@ -1,379 +0,0 @@
-[pytest]
-addopts = -s
-markers =
- redismod: run only the redis module tests
- pipeline: pipeline tests
- onlycluster: marks tests to be run only with cluster mode redis
- onlynoncluster: marks tests to be run only with standalone redis
- ssl: marker for only the ssl tests
- asyncio: marker for async tests
- replica: replica tests
- experimental: run only experimental tests
-asyncio_mode = auto
-
-[tox]
-minversion = 3.2.0
-requires = tox-docker
-envlist = {standalone,cluster}-{plain,hiredis,ocsp}-{uvloop,asyncio}-{py37,py38,py39,pypy3},linters,docs
-
-[docker:master]
-name = master
-image = redisfab/redis-py:6.2.6
-ports =
- 6379:6379/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6379)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis6.2/master/redis.conf:/redis.conf
-
-[docker:replica]
-name = replica
-image = redisfab/redis-py:6.2.6
-links =
- master:master
-ports =
- 6380:6380/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6380)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis6.2/replica/redis.conf:/redis.conf
-
-[docker:unstable]
-name = unstable
-image = redisfab/redis-py:unstable
-ports =
- 6378:6378/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6378)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/unstable/redis.conf:/redis.conf
-
-[docker:unstable_cluster]
-name = unstable_cluster
-image = redisfab/redis-py-cluster:unstable
-ports =
- 6372:6372/tcp
- 6373:6373/tcp
- 6374:6374/tcp
- 6375:6375/tcp
- 6376:6376/tcp
- 6377:6377/tcp
-healtcheck_cmd = python -c "import socket;print(True) if all([0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',port)) for port in range(6372,6377)]) else False"
-volumes =
- bind:rw:{toxinidir}/docker/unstable_cluster/redis.conf:/redis.conf
-
-[docker:sentinel_1]
-name = sentinel_1
-image = redisfab/redis-py-sentinel:6.2.6
-links =
- master:master
-ports =
- 26379:26379/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26379)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis6.2/sentinel/sentinel_1.conf:/sentinel.conf
-
-[docker:sentinel_2]
-name = sentinel_2
-image = redisfab/redis-py-sentinel:6.2.6
-links =
- master:master
-ports =
- 26380:26380/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26380)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis6.2/sentinel/sentinel_2.conf:/sentinel.conf
-
-[docker:sentinel_3]
-name = sentinel_3
-image = redisfab/redis-py-sentinel:6.2.6
-links =
- master:master
-ports =
- 26381:26381/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26381)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis6.2/sentinel/sentinel_3.conf:/sentinel.conf
-
-[docker:redis_stack]
-name = redis_stack
-image = redis/redis-stack-server:edge
-ports =
- 36379:6379/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',36379)) else False"
-
-[docker:redis_cluster]
-name = redis_cluster
-image = redisfab/redis-py-cluster:6.2.6
-ports =
- 16379:16379/tcp
- 16380:16380/tcp
- 16381:16381/tcp
- 16382:16382/tcp
- 16383:16383/tcp
- 16384:16384/tcp
-healtcheck_cmd = python -c "import socket;print(True) if all([0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',port)) for port in range(16379,16384)]) else False"
-volumes =
- bind:rw:{toxinidir}/docker/cluster/redis.conf:/redis.conf
-
-[docker:redismod_cluster]
-name = redismod_cluster
-image = redisfab/redis-py-modcluster:edge
-ports =
- 46379:46379/tcp
- 46380:46380/tcp
- 46381:46381/tcp
- 46382:46382/tcp
- 46383:46383/tcp
- 46384:46384/tcp
-healtcheck_cmd = python -c "import socket;print(True) if all([0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',port)) for port in range(46379,46384)]) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redismod_cluster/redis.conf:/redis.conf
-
-[docker:stunnel]
-name = stunnel
-image = redisfab/stunnel:latest
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6666)) else False"
-links =
- master:master
-ports =
- 6666:6666/tcp
-volumes =
- bind:ro:{toxinidir}/docker/stunnel/conf:/etc/stunnel/conf.d
- bind:ro:{toxinidir}/docker/stunnel/keys:/etc/stunnel/keys
-
-[docker:redis5_master]
-name = redis5_master
-image = redisfab/redis-py:5.0-buster
-ports =
- 6382:6382/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6382)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis5/master/redis.conf:/redis.conf
-
-[docker:redis5_replica]
-name = redis5_replica
-image = redisfab/redis-py:5.0-buster
-links =
- redis5_master:redis5_master
-ports =
- 6383:6383/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6383)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis5/replica/redis.conf:/redis.conf
-
-[docker:redis5_sentinel_1]
-name = redis5_sentinel_1
-image = redisfab/redis-py-sentinel:5.0-buster
-links =
- redis5_master:redis5_master
-ports =
- 26382:26382/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26382)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis5/sentinel/sentinel_1.conf:/sentinel.conf
-
-[docker:redis5_sentinel_2]
-name = redis5_sentinel_2
-image = redisfab/redis-py-sentinel:5.0-buster
-links =
- redis5_master:redis5_master
-ports =
- 26383:26383/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26383)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis5/sentinel/sentinel_2.conf:/sentinel.conf
-
-[docker:redis5_sentinel_3]
-name = redis5_sentinel_3
-image = redisfab/redis-py-sentinel:5.0-buster
-links =
- redis5_master:redis5_master
-ports =
- 26384:26384/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26384)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis5/sentinel/sentinel_3.conf:/sentinel.conf
-
-[docker:redis5_cluster]
-name = redis5_cluster
-image = redisfab/redis-py-cluster:5.0-buster
-ports =
- 16385:16385/tcp
- 16386:16386/tcp
- 16387:16387/tcp
- 16388:16388/tcp
- 16389:16389/tcp
- 16390:16390/tcp
-healtcheck_cmd = python -c "import socket;print(True) if all([0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',port)) for port in range(16385,16390)]) else False"
-volumes =
- bind:rw:{toxinidir}/docker/cluster/redis.conf:/redis.conf
-
-[docker:redis4_master]
-name = redis4_master
-image = redisfab/redis-py:4.0-buster
-ports =
- 6381:6381/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6381)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis4/master/redis.conf:/redis.conf
-
-[docker:redis4_sentinel_1]
-name = redis4_sentinel_1
-image = redisfab/redis-py-sentinel:4.0-buster
-links =
- redis4_master:redis4_master
-ports =
- 26385:26385/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26385)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis4/sentinel/sentinel_1.conf:/sentinel.conf
-
-[docker:redis4_sentinel_2]
-name = redis4_sentinel_2
-image = redisfab/redis-py-sentinel:4.0-buster
-links =
- redis4_master:redis4_master
-ports =
- 26386:26386/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26386)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis4/sentinel/sentinel_2.conf:/sentinel.conf
-
-[docker:redis4_sentinel_3]
-name = redis4_sentinel_3
-image = redisfab/redis-py-sentinel:4.0-buster
-links =
- redis4_master:redis4_master
-ports =
- 26387:26387/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26387)) else False"
-volumes =
- bind:rw:{toxinidir}/docker/redis4/sentinel/sentinel_3.conf:/sentinel.conf
-
-[docker:redis4_cluster]
-name = redis4_cluster
-image = redisfab/redis-py-cluster:4.0-buster
-ports =
- 16391:16391/tcp
- 16392:16392/tcp
- 16393:16393/tcp
- 16394:16394/tcp
- 16395:16395/tcp
- 16396:16396/tcp
-healtcheck_cmd = python -c "import socket;print(True) if all([0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',port)) for port in range(16391,16396)]) else False"
-volumes =
- bind:rw:{toxinidir}/docker/cluster/redis.conf:/redis.conf
-
-[isort]
-profile = black
-multi_line_output = 3
-
-[testenv]
-deps =
- -r {toxinidir}/requirements.txt
- -r {toxinidir}/dev_requirements.txt
-docker =
- unstable
- unstable_cluster
- master
- replica
- sentinel_1
- sentinel_2
- sentinel_3
- redis_cluster
- redis_stack
- stunnel
-extras =
- hiredis: hiredis
- ocsp: cryptography, pyopenssl, requests
-setenv =
- CLUSTER_URL = "redis://localhost:16379/0"
- UNSTABLE_CLUSTER_URL = "redis://localhost:6372/0"
-commands =
- standalone: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --junit-xml=standalone-results.xml {posargs}
- standalone-uvloop: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --junit-xml=standalone-uvloop-results.xml --uvloop {posargs}
- cluster: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} --redis-unstable-url={env:UNSTABLE_CLUSTER_URL:} --junit-xml=cluster-results.xml {posargs}
- cluster-uvloop: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} --redis-unstable-url={env:UNSTABLE_CLUSTER_URL:} --junit-xml=cluster-uvloop-results.xml --uvloop {posargs}
-
-[testenv:redis5]
-deps =
- -r {toxinidir}/requirements.txt
- -r {toxinidir}/dev_requirements.txt
-docker =
- redis5_master
- redis5_replica
- redis5_sentinel_1
- redis5_sentinel_2
- redis5_sentinel_3
- redis5_cluster
-extras =
- hiredis: hiredis
- cryptography: cryptography, requests
-setenv =
- CLUSTER_URL = "redis://localhost:16385/0"
-commands =
- standalone: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster and not redismod' {posargs}
- cluster: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} {posargs}
-
-[testenv:redis4]
-deps =
- -r {toxinidir}/requirements.txt
- -r {toxinidir}/dev_requirements.txt
-docker =
- redis4_master
- redis4_sentinel_1
- redis4_sentinel_2
- redis4_sentinel_3
- redis4_cluster
-extras =
- hiredis: hiredis
- cryptography: cryptography, requests
-setenv =
- CLUSTER_URL = "redis://localhost:16391/0"
-commands =
- standalone: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster and not redismod' {posargs}
- cluster: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} {posargs}
-
-[testenv:devenv]
-skipsdist = true
-skip_install = true
-deps = -r {toxinidir}/dev_requirements.txt
-docker = {[testenv]docker}
-
-[testenv:linters]
-deps_files = dev_requirements.txt
-docker =
-commands =
- flake8
- black --target-version py37 --check --diff .
- isort --check-only --diff .
- vulture redis whitelist.py --min-confidence 80
- flynt --fail-on-change --dry-run .
-skipsdist = true
-skip_install = true
-
-[testenv:docs]
-deps = -r docs/requirements.txt
-docker =
-changedir = {toxinidir}/docs
-allowlist_externals = make
-commands = make html
-
-[flake8]
-max-line-length = 88
-exclude =
- *.egg-info,
- *.pyc,
- .git,
- .tox,
- .venv*,
- build,
- docs/*,
- dist,
- docker,
- venv*,
- .venv*,
- whitelist.py
-ignore =
- F405
- W503
- E203
- E126