diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index b10edf2fb4..ed969d11f1 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -90,6 +90,21 @@ jobs: invoke ${{matrix.test-type}}-tests ls -1 + - name: Run tests against hiredis < 3.0.0 + if: ${{ matrix.connection-type == 'hiredis' && matrix.python-version == '3.12'}} + run: | + pip uninstall hiredis -y + pip install -U setuptools wheel + pip install -r requirements.txt + pip install -r dev_requirements.txt + if [ "${{matrix.connection-type}}" == "hiredis" ]; then + pip install "hiredis<3.0.0" + fi + invoke devenv + sleep 10 # time to settle + invoke ${{matrix.test-type}}-tests + ls -1 + - name: Upload test results and profiling data uses: actions/upload-artifact@v4 with: @@ -145,6 +160,24 @@ jobs: invoke ${{matrix.test-type}}-tests --protocol=3 fi + - name: Run tests against hiredis < 3.0.0 + if: ${{ matrix.connection-type == 'hiredis' && matrix.python-version == '3.12'}} + run: | + pip uninstall hiredis -y + pip install -U setuptools wheel + pip install -r requirements.txt + pip install -r dev_requirements.txt + if [ "${{matrix.connection-type}}" == "hiredis" ]; then + pip install "hiredis<3.0.0" + fi + invoke devenv + sleep 10 # time to settle + if [ "${{matrix.event-loop}}" == "uvloop" ]; then + invoke ${{matrix.test-type}}-tests --uvloop --protocol=3 + else + invoke ${{matrix.test-type}}-tests --protocol=3 + fi + - name: Upload test results and profiling data uses: actions/upload-artifact@v4 with: diff --git a/dev_requirements.txt b/dev_requirements.txt index 37a107d16d..adfa99e80c 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -9,7 +9,7 @@ packaging>=20.4 pytest pytest-asyncio>=0.23.0,<0.24.0 pytest-cov -pytest-profiling +pytest-profiling==1.7.0 pytest-timeout ujson>=4.2.0 uvloop diff --git a/doctests/dt_set.py b/doctests/dt_set.py index 0c0562ac80..fc66410b45 100644 --- a/doctests/dt_set.py +++ b/doctests/dt_set.py 
@@ -58,11 +58,11 @@ r.sadd("bikes:racing:usa", "bike:1", "bike:4") # HIDE_END res7 = r.sinter("bikes:racing:france", "bikes:racing:usa") -print(res7) # >>> ['bike:1'] +print(res7) # >>> {'bike:1'} # STEP_END # REMOVE_START -assert res7 == ["bike:1"] +assert res7 == {"bike:1"} # REMOVE_END # STEP_START scard @@ -83,12 +83,12 @@ print(res9) # >>> 3 res10 = r.smembers("bikes:racing:france") -print(res10) # >>> ['bike:1', 'bike:2', 'bike:3'] +print(res10) # >>> {'bike:1', 'bike:2', 'bike:3'} # STEP_END # REMOVE_START assert res9 == 3 -assert res10 == ['bike:1', 'bike:2', 'bike:3'] +assert res10 == {'bike:1', 'bike:2', 'bike:3'} # REMOVE_END # STEP_START smismember @@ -109,11 +109,11 @@ r.sadd("bikes:racing:usa", "bike:1", "bike:4") res13 = r.sdiff("bikes:racing:france", "bikes:racing:usa") -print(res13) # >>> ['bike:2', 'bike:3'] +print(res13) # >>> {'bike:2', 'bike:3'} # STEP_END # REMOVE_START -assert res13 == ['bike:2', 'bike:3'] +assert res13 == {'bike:2', 'bike:3'} r.delete("bikes:racing:france") r.delete("bikes:racing:usa") # REMOVE_END @@ -124,27 +124,27 @@ r.sadd("bikes:racing:italy", "bike:1", "bike:2", "bike:3", "bike:4") res13 = r.sinter("bikes:racing:france", "bikes:racing:usa", "bikes:racing:italy") -print(res13) # >>> ['bike:1'] +print(res13) # >>> {'bike:1'} res14 = r.sunion("bikes:racing:france", "bikes:racing:usa", "bikes:racing:italy") -print(res14) # >>> ['bike:1', 'bike:2', 'bike:3', 'bike:4'] +print(res14) # >>> {'bike:1', 'bike:2', 'bike:3', 'bike:4'} res15 = r.sdiff("bikes:racing:france", "bikes:racing:usa", "bikes:racing:italy") -print(res15) # >>> [] +print(res15) # >>> {} res16 = r.sdiff("bikes:racing:usa", "bikes:racing:france") -print(res16) # >>> ['bike:4'] +print(res16) # >>> {'bike:4'} res17 = r.sdiff("bikes:racing:france", "bikes:racing:usa") -print(res17) # >>> ['bike:2', 'bike:3'] +print(res17) # >>> {'bike:2', 'bike:3'} # STEP_END # REMOVE_START -assert res13 == ['bike:1'] -assert res14 == ['bike:1', 'bike:2', 'bike:3', 'bike:4'] 
-assert res15 == []
-assert res16 == ['bike:4']
-assert res17 == ['bike:2', 'bike:3']
+assert res13 == {'bike:1'}
+assert res14 == {'bike:1', 'bike:2', 'bike:3', 'bike:4'}
+assert res15 == set()
+assert res16 == {'bike:4'}
+assert res17 == {'bike:2', 'bike:3'}
 r.delete("bikes:racing:france")
 r.delete("bikes:racing:usa")
 r.delete("bikes:racing:italy")
@@ -160,7 +160,7 @@
 print(res19)  # >>> bike:3
 
 res20 = r.smembers("bikes:racing:france")
-print(res20)  # >>> ['bike:2', 'bike:4', 'bike:5']
+print(res20)  # >>> {'bike:2', 'bike:4', 'bike:5'}
 
 res21 = r.srandmember("bikes:racing:france")
 print(res21)  # >>> bike:4
diff --git a/doctests/query_agg.py b/doctests/query_agg.py
new file mode 100644
index 0000000000..4fa8f14b84
--- /dev/null
+++ b/doctests/query_agg.py
@@ -0,0 +1,103 @@
+# EXAMPLE: query_agg
+# HIDE_START
+import json
+import redis
+from redis.commands.json.path import Path
+from redis.commands.search import Search
+from redis.commands.search.aggregation import AggregateRequest
+from redis.commands.search.field import NumericField, TagField
+from redis.commands.search.indexDefinition import IndexDefinition, IndexType
+import redis.commands.search.reducers as reducers
+
+r = redis.Redis(decode_responses=True)
+
+# create index
+schema = (
+    TagField("$.condition", as_name="condition"),
+    NumericField("$.price", as_name="price"),
+)
+
+index = r.ft("idx:bicycle")
+index.create_index(
+    schema,
+    definition=IndexDefinition(prefix=["bicycle:"], index_type=IndexType.JSON),
+)
+
+# load data
+with open("data/query_em.json") as f:
+    bicycles = json.load(f)
+
+pipeline = r.pipeline(transaction=False)
+for bid, bicycle in enumerate(bicycles):
+    pipeline.json().set(f'bicycle:{bid}', Path.root_path(), bicycle)
+pipeline.execute()
+# HIDE_END
+
+# STEP_START agg1
+search = Search(r, index_name="idx:bicycle")
+aggregate_request = AggregateRequest(query='@condition:{new}') \
+    .load('__key', 'price') \
+    .apply(discounted='@price - (@price * 0.1)')
+res = 
search.aggregate(aggregate_request) +print(len(res.rows)) # >>> 5 +print(res.rows) # >>> [['__key', 'bicycle:0', ... +#[['__key', 'bicycle:0', 'price', '270', 'discounted', '243'], +# ['__key', 'bicycle:5', 'price', '810', 'discounted', '729'], +# ['__key', 'bicycle:6', 'price', '2300', 'discounted', '2070'], +# ['__key', 'bicycle:7', 'price', '430', 'discounted', '387'], +# ['__key', 'bicycle:8', 'price', '1200', 'discounted', '1080']] +# REMOVE_START +assert len(res.rows) == 5 +# REMOVE_END +# STEP_END + +# STEP_START agg2 +search = Search(r, index_name="idx:bicycle") +aggregate_request = AggregateRequest(query='*') \ + .load('price') \ + .apply(price_category='@price<1000') \ + .group_by('@condition', reducers.sum('@price_category').alias('num_affordable')) +res = search.aggregate(aggregate_request) +print(len(res.rows)) # >>> 3 +print(res.rows) # >>> +#[['condition', 'refurbished', 'num_affordable', '1'], +# ['condition', 'used', 'num_affordable', '1'], +# ['condition', 'new', 'num_affordable', '3']] +# REMOVE_START +assert len(res.rows) == 3 +# REMOVE_END +# STEP_END + +# STEP_START agg3 +search = Search(r, index_name="idx:bicycle") +aggregate_request = AggregateRequest(query='*') \ + .apply(type="'bicycle'") \ + .group_by('@type', reducers.count().alias('num_total')) +res = search.aggregate(aggregate_request) +print(len(res.rows)) # >>> 1 +print(res.rows) # >>> [['type', 'bicycle', 'num_total', '10']] +# REMOVE_START +assert len(res.rows) == 1 +# REMOVE_END +# STEP_END + +# STEP_START agg4 +search = Search(r, index_name="idx:bicycle") +aggregate_request = AggregateRequest(query='*') \ + .load('__key') \ + .group_by('@condition', reducers.tolist('__key').alias('bicycles')) +res = search.aggregate(aggregate_request) +print(len(res.rows)) # >>> 3 +print(res.rows) # >>> +#[['condition', 'refurbished', 'bicycles', ['bicycle:9']], +# ['condition', 'used', 'bicycles', ['bicycle:1', 'bicycle:2', 'bicycle:3', 'bicycle:4']], +# ['condition', 'new', 'bicycles', 
['bicycle:5', 'bicycle:6', 'bicycle:7', 'bicycle:0', 'bicycle:8']]] +# REMOVE_START +assert len(res.rows) == 3 +# REMOVE_END +# STEP_END + +# REMOVE_START +# destroy index and data +r.ft("idx:bicycle").dropindex(delete_documents=True) +# REMOVE_END diff --git a/doctests/query_combined.py b/doctests/query_combined.py new file mode 100644 index 0000000000..a17f19417c --- /dev/null +++ b/doctests/query_combined.py @@ -0,0 +1,124 @@ +# EXAMPLE: query_combined +# HIDE_START +import json +import numpy as np +import redis +import warnings +from redis.commands.json.path import Path +from redis.commands.search.field import NumericField, TagField, TextField, VectorField +from redis.commands.search.indexDefinition import IndexDefinition, IndexType +from redis.commands.search.query import Query +from sentence_transformers import SentenceTransformer + + +def embed_text(model, text): + return np.array(model.encode(text)).astype(np.float32).tobytes() + +warnings.filterwarnings("ignore", category=FutureWarning, message=r".*clean_up_tokenization_spaces.*") +model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2') +query = "Bike for small kids" +query_vector = embed_text(model, query) + +r = redis.Redis(decode_responses=True) + +# create index +schema = ( + TextField("$.description", no_stem=True, as_name="model"), + TagField("$.condition", as_name="condition"), + NumericField("$.price", as_name="price"), + VectorField( + "$.description_embeddings", + "FLAT", + { + "TYPE": "FLOAT32", + "DIM": 384, + "DISTANCE_METRIC": "COSINE", + }, + as_name="vector", + ), +) + +index = r.ft("idx:bicycle") +index.create_index( + schema, + definition=IndexDefinition(prefix=["bicycle:"], index_type=IndexType.JSON), +) + +# load data +with open("data/query_vector.json") as f: + bicycles = json.load(f) + +pipeline = r.pipeline(transaction=False) +for bid, bicycle in enumerate(bicycles): + pipeline.json().set(f'bicycle:{bid}', Path.root_path(), bicycle) +pipeline.execute() +# HIDE_END + +# 
STEP_START combined1 +q = Query("@price:[500 1000] @condition:{new}") +res = index.search(q) +print(res.total) # >>> 1 +# REMOVE_START +assert res.total == 1 +# REMOVE_END +# STEP_END + +# STEP_START combined2 +q = Query("kids @price:[500 1000] @condition:{used}") +res = index.search(q) +print(res.total) # >>> 1 +# REMOVE_START +assert res.total == 1 +# REMOVE_END +# STEP_END + +# STEP_START combined3 +q = Query("(kids | small) @condition:{used}") +res = index.search(q) +print(res.total) # >>> 2 +# REMOVE_START +assert res.total == 2 +# REMOVE_END +# STEP_END + +# STEP_START combined4 +q = Query("@description:(kids | small) @condition:{used}") +res = index.search(q) +print(res.total) # >>> 0 +# REMOVE_START +assert res.total == 0 +# REMOVE_END +# STEP_END + +# STEP_START combined5 +q = Query("@description:(kids | small) @condition:{new | used}") +res = index.search(q) +print(res.total) # >>> 0 +# REMOVE_START +assert res.total == 0 +# REMOVE_END +# STEP_END + +# STEP_START combined6 +q = Query("@price:[500 1000] -@condition:{new}") +res = index.search(q) +print(res.total) # >>> 2 +# REMOVE_START +assert res.total == 2 +# REMOVE_END +# STEP_END + +# STEP_START combined7 +q = Query("(@price:[500 1000] -@condition:{new})=>[KNN 3 @vector $query_vector]").dialect(2) +# put query string here +res = index.search(q,{ 'query_vector': query_vector }) +print(res.total) # >>> 2 +# REMOVE_START +assert res.total == 2 +# REMOVE_END +# STEP_END + +# REMOVE_START +# destroy index and data +r.ft("idx:bicycle").dropindex(delete_documents=True) +# REMOVE_END diff --git a/redis/_parsers/helpers.py b/redis/_parsers/helpers.py index 7494c79210..6832100bb6 100644 --- a/redis/_parsers/helpers.py +++ b/redis/_parsers/helpers.py @@ -785,6 +785,9 @@ def string_keys_to_dict(key_string, callback): _RedisCallbacksRESP2 = { + **string_keys_to_dict( + "SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set() + ), **string_keys_to_dict( "ZDIFF ZINTER ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE 
ZRANK ZREVRANGE " "ZREVRANGEBYSCORE ZREVRANK ZUNION", @@ -829,6 +832,9 @@ def string_keys_to_dict(key_string, callback): _RedisCallbacksRESP3 = { + **string_keys_to_dict( + "SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set() + ), **string_keys_to_dict( "ZRANGE ZINTER ZPOPMAX ZPOPMIN ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE " "ZUNION HGETALL XREADGROUP", diff --git a/redis/asyncio/client.py b/redis/asyncio/client.py index 039ebfdfae..9508849703 100644 --- a/redis/asyncio/client.py +++ b/redis/asyncio/client.py @@ -1423,6 +1423,10 @@ async def _execute_transaction( # noqa: C901 if not isinstance(r, Exception): args, options = cmd command_name = args[0] + + # Remove keys entry, it needs only for cache. + options.pop("keys", None) + if command_name in self.response_callbacks: r = self.response_callbacks[command_name](r, **options) if inspect.isawaitable(r): diff --git a/redis/cluster.py b/redis/cluster.py index fbf5428d40..9dcbad7fc1 100644 --- a/redis/cluster.py +++ b/redis/cluster.py @@ -1163,6 +1163,10 @@ def _execute_command(self, target_node, *args, **kwargs): asking = False connection.send_command(*args, **kwargs) response = redis_node.parse_response(connection, command, **kwargs) + + # Remove keys entry, it needs only for cache. 
+ kwargs.pop("keys", None) + if command in self.cluster_response_callbacks: response = self.cluster_response_callbacks[command]( response, **kwargs diff --git a/redis/commands/search/aggregation.py b/redis/commands/search/aggregation.py index 42c3547b0b..5638f1d662 100644 --- a/redis/commands/search/aggregation.py +++ b/redis/commands/search/aggregation.py @@ -112,6 +112,7 @@ def __init__(self, query: str = "*") -> None: self._cursor = [] self._dialect = None self._add_scores = False + self._scorer = "TFIDF" def load(self, *fields: List[str]) -> "AggregateRequest": """ @@ -300,6 +301,17 @@ def add_scores(self) -> "AggregateRequest": self._add_scores = True return self + def scorer(self, scorer: str) -> "AggregateRequest": + """ + Use a different scoring function to evaluate document relevance. + Default is `TFIDF`. + + :param scorer: The scoring function to use + (e.g. `TFIDF.DOCNORM` or `BM25`) + """ + self._scorer = scorer + return self + def verbatim(self) -> "AggregateRequest": self._verbatim = True return self @@ -323,6 +335,9 @@ def build_args(self) -> List[str]: if self._verbatim: ret.append("VERBATIM") + if self._scorer: + ret.extend(["SCORER", self._scorer]) + if self._add_scores: ret.append("ADDSCORES") @@ -332,6 +347,7 @@ def build_args(self) -> List[str]: if self._loadall: ret.append("LOAD") ret.append("*") + elif self._loadfields: ret.append("LOAD") ret.append(str(len(self._loadfields))) diff --git a/redis/connection.py b/redis/connection.py index 6aae2101c2..40f2d29722 100644 --- a/redis/connection.py +++ b/redis/connection.py @@ -38,7 +38,6 @@ from .utils import ( CRYPTOGRAPHY_AVAILABLE, HIREDIS_AVAILABLE, - HIREDIS_PACK_AVAILABLE, SSL_AVAILABLE, compare_versions, ensure_string, @@ -314,7 +313,7 @@ def __del__(self): def _construct_command_packer(self, packer): if packer is not None: return packer - elif HIREDIS_PACK_AVAILABLE: + elif HIREDIS_AVAILABLE: return HiredisRespSerializer() else: return PythonRespSerializer(self._buffer_cutoff, 
self.encoder.encode) diff --git a/redis/utils.py b/redis/utils.py index b4e9afb054..8693fb3c8f 100644 --- a/redis/utils.py +++ b/redis/utils.py @@ -8,10 +8,10 @@ # Only support Hiredis >= 3.0: HIREDIS_AVAILABLE = int(hiredis.__version__.split(".")[0]) >= 3 - HIREDIS_PACK_AVAILABLE = hasattr(hiredis, "pack_command") + if not HIREDIS_AVAILABLE: + raise ImportError("hiredis package should be >= 3.0.0") except ImportError: HIREDIS_AVAILABLE = False - HIREDIS_PACK_AVAILABLE = False try: import ssl # noqa diff --git a/setup.py b/setup.py index 0c968a4d9f..6ece549b52 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ long_description_content_type="text/markdown", keywords=["Redis", "key-value store", "database"], license="MIT", - version="5.1.0b7", + version="5.2.0", packages=find_packages( include=[ "redis", diff --git a/tests/test_asyncio/test_cluster.py b/tests/test_asyncio/test_cluster.py index e480db332b..f3b76b80c9 100644 --- a/tests/test_asyncio/test_cluster.py +++ b/tests/test_asyncio/test_cluster.py @@ -1752,38 +1752,38 @@ async def test_cluster_rpoplpush(self, r: RedisCluster) -> None: async def test_cluster_sdiff(self, r: RedisCluster) -> None: await r.sadd("{foo}a", "1", "2", "3") - assert set(await r.sdiff("{foo}a", "{foo}b")) == {b"1", b"2", b"3"} + assert await r.sdiff("{foo}a", "{foo}b") == {b"1", b"2", b"3"} await r.sadd("{foo}b", "2", "3") - assert await r.sdiff("{foo}a", "{foo}b") == [b"1"] + assert await r.sdiff("{foo}a", "{foo}b") == {b"1"} async def test_cluster_sdiffstore(self, r: RedisCluster) -> None: await r.sadd("{foo}a", "1", "2", "3") assert await r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 3 - assert set(await r.smembers("{foo}c")) == {b"1", b"2", b"3"} + assert await r.smembers("{foo}c") == {b"1", b"2", b"3"} await r.sadd("{foo}b", "2", "3") assert await r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 1 - assert await r.smembers("{foo}c") == [b"1"] + assert await r.smembers("{foo}c") == {b"1"} async def test_cluster_sinter(self, r: 
RedisCluster) -> None: await r.sadd("{foo}a", "1", "2", "3") - assert await r.sinter("{foo}a", "{foo}b") == [] + assert await r.sinter("{foo}a", "{foo}b") == set() await r.sadd("{foo}b", "2", "3") - assert set(await r.sinter("{foo}a", "{foo}b")) == {b"2", b"3"} + assert await r.sinter("{foo}a", "{foo}b") == {b"2", b"3"} async def test_cluster_sinterstore(self, r: RedisCluster) -> None: await r.sadd("{foo}a", "1", "2", "3") assert await r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 0 - assert await r.smembers("{foo}c") == [] + assert await r.smembers("{foo}c") == set() await r.sadd("{foo}b", "2", "3") assert await r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 2 - assert set(await r.smembers("{foo}c")) == {b"2", b"3"} + assert await r.smembers("{foo}c") == {b"2", b"3"} async def test_cluster_smove(self, r: RedisCluster) -> None: await r.sadd("{foo}a", "a1", "a2") await r.sadd("{foo}b", "b1", "b2") assert await r.smove("{foo}a", "{foo}b", "a1") - assert await r.smembers("{foo}a") == [b"a2"] - assert set(await r.smembers("{foo}b")) == {b"b1", b"b2", b"a1"} + assert await r.smembers("{foo}a") == {b"a2"} + assert await r.smembers("{foo}b") == {b"b1", b"b2", b"a1"} async def test_cluster_sunion(self, r: RedisCluster) -> None: await r.sadd("{foo}a", "1", "2") diff --git a/tests/test_asyncio/test_commands.py b/tests/test_asyncio/test_commands.py index 28c3094cdb..f6ed07fab5 100644 --- a/tests/test_asyncio/test_commands.py +++ b/tests/test_asyncio/test_commands.py @@ -1415,34 +1415,34 @@ async def test_scard(self, r: redis.Redis): @pytest.mark.onlynoncluster async def test_sdiff(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") - assert set(await r.sdiff("a", "b")) == {b"1", b"2", b"3"} + assert await r.sdiff("a", "b") == {b"1", b"2", b"3"} await r.sadd("b", "2", "3") - assert await r.sdiff("a", "b") == [b"1"] + assert await r.sdiff("a", "b") == {b"1"} @pytest.mark.onlynoncluster async def test_sdiffstore(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert 
await r.sdiffstore("c", "a", "b") == 3 - assert set(await r.smembers("c")) == {b"1", b"2", b"3"} + assert await r.smembers("c") == {b"1", b"2", b"3"} await r.sadd("b", "2", "3") assert await r.sdiffstore("c", "a", "b") == 1 - assert await r.smembers("c") == [b"1"] + assert await r.smembers("c") == {b"1"} @pytest.mark.onlynoncluster async def test_sinter(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") - assert await r.sinter("a", "b") == [] + assert await r.sinter("a", "b") == set() await r.sadd("b", "2", "3") - assert set(await r.sinter("a", "b")) == {b"2", b"3"} + assert await r.sinter("a", "b") == {b"2", b"3"} @pytest.mark.onlynoncluster async def test_sinterstore(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert await r.sinterstore("c", "a", "b") == 0 - assert await r.smembers("c") == [] + assert await r.smembers("c") == set() await r.sadd("b", "2", "3") assert await r.sinterstore("c", "a", "b") == 2 - assert set(await r.smembers("c")) == {b"2", b"3"} + assert await r.smembers("c") == {b"2", b"3"} async def test_sismember(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") @@ -1460,8 +1460,8 @@ async def test_smove(self, r: redis.Redis): await r.sadd("a", "a1", "a2") await r.sadd("b", "b1", "b2") assert await r.smove("a", "b", "a1") - assert await r.smembers("a") == [b"a2"] - assert set(await r.smembers("b")) == {b"b1", b"b2", b"a1"} + assert await r.smembers("a") == {b"a2"} + assert await r.smembers("b") == {b"b1", b"b2", b"a1"} async def test_spop(self, r: redis.Redis): s = [b"1", b"2", b"3"] diff --git a/tests/test_asyncio/test_pipeline.py b/tests/test_asyncio/test_pipeline.py index 4b29360d72..31759d84a3 100644 --- a/tests/test_asyncio/test_pipeline.py +++ b/tests/test_asyncio/test_pipeline.py @@ -417,3 +417,13 @@ async def test_pipeline_discard(self, r): response = await pipe.execute() assert response[0] assert await r.get("foo") == b"bar" + + @pytest.mark.onlynoncluster + async def test_send_set_commands_over_async_pipeline(self, 
r: redis.asyncio.Redis):
+        pipe = r.pipeline()
+        pipe.hset("hash:1", "foo", "bar")
+        pipe.hset("hash:1", "bar", "foo")
+        pipe.hset("hash:1", "baz", "bar")
+        pipe.hgetall("hash:1")
+        resp = await pipe.execute()
+        assert resp == [1, 1, 1, {b"bar": b"foo", b"baz": b"bar", b"foo": b"bar"}]
diff --git a/tests/test_asyncio/test_search.py b/tests/test_asyncio/test_search.py
index 0e6fe22131..fb813b0bc7 100644
--- a/tests/test_asyncio/test_search.py
+++ b/tests/test_asyncio/test_search.py
@@ -1556,6 +1556,61 @@ async def test_aggregations_add_scores(decoded_r: redis.Redis):
     assert res.rows[1] == ["__score", "0.2"]
 
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("2.10.05", "search")
+async def test_aggregations_hybrid_scoring(decoded_r: redis.Redis):
+    assert await decoded_r.ft().create_index(
+        (
+            TextField("name", sortable=True, weight=5.0),
+            TextField("description", sortable=True, weight=5.0),
+            VectorField(
+                "vector",
+                "HNSW",
+                {"TYPE": "FLOAT32", "DIM": 2, "DISTANCE_METRIC": "COSINE"},
+            ),
+        )
+    )
+
+    assert await decoded_r.hset(
+        "doc1",
+        mapping={
+            "name": "cat book",
+            "description": "an animal book about cats",
+            "vector": np.array([0.1, 0.2]).astype(np.float32).tobytes(),
+        },
+    )
+    assert await decoded_r.hset(
+        "doc2",
+        mapping={
+            "name": "dog book",
+            "description": "an animal book about dogs",
+            "vector": np.array([0.2, 0.1]).astype(np.float32).tobytes(),
+        },
+    )
+
+    query_string = "(@description:animal)=>[KNN 3 @vector $vec_param AS dist]"
+    req = (
+        aggregations.AggregateRequest(query_string)
+        .scorer("BM25")
+        .add_scores()
+        .apply(hybrid_score="@__score + @dist")
+        .load("*")
+        .dialect(4)
+    )
+
+    res = await decoded_r.ft().aggregate(
+        req,
+        query_params={"vec_param": np.array([0.11, 0.22]).astype(np.float32).tobytes()},
+    )
+
+    if isinstance(res, dict):
+        assert len(res["results"]) == 2
+    else:
+        assert len(res.rows) == 2
+        for row in res.rows:
+            assert len(row) == 6
+
+
 @pytest.mark.redismod
 @skip_if_redis_enterprise()
 async def 
test_search_commands_in_pipeline(decoded_r: redis.Redis): diff --git a/tests/test_cache.py b/tests/test_cache.py index 1803646094..67733dc9af 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -41,7 +41,7 @@ def r(request): @pytest.mark.skipif(HIREDIS_AVAILABLE, reason="PythonParser only") @pytest.mark.onlynoncluster -# @skip_if_resp_version(2) +@skip_if_resp_version(2) @skip_if_server_version_lt("7.4.0") class TestCache: @pytest.mark.parametrize( diff --git a/tests/test_cluster.py b/tests/test_cluster.py index c4b3188050..fe5852d1fb 100644 --- a/tests/test_cluster.py +++ b/tests/test_cluster.py @@ -1865,49 +1865,49 @@ def test_cluster_rpoplpush(self, r): def test_cluster_sdiff(self, r): r.sadd("{foo}a", "1", "2", "3") - assert set(r.sdiff("{foo}a", "{foo}b")) == {b"1", b"2", b"3"} + assert r.sdiff("{foo}a", "{foo}b") == {b"1", b"2", b"3"} r.sadd("{foo}b", "2", "3") - assert r.sdiff("{foo}a", "{foo}b") == [b"1"] + assert r.sdiff("{foo}a", "{foo}b") == {b"1"} def test_cluster_sdiffstore(self, r): r.sadd("{foo}a", "1", "2", "3") assert r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 3 - assert set(r.smembers("{foo}c")) == {b"1", b"2", b"3"} + assert r.smembers("{foo}c") == {b"1", b"2", b"3"} r.sadd("{foo}b", "2", "3") assert r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 1 - assert r.smembers("{foo}c") == [b"1"] + assert r.smembers("{foo}c") == {b"1"} def test_cluster_sinter(self, r): r.sadd("{foo}a", "1", "2", "3") - assert r.sinter("{foo}a", "{foo}b") == [] + assert r.sinter("{foo}a", "{foo}b") == set() r.sadd("{foo}b", "2", "3") - assert set(r.sinter("{foo}a", "{foo}b")) == {b"2", b"3"} + assert r.sinter("{foo}a", "{foo}b") == {b"2", b"3"} def test_cluster_sinterstore(self, r): r.sadd("{foo}a", "1", "2", "3") assert r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 0 - assert r.smembers("{foo}c") == [] + assert r.smembers("{foo}c") == set() r.sadd("{foo}b", "2", "3") assert r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 2 - assert set(r.smembers("{foo}c")) == 
{b"2", b"3"} + assert r.smembers("{foo}c") == {b"2", b"3"} def test_cluster_smove(self, r): r.sadd("{foo}a", "a1", "a2") r.sadd("{foo}b", "b1", "b2") assert r.smove("{foo}a", "{foo}b", "a1") - assert r.smembers("{foo}a") == [b"a2"] - assert set(r.smembers("{foo}b")) == {b"b1", b"b2", b"a1"} + assert r.smembers("{foo}a") == {b"a2"} + assert r.smembers("{foo}b") == {b"b1", b"b2", b"a1"} def test_cluster_sunion(self, r): r.sadd("{foo}a", "1", "2") r.sadd("{foo}b", "2", "3") - assert set(r.sunion("{foo}a", "{foo}b")) == {b"1", b"2", b"3"} + assert r.sunion("{foo}a", "{foo}b") == {b"1", b"2", b"3"} def test_cluster_sunionstore(self, r): r.sadd("{foo}a", "1", "2") r.sadd("{foo}b", "2", "3") assert r.sunionstore("{foo}c", "{foo}a", "{foo}b") == 3 - assert set(r.smembers("{foo}c")) == {b"1", b"2", b"3"} + assert r.smembers("{foo}c") == {b"1", b"2", b"3"} @skip_if_server_version_lt("6.2.0") def test_cluster_zdiff(self, r): diff --git a/tests/test_commands.py b/tests/test_commands.py index 74e9c1c88e..4cad4c14b6 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -2247,25 +2247,25 @@ def test_scard(self, r): @pytest.mark.onlynoncluster def test_sdiff(self, r): r.sadd("a", "1", "2", "3") - assert set(r.sdiff("a", "b")) == {b"1", b"2", b"3"} + assert r.sdiff("a", "b") == {b"1", b"2", b"3"} r.sadd("b", "2", "3") - assert r.sdiff("a", "b") == [b"1"] + assert r.sdiff("a", "b") == {b"1"} @pytest.mark.onlynoncluster def test_sdiffstore(self, r): r.sadd("a", "1", "2", "3") assert r.sdiffstore("c", "a", "b") == 3 - assert set(r.smembers("c")) == {b"1", b"2", b"3"} + assert r.smembers("c") == {b"1", b"2", b"3"} r.sadd("b", "2", "3") assert r.sdiffstore("c", "a", "b") == 1 - assert r.smembers("c") == [b"1"] + assert r.smembers("c") == {b"1"} @pytest.mark.onlynoncluster def test_sinter(self, r): r.sadd("a", "1", "2", "3") - assert r.sinter("a", "b") == [] + assert r.sinter("a", "b") == set() r.sadd("b", "2", "3") - assert set(r.sinter("a", "b")) == {b"2", b"3"} + assert 
r.sinter("a", "b") == {b"2", b"3"} @pytest.mark.onlynoncluster @skip_if_server_version_lt("7.0.0") @@ -2280,10 +2280,10 @@ def test_sintercard(self, r): def test_sinterstore(self, r): r.sadd("a", "1", "2", "3") assert r.sinterstore("c", "a", "b") == 0 - assert r.smembers("c") == [] + assert r.smembers("c") == set() r.sadd("b", "2", "3") assert r.sinterstore("c", "a", "b") == 2 - assert set(r.smembers("c")) == {b"2", b"3"} + assert r.smembers("c") == {b"2", b"3"} def test_sismember(self, r): r.sadd("a", "1", "2", "3") @@ -2308,8 +2308,8 @@ def test_smove(self, r): r.sadd("a", "a1", "a2") r.sadd("b", "b1", "b2") assert r.smove("a", "b", "a1") - assert r.smembers("a") == [b"a2"] - assert set(r.smembers("b")) == {b"b1", b"b2", b"a1"} + assert r.smembers("a") == {b"a2"} + assert r.smembers("b") == {b"b1", b"b2", b"a1"} def test_spop(self, r): s = [b"1", b"2", b"3"] diff --git a/tests/test_encoding.py b/tests/test_encoding.py index 331cd5108c..0fcb256cfb 100644 --- a/tests/test_encoding.py +++ b/tests/test_encoding.py @@ -1,7 +1,5 @@ import pytest import redis -from redis.connection import Connection -from redis.utils import HIREDIS_PACK_AVAILABLE from .conftest import _get_client @@ -75,22 +73,6 @@ def test_replace(self, request): assert r.get("a") == "foo\ufffd" -@pytest.mark.skipif( - HIREDIS_PACK_AVAILABLE, - reason="Packing via hiredis does not preserve memoryviews", -) -class TestMemoryviewsAreNotPacked: - def test_memoryviews_are_not_packed(self): - c = Connection() - arg = memoryview(b"some_arg") - arg_list = ["SOME_COMMAND", arg] - cmd = c.pack_command(*arg_list) - assert cmd[1] is arg - cmds = c.pack_commands([arg_list, arg_list]) - assert cmds[1] is arg - assert cmds[3] is arg - - class TestCommandsAreNotEncoded: @pytest.fixture() def r(self, request): diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 7f10fcad4f..be7784ad0b 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -412,3 +412,13 @@ def test_pipeline_discard(self, r): 
         response = pipe.execute()
         assert response[0]
         assert r.get("foo") == b"bar"
+
+    @pytest.mark.onlynoncluster
+    def test_send_set_commands_over_pipeline(self, r: redis.Redis):
+        pipe = r.pipeline()
+        pipe.hset("hash:1", "foo", "bar")
+        pipe.hset("hash:1", "bar", "foo")
+        pipe.hset("hash:1", "baz", "bar")
+        pipe.hgetall("hash:1")
+        resp = pipe.execute()
+        assert resp == [1, 1, 1, {b"bar": b"foo", b"baz": b"bar", b"foo": b"bar"}]
diff --git a/tests/test_search.py b/tests/test_search.py
index dde59f0f87..0f0e7bb309 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -1466,6 +1466,61 @@ def test_aggregations_add_scores(client):
     assert res.rows[1] == ["__score", "0.2"]
 
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("2.10.05", "search")
+def test_aggregations_hybrid_scoring(client):
+    client.ft().create_index(
+        (
+            TextField("name", sortable=True, weight=5.0),
+            TextField("description", sortable=True, weight=5.0),
+            VectorField(
+                "vector",
+                "HNSW",
+                {"TYPE": "FLOAT32", "DIM": 2, "DISTANCE_METRIC": "COSINE"},
+            ),
+        )
+    )
+
+    client.hset(
+        "doc1",
+        mapping={
+            "name": "cat book",
+            "description": "an animal book about cats",
+            "vector": np.array([0.1, 0.2]).astype(np.float32).tobytes(),
+        },
+    )
+    client.hset(
+        "doc2",
+        mapping={
+            "name": "dog book",
+            "description": "an animal book about dogs",
+            "vector": np.array([0.2, 0.1]).astype(np.float32).tobytes(),
+        },
+    )
+
+    query_string = "(@description:animal)=>[KNN 3 @vector $vec_param AS dist]"
+    req = (
+        aggregations.AggregateRequest(query_string)
+        .scorer("BM25")
+        .add_scores()
+        .apply(hybrid_score="@__score + @dist")
+        .load("*")
+        .dialect(4)
+    )
+
+    res = client.ft().aggregate(
+        req,
+        query_params={"vec_param": np.array([0.11, 0.21]).astype(np.float32).tobytes()},
+    )
+
+    if isinstance(res, dict):
+        assert len(res["results"]) == 2
+    else:
+        assert len(res.rows) == 2
+        for row in res.rows:
+            assert len(row) == 6
+
+
 @pytest.mark.redismod
 @skip_ifmodversion_lt("2.0.0", "search")
 def 
test_index_definition(client):