pyproject.toml: 2 additions & 1 deletion
@@ -3,7 +3,7 @@ name = "redisvl"
version = "0.9.1"
description = "Python client library and CLI for using Redis as a vector database"
authors = [{ name = "Redis Inc.", email = "applied.ai@redis.com" }]
-requires-python = ">=3.9,<3.14"
+requires-python = ">=3.9.2,<3.14"
readme = "README.md"
license = "MIT"
keywords = [
@@ -39,6 +39,7 @@ nltk = ["nltk>=3.8.1,<4"]
cohere = ["cohere>=4.44"]
voyageai = ["voyageai>=0.2.2"]
sentence-transformers = ["sentence-transformers>=3.4.0,<4"]
+langcache = ["langcache>=0.9.0"]
vertexai = [
"google-cloud-aiplatform>=1.26,<2.0.0",
"protobuf>=5.28.0,<6.0.0",
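
The new `langcache` extra keeps the wrapper's dependency optional. Below is a minimal sketch of how consuming code might tolerate the extra not being installed; the fallback behavior is an assumption for illustration, not part of this change:

```python
# Sketch: guard against the optional `langcache` package being absent.
# Installing it is assumed to use the standard extras syntax,
# e.g. `pip install "redisvl[langcache]"`.
try:
    import langcache  # provided by the new optional dependency group
except ImportError:  # extra not installed
    langcache = None  # real code would likely raise a helpful error instead
```
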
redisvl/extensions/cache/llm/__init__.py: 8 additions & 1 deletion
@@ -4,11 +4,18 @@
This module provides LLM cache implementations for RedisVL.
"""

+from redisvl.extensions.cache.llm.langcache import LangCacheWrapper
from redisvl.extensions.cache.llm.schema import (
    CacheEntry,
    CacheHit,
    SemanticCacheIndexSchema,
)
from redisvl.extensions.cache.llm.semantic import SemanticCache

__all__ = ["SemanticCache", "CacheEntry", "CacheHit", "SemanticCacheIndexSchema"]
__all__ = [
"SemanticCache",
"LangCacheWrapper",
"CacheEntry",
"CacheHit",
"SemanticCacheIndexSchema",
]
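
With `LangCacheWrapper` re-exported from the package `__init__`, it becomes importable alongside `SemanticCache`. A small sketch of the new public import surface; the wrapper's constructor is not shown in this diff, so nothing is instantiated:

```python
# Exercise only the re-export added in this change; constructor
# arguments for LangCacheWrapper are not part of the diff.
from redisvl.extensions.cache.llm import LangCacheWrapper, SemanticCache

print(LangCacheWrapper.__module__)  # redisvl.extensions.cache.llm.langcache
```
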