Commit 8eeca7d

Merge pull request #2 from memfuse/dev
2 parents 57a396d + bb111e2

11 files changed: +189 −53 lines

examples/04_gradio_chatbot.py

Lines changed: 14 additions & 3 deletions

@@ -8,6 +8,13 @@
 
 # Global username variable
 USERNAME = "Jane Doe"
+SYSTEM_MESSAGE = (
+    "You are a helpful AI assistant with access to a persistent long-term memory. "
+    "You can recall, reference, and use information from previous conversations with the user. "
+    "Leverage this memory to provide more relevant, helpful, and context-aware answers. "
+    "If you remember something from earlier, feel free to mention it. "
+    "If the user mentions something from a previous interaction that you don't remember, please apologize and say you must have forgotten."
+)
 
 def main():
     # Make MemFuse base URL configurable via environment variable
@@ -49,8 +56,12 @@ def memfuse_chatbot(message, history):
     for i, item in enumerate(history):
         print(f"DEBUG: History item {i}: type={type(item)}, content={item}")
         if isinstance(item, dict):
-            # History is already in message format
-            messages_history.append(item)
+            # History is already in message format - extract only role and content
+            if 'role' in item and 'content' in item:
+                messages_history.append({
+                    "role": item["role"],
+                    "content": item["content"]
+                })
         elif isinstance(item, (list, tuple)):
             # Handle tuple/list format - could be (user_msg, assistant_msg) or more items
             if len(item) >= 2:
@@ -66,7 +77,7 @@ def memfuse_chatbot(message, history):
             print(f"Unknown history item format: {type(item)}: {item}")
 
     print(f"DEBUG: Final messages_history: {messages_history}")
-    current_messages_for_api = messages_history + [{"role": "user", "content": message}]
+    current_messages_for_api = [{"role": "system", "content": SYSTEM_MESSAGE}] + messages_history + [{"role": "user", "content": message}]
     print(f"DEBUG: Sending to API: {current_messages_for_api}")
 
     try:
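
Taken together, the two hunks change what the example sends to the model: dict history entries are stripped to bare role/content pairs (Gradio message dicts can carry extra keys that chat APIs reject), and SYSTEM_MESSAGE now leads every request. A standalone sketch of the resulting flow; the body of the tuple branch is elided in the hunk above, so its handling here is an assumption:

SYSTEM_MESSAGE = "You are a helpful AI assistant with access to a persistent long-term memory."

def build_api_messages(message: str, history: list) -> list:
    """Sketch: normalize Gradio history and prepend the system prompt."""
    messages_history = []
    for item in history:
        if isinstance(item, dict):
            # Keep only role/content; drop any extra keys Gradio attaches.
            if "role" in item and "content" in item:
                messages_history.append({"role": item["role"], "content": item["content"]})
        elif isinstance(item, (list, tuple)) and len(item) >= 2:
            # Assumed handling of the legacy (user_msg, assistant_msg) pair format.
            user_msg, assistant_msg = item[0], item[1]
            if user_msg:
                messages_history.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages_history.append({"role": "assistant", "content": assistant_msg})
    return ([{"role": "system", "content": SYSTEM_MESSAGE}]
            + messages_history
            + [{"role": "user", "content": message}])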

poetry.lock

Lines changed: 34 additions & 1 deletion
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 4 additions & 2 deletions

@@ -44,10 +44,11 @@ ipykernel = "^6.29.5"
 datasets = "^3.6.0"
 colorlog = "^6.9.0"
 twine = "^6.1.0"
+poetry-dynamic-versioning = "^1.8.2"
 
 [build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core", "poetry-dynamic-versioning"]
+build-backend = "poetry_dynamic_versioning.backend"
 
 [tool.poetry-dynamic-versioning]
 enable = true
@@ -56,6 +57,7 @@ style = "pep440"
 
 [tool.poetry-dynamic-versioning.substitution]
 files = ["src/memfuse/__init__.py"]
+patterns = ["(^__version__\\s*(?::.*?)?\\s*=\\s*['\"])[^'\"]*(['\"])"]
 
 [tool.pytest.ini_options]
 markers = [
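
The new substitution pattern pins down which span of __init__.py the plugin rewrites at build time: group 1 captures everything up to the opening quote (tolerating an optional type annotation after __version__), group 2 captures the closing quote, and whatever sits between them is replaced by the resolved version. A quick check of the match using Python's re; the 0.1.2 version is illustrative:

import re

# The pattern added above, as a raw Python string.
pattern = r"(^__version__\s*(?::.*?)?\s*=\s*['\"])[^'\"]*(['\"])"

source = '__version__ = "{{version}}"'
# Emulate the substitution: keep groups 1 and 2, swap in a concrete version.
print(re.sub(pattern, r"\g<1>0.1.2\g<2>", source, flags=re.MULTILINE))
# -> __version__ = "0.1.2"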

src/memfuse/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 """MemFuse Python Client Library"""
 
-__version__ = "{{version}}"  # Or your actual version
+__version__ = "{{version}}"
 
 from .client import AsyncMemFuse, MemFuse
 from .memory import AsyncMemory, Memory

src/memfuse/api/messages.py

Lines changed: 13 additions & 6 deletions

@@ -33,11 +33,12 @@ def _get_method(self, endpoint_key: str) -> str:
         return self.ENDPOINTS[endpoint_key]['method']
 
     def _build_list_url(
-        self, 
-        session_id: str, 
+        self,
+        session_id: str,
         limit: Optional[int] = None,
         sort_by: Optional[str] = None,
-        order: Optional[str] = None
+        order: Optional[str] = None,
+        buffer_only: Optional[bool] = None
     ) -> str:
         """Build URL for list endpoint with optional query parameters."""
         query_params = []
@@ -47,7 +48,9 @@ def _build_list_url(
             query_params.append(f"sort_by={sort_by}")
         if order is not None:
             query_params.append(f"order={order}")
-
+        if buffer_only is not None:
+            query_params.append(f"buffer_only={str(buffer_only).lower()}")
+
         query_string = "&".join(query_params)
         endpoint = self._build_url('list', session_id=session_id)
         if query_string:
@@ -61,6 +64,7 @@ async def list(
         limit: Optional[int] = 20,
         sort_by: Optional[str] = "timestamp",
         order: Optional[str] = "desc",
+        buffer_only: Optional[bool] = None,
     ) -> Dict[str, Any]:
         """List all messages in a session.
 
@@ -69,11 +73,12 @@ async def list(
             limit: Maximum number of messages to return. Defaults to 20.
             sort_by: Field to sort messages by (e.g., "timestamp", "id"). Defaults to "timestamp".
             order: Sort order ("asc" or "desc"). Defaults to "desc".
+            buffer_only: If True, only return RoundBuffer data; if False, return HybridBuffer + SQLite data excluding RoundBuffer
 
         Returns:
             Response data
         """
-        url = self._build_list_url(session_id, limit, sort_by, order)
+        url = self._build_list_url(session_id, limit, sort_by, order, buffer_only)
         return await self.client._request(self._get_method('list'), url)
 
     async def add(self, session_id: str, messages: List[Dict[str, str]]) -> Dict[str, Any]:
@@ -159,6 +164,7 @@ def list_sync(
         limit: Optional[int] = 20,
         sort_by: Optional[str] = "timestamp",
         order: Optional[str] = "desc",
+        buffer_only: Optional[bool] = None,
     ) -> Dict[str, Any]:
         """List all messages in a session (sync version).
 
@@ -167,11 +173,12 @@ def list_sync(
             limit: Maximum number of messages to return. Defaults to 20.
             sort_by: Field to sort messages by (e.g., "timestamp", "id"). Defaults to "timestamp".
             order: Sort order ("asc" or "desc"). Defaults to "desc".
+            buffer_only: If True, only return RoundBuffer data; if False, return HybridBuffer + SQLite data excluding RoundBuffer
 
         Returns:
             Response data
         """
-        url = self._build_list_url(session_id, limit, sort_by, order)
+        url = self._build_list_url(session_id, limit, sort_by, order, buffer_only)
         return self.client._request_sync(self._get_method('list'), url)
 
     def add_sync(self, session_id: str, messages: List[Dict[str, str]]) -> Dict[str, Any]:
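
On the wire, buffer_only becomes one more query-string entry, lowercased so the server receives true/false. A standalone sketch of the assembled query string; the limit branch sits outside the hunks above, so it is assumed to mirror the others:

from typing import Optional

def build_list_query(limit: Optional[int] = 20,
                     sort_by: Optional[str] = "timestamp",
                     order: Optional[str] = "desc",
                     buffer_only: Optional[bool] = None) -> str:
    """Sketch of the query string _build_list_url now assembles."""
    params = []
    if limit is not None:
        params.append(f"limit={limit}")  # assumed to match the unshown branch
    if sort_by is not None:
        params.append(f"sort_by={sort_by}")
    if order is not None:
        params.append(f"order={order}")
    if buffer_only is not None:
        # Python bools stringify as "True"/"False"; lowercase them for the URL.
        params.append(f"buffer_only={str(buffer_only).lower()}")
    return "&".join(params)

print(build_list_query(buffer_only=True))
# -> limit=20&sort_by=timestamp&order=desc&buffer_only=true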

src/memfuse/client.py

Lines changed: 4 additions & 4 deletions

@@ -102,7 +102,7 @@ async def _request(
                 f"Cannot connect to MemFuse server at {self.base_url}. "
                 "Please make sure the server is running.\n\n"
                 "You can start the server with:\n"
-                "  python -m memfuse.server --host localhost --port 8000"
+                "  poetry run memfuse-core"
             )
 
         url = f"{self.base_url}{endpoint}"
@@ -121,7 +121,7 @@ async def _request(
                 f"Cannot connect to MemFuse server at {self.base_url}. "
                 "Please make sure the server is running.\n\n"
                 "You can start the server with:\n"
-                "  python -m memfuse.server --host localhost --port 8000"
+                "  poetry run memfuse-core"
             ) from e
 
     async def init(
@@ -320,7 +320,7 @@ def _request_sync(
                 f"Cannot connect to MemFuse server at {self.base_url}. "
                 "Please make sure the server is running.\n\n"
                 "You can start the server with:\n"
-                "  python -m memfuse.server --host localhost --port 8000"
+                "  poetry run memfuse-core"
             )
 
         url = f"{self.base_url}{endpoint}"
@@ -337,7 +337,7 @@ def _request_sync(
                 f"Cannot connect to MemFuse server at {self.base_url}. "
                 "Please make sure the server is running.\n\n"
                 "You can start the server with:\n"
-                "  python -m memfuse.server --host localhost --port 8000"
+                "  poetry run memfuse-core"
             ) from e
 
     def init(

src/memfuse/llm/anthropic_adapter.py

Lines changed: 26 additions & 4 deletions

@@ -48,11 +48,22 @@ def wrapper(*args: Any, **kwargs: Any) -> Any:  # signature replaced below
         # ------- 2. Get the last n messages ----------------------------------
         max_chat_history = memory.max_chat_history
 
-        retrieved_chat_history = memory.list_messages(
+        in_buffer_chat_history = memory.list_messages(
             limit=max_chat_history,
+            buffer_only=True,
         )
 
-        chat_history = [{"role": message["role"], "content": message["content"]} for message in retrieved_chat_history["data"]["messages"][::-1]]
+        in_buffer_messages_length = len(in_buffer_chat_history["data"]["messages"])
+
+        if in_buffer_messages_length < max_chat_history:
+            in_db_chat_history = memory.list_messages(
+                limit=max_chat_history - in_buffer_messages_length,
+                buffer_only=False,
+            )
+        else:
+            in_db_chat_history = []
+
+        chat_history = [{"role": message["role"], "content": message["content"]} for message in in_db_chat_history["data"]["messages"][::-1]] + [{"role": message["role"], "content": message["content"]} for message in in_buffer_chat_history["data"]["messages"][::-1]]
 
         # ------- 3. Retrieve memories ---------------------------------------
         # Convert Anthropic formatted messages to a string for querying
@@ -146,11 +157,22 @@ async def wrapper(*args: Any, **kwargs: Any) -> Any:  # signature replaced below
         # ------- 2. Get the last n messages ----------------------------------
         max_chat_history = memory.max_chat_history
 
-        retrieved_chat_history = await memory.list_messages(
+        in_buffer_chat_history = await memory.list_messages(
             limit=max_chat_history,
+            buffer_only=True,
         )
 
-        chat_history = [{"role": message["role"], "content": message["content"]} for message in retrieved_chat_history["data"]["messages"][::-1]]
+        in_buffer_messages_length = len(in_buffer_chat_history["data"]["messages"])
+
+        if in_buffer_messages_length < max_chat_history:
+            in_db_chat_history = await memory.list_messages(
+                limit=max_chat_history - in_buffer_messages_length,
+                buffer_only=False,
+            )
+        else:
+            in_db_chat_history = []
+
+        chat_history = [{"role": message["role"], "content": message["content"]} for message in in_db_chat_history["data"]["messages"][::-1]] + [{"role": message["role"], "content": message["content"]} for message in in_buffer_chat_history["data"]["messages"][::-1]]
 
         # ------- 3. Retrieve memories ---------------------------------------
         query_string = PromptFormatter.messages_to_query(chat_history + query_messages)
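
Both wrappers now read the newest messages from the RoundBuffer first and top up from persistent storage only when the buffer holds fewer than max_chat_history messages. One caveat in the hunk as committed: when the buffer is already full, in_db_chat_history is set to a plain [], which the subsequent ["data"]["messages"] lookup cannot index. A minimal sketch of the intended merge, normalizing both branches to a message list:

def merge_chat_history(memory, max_chat_history: int) -> list:
    """Sketch of the buffer-first merge; memory stands in for a MemFuse Memory."""
    # Newest messages live in the RoundBuffer; fetch those first.
    in_buffer = memory.list_messages(limit=max_chat_history, buffer_only=True)
    buffered = in_buffer["data"]["messages"]

    # Top up from HybridBuffer + SQLite only if the buffer alone falls short.
    if len(buffered) < max_chat_history:
        in_db = memory.list_messages(
            limit=max_chat_history - len(buffered),
            buffer_only=False,
        )
        persisted = in_db["data"]["messages"]
    else:
        persisted = []  # a message list, unlike the raw [] in the hunk above

    # Both responses arrive newest-first; [::-1] restores chronological order,
    # placing persisted (older) messages ahead of buffered (newer) ones.
    return [{"role": m["role"], "content": m["content"]}
            for m in persisted[::-1] + buffered[::-1]]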

src/memfuse/llm/gemini_adapter.py

Lines changed: 53 additions & 24 deletions

@@ -10,7 +10,7 @@
 # from google.genai import AsyncClient as AsyncGeminiClient  # google.genai.Client can be used with an async transport
 
 from memfuse import Memory
-from memfuse.prompts import PromptContext
+from memfuse.prompts import PromptContext, PromptFormatter
 
 # Set up logger for this module
 logger = logging.getLogger(__name__)
@@ -133,26 +133,41 @@ def _instrument_generate_content_sync(
     latest_user_query_message = gemini_query_messages[-1]
 
     retrieved_memories = None
-    retrieved_chat_history = None
+    chat_history = None
 
-    if latest_user_query_message:
-        query_response = memory.query_session(latest_user_query_message["content"])
-        retrieved_memories = query_response["data"]["results"] if query_response else None
-
+    if latest_user_query_message:
         # Get chat history
         max_chat_history = memory.max_chat_history
-        chat_history_response = memory.list_messages(limit=max_chat_history)
-        if chat_history_response and chat_history_response.get("data", {}).get("messages"):
-            retrieved_chat_history = [
-                {"role": msg["role"], "content": msg["content"]}
-                for msg in chat_history_response["data"]["messages"][::-1]
-            ]
+
+        in_buffer_chat_history = memory.list_messages(
+            limit=max_chat_history,
+            buffer_only=True,
+        )
+
+        in_buffer_messages_length = len(in_buffer_chat_history["data"]["messages"])
+
+        if in_buffer_messages_length < max_chat_history:
+            in_db_chat_history = memory.list_messages(
+                limit=max_chat_history - in_buffer_messages_length,
+                buffer_only=False,
+            )
+        else:
+            in_db_chat_history = []
+
+        chat_history = [{"role": message["role"], "content": message["content"]} for message in in_db_chat_history["data"]["messages"][::-1]] + [{"role": message["role"], "content": message["content"]} for message in in_buffer_chat_history["data"]["messages"][::-1]]
+
+        # Retrieve memories
+        query_string = PromptFormatter.messages_to_query(chat_history + gemini_query_messages)
+        query_response = memory.query_session(query_string)
+        retrieved_memories = query_response["data"]["results"] if query_response else None
+
+        logger.info(f"Retrieved memories: {retrieved_memories}")
 
     # 3. Compose the prompt context for PromptFormatter
     prompt_context = PromptContext(
         query_messages=gemini_query_messages,
         retrieved_memories=retrieved_memories,
-        retrieved_chat_history=retrieved_chat_history,
+        retrieved_chat_history=chat_history,
         max_chat_history=memory.max_chat_history,
     )
 
@@ -191,19 +206,33 @@ async def _instrument_generate_content_async(
     retrieved_memories = None
     retrieved_chat_history = None
 
-    if latest_user_query_message:
-        # Properly await async memory operations
-        query_response = await memory.query_session(latest_user_query_message["content"])
-        retrieved_memories = query_response["data"]["results"] if query_response else None
-
+    if latest_user_query_message:
         # Get chat history
         max_chat_history = memory.max_chat_history
-        chat_history_response = await memory.list_messages(limit=max_chat_history)
-        if chat_history_response and chat_history_response.get("data", {}).get("messages"):
-            retrieved_chat_history = [
-                {"role": msg["role"], "content": msg["content"]}
-                for msg in chat_history_response["data"]["messages"][::-1]
-            ]
+
+        in_buffer_chat_history = await memory.list_messages(
+            limit=max_chat_history,
+            buffer_only=True,
+        )
+
+        in_buffer_messages_length = len(in_buffer_chat_history["data"]["messages"])
+
+        if in_buffer_messages_length < max_chat_history:
+            in_db_chat_history = await memory.list_messages(
+                limit=max_chat_history - in_buffer_messages_length,
+                buffer_only=False,
+            )
+        else:
+            in_db_chat_history = []
+
+        retrieved_chat_history = [{"role": message["role"], "content": message["content"]} for message in in_db_chat_history["data"]["messages"][::-1]] + [{"role": message["role"], "content": message["content"]} for message in in_buffer_chat_history["data"]["messages"][::-1]]
+
+        # Retrieve memories
+        query_string = PromptFormatter.messages_to_query(retrieved_chat_history + gemini_query_messages)
+        query_response = await memory.query_session(query_string)
+        retrieved_memories = query_response["data"]["results"] if query_response else None
+
+        logger.info(f"Retrieved memories: {retrieved_memories}")
 
     # 3. Compose the prompt context for PromptFormatter
     prompt_context = PromptContext(
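
Besides adopting the same buffer-first history merge, the Gemini adapter changes what drives retrieval: query_session previously saw only the latest user message, and now receives the recent conversation flattened through PromptFormatter.messages_to_query, matching the Anthropic adapter. A small before/after sketch; the message contents are made up, and messages_to_query's exact rendering belongs to the library:

from memfuse.prompts import PromptFormatter

chat_history = [
    {"role": "user", "content": "I adopted a cat named Miso."},
    {"role": "assistant", "content": "Congratulations on adopting Miso!"},
]
gemini_query_messages = [{"role": "user", "content": "What food should I buy?"}]

# Before: retrieval keyed on the latest user message alone, losing the cat context.
old_query = gemini_query_messages[-1]["content"]

# After: history plus the current turn are flattened into one query string,
# so a memory search for "food" can also surface the earlier mention of Miso.
new_query = PromptFormatter.messages_to_query(chat_history + gemini_query_messages)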
