diff --git a/backend/models/memory.py b/backend/models/memory.py
index 939880e134..b86ee6cf38 100644
--- a/backend/models/memory.py
+++ b/backend/models/memory.py
@@ -27,6 +27,8 @@ class CategoryEnum(str, Enum):
     social = 'social'
     work = 'work'
     sports = 'sports'
+    literature = 'literature'
+    history = 'history'
     other = 'other'


diff --git a/backend/routers/memories.py b/backend/routers/memories.py
index 8b2d04a687..868718fb2a 100644
--- a/backend/routers/memories.py
+++ b/backend/routers/memories.py
@@ -121,14 +121,16 @@ def postprocess_memory(
     # profile_duration = profile_aseg.duration_seconds + separate_seconds

     signed_url = upload_postprocessing_audio(file_path)
+
+    # Ensure the uploaded file gets deleted in 15m, even if transcription fails below
+    threads = threading.Thread(target=_delete_postprocessing_audio, args=(file_path,))
+    threads.start()
+
     speakers_count = len(set([segment.speaker for segment in memory.transcript_segments]))
     words = fal_whisperx(signed_url, speakers_count, aseg.duration_seconds)
     segments = fal_postprocessing(words, aseg.duration_seconds, profile_duration)
     os.remove(file_path)

-    # Delete uploaded file in 15m
-    threads = threading.Thread(target=_delete_postprocessing_audio, args=(file_path, ))
-    threads.start()

     if not segments:
         memories_db.set_postprocessing_status(uid, memory.id, PostProcessingStatus.canceled)
diff --git a/backend/utils/llm.py b/backend/utils/llm.py
index 1007964c31..bdeab7fd38 100644
--- a/backend/utils/llm.py
+++ b/backend/utils/llm.py
@@ -1,6 +1,6 @@
 import json
 from datetime import datetime
-from typing import List, Optional
+from typing import List, Tuple, Optional

 from langchain_core.output_parsers import PydanticOutputParser
 from langchain_core.prompts import ChatPromptTemplate
@@ -363,9 +363,14 @@ def retrieve_memory_context_params(memory: Memory) -> List[str]:

    Conversation: {transcript}
    '''.replace('    ', '').strip()
-    with_parser = llm.with_structured_output(TopicsContext)
-    response: TopicsContext = with_parser.invoke(prompt)
-    return response.topics
+
+    try:
+        with_parser = llm.with_structured_output(TopicsContext)
+        response: TopicsContext = with_parser.invoke(prompt)
+        return response.topics
+    except Exception as e:
+        print(f'Error retrieving memory context params: {e}')
+        return []


 class SummaryOutput(BaseModel):
@@ -490,5 +495,4 @@ def qa_emotional_rag(user_name: str, user_facts: List[Fact], context: str, memor
     ```
     Answer:
     """.replace('    ', '').strip()
-    print(prompt)
     return llm.invoke(prompt).content
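Note for reviewers: the relocated cleanup relies on `_delete_postprocessing_audio`, whose body is not part of this diff. Below is a minimal sketch of what such a helper could look like, assuming (per the "in 15m" comment) it simply waits ~15 minutes and then removes the file; the real implementation may instead delete the uploaded copy from remote storage. All names and the delay value here are assumptions.

```python
import os
import threading
import time


def _delete_postprocessing_audio(file_path: str, delay_seconds: int = 15 * 60) -> None:
    # Hypothetical sketch only: sleep ~15 minutes, then delete the file if it
    # still exists (the router may already have removed it via os.remove).
    time.sleep(delay_seconds)
    if os.path.exists(file_path):
        os.remove(file_path)


# As in the diff: start the cleanup thread right after upload, *before*
# transcription, so the file is reclaimed even if fal_whisperx raises.
threading.Thread(target=_delete_postprocessing_audio, args=('audio.wav',), daemon=True).start()
```

Moving the thread start ahead of `fal_whisperx` means cleanup is scheduled even when transcription fails partway through, which appears to be the point of this change.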