38 | 38 | MATCH (chunk)-[:PART_OF]->(d:Document)
39 | 39 | CALL { WITH chunk
40 | 40 | MATCH (chunk)-[:HAS_ENTITY]->(e)
41 |    | -MATCH path=(e)(()-[rels:!HAS_ENTITY&!PART_OF]-()){0,3}(:!Chunk&!Document)
   | 41 | +MATCH path=(e)(()-[rels:!HAS_ENTITY&!PART_OF]-()){0,2}(:!Chunk&!Document)
42 | 42 | UNWIND rels as r
43 | 43 | RETURN collect(distinct r) as rels
44 | 44 | }
49 | 49 | WITH d, score,
50 | 50 | apoc.text.join(texts,"\n----\n") +
51 | 51 | apoc.text.join(entities,"\n")
52 |    | -as text, entities, chunkIds, page_numbers
53 |    | -RETURN text, score, {source: COALESCE(CASE WHEN d.url CONTAINS "None" THEN d.fileName ELSE d.url END, d.fileName), chunkIds:chunkIds, page_numbers:page_numbers} as metadata
   | 52 | +as text, entities, chunkIds, page_numbers, start_times
   | 53 | +RETURN text, score, {source: COALESCE(CASE WHEN d.url CONTAINS "None" THEN d.fileName ELSE d.url END, d.fileName), chunkIds:chunkIds, page_numbers:page_numbers, start_times:start_times, entities:entities} as metadata
54 | 54 | """
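
Two things change in this Cypher block: the quantified path pattern now expands entity neighborhoods to at most 2 hops instead of 3, trimming how much graph context is pulled in per chunk, and the metadata map gains `start_times` and `entities` keys. Below is a minimal sketch of how downstream code might read the enriched metadata, assuming LangChain-style `Document` objects; the helper name is hypothetical and not part of this commit.

```python
# Hypothetical helper: inspect the metadata map produced by the RETURN
# clause above on a retrieved LangChain Document.
def describe_source(doc):
    meta = doc.metadata
    return {
        "source": meta.get("source"),            # fileName or url, per the COALESCE
        "chunks": meta.get("chunkIds"),
        "pages": meta.get("page_numbers"),
        "start_times": meta.get("start_times"),  # new in this commit
        "entities": meta.get("entities"),        # new in this commit
    }
```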
55 | 55 |
56 | 56 | SYSTEM_TEMPLATE = """
57 |    | -You are an AI-powered question-answering agent. Your task is to provide accurate and concise responses to user queries based on the given context, chat history, and available resources.
   | 57 | +You are an AI-powered question-answering agent. Your task is to provide accurate and comprehensive responses to user queries based on the given context, chat history, and available resources.
58 | 58 |
59 | 59 | ### Response Guidelines:
60 |    | -1. **Direct Answers**: Provide straightforward answers to the user's queries without headers unless requested. Avoid speculative responses.
   | 60 | +1. **Direct Answers**: Provide clear and thorough answers to the user's queries without headers unless requested. Avoid speculative responses.
61 | 61 | 2. **Utilize History and Context**: Leverage relevant information from previous interactions, the current user input, and the context provided below.
62 | 62 | 3. **No Greetings in Follow-ups**: Start with a greeting in initial interactions. Avoid greetings in subsequent responses unless there's a significant break or the chat restarts.
63 | 63 | 4. **Admit Unknowns**: Clearly state if an answer is unknown. Avoid making unsupported statements.
64 | 64 | 5. **Avoid Hallucination**: Only provide information based on the context provided. Do not invent information.
65 |    | -6. **Response Length**: Keep responses concise and relevant. Aim for clarity and completeness within 2-3 sentences unless more detail is requested.
   | 65 | +6. **Response Length**: Keep responses concise and relevant. Aim for clarity and completeness within 4-5 sentences unless more detail is requested.
66 | 66 | 7. **Tone and Style**: Maintain a professional and informative tone. Be friendly and approachable.
67 | 67 | 8. **Error Handling**: If a query is ambiguous or unclear, ask for clarification rather than providing a potentially incorrect answer.
68 | 68 | 9. **Fallback Options**: If the required information is not available in the provided context, provide a polite and helpful response. Example: "I don't have that information right now." or "I'm sorry, but I don't have that information. Is there something else I can help with?"
   | 69 | +10. **Context Availability**: If the context is empty, do not provide answers based solely on internal knowledge. Instead, respond appropriately by indicating the lack of information.
   | 70 | +
   | 71 | +
   | 72 | +**IMPORTANT**: DO NOT ANSWER FROM YOUR KNOWLEDGE BASE; USE THE CONTEXT BELOW.
69 | 73 |
70 | 74 | ### Context:
71 | 75 | <context>
77 | 81 | AI Response: 'Hello there! How can I assist you today?'
78 | 82 |
79 | 83 | User: "What is Langchain?"
80 |    | -AI Response: "Langchain is a framework that enables the development of applications powered by large language models, such as chatbots."
   | 84 | +AI Response: "Langchain is a framework that enables the development of applications powered by large language models, such as chatbots. It simplifies the integration of language models into various applications by providing useful tools and components."
81 | 85 |
82 | 86 | User: "Can you explain how to use memory management in Langchain?"
83 |    | -AI Response: "Langchain's memory management involves utilizing built-in mechanisms to manage conversational context effectively, ensuring a coherent user experience."
   | 87 | +AI Response: "Langchain's memory management involves utilizing built-in mechanisms to manage conversational context effectively. It ensures that the conversation remains coherent and relevant by maintaining the history of interactions and using it to inform responses."
84 | 88 |
85 | 89 | User: "I need help with PyCaret's classification model."
86 |    | -AI Response: "PyCaret simplifies the process of building and deploying machine learning models. For classification tasks, you can use PyCaret's setup function to prepare your data, then compare and tune models."
   | 90 | +AI Response: "PyCaret simplifies the process of building and deploying machine learning models. For classification tasks, you can use PyCaret's setup function to prepare your data. After setup, you can compare multiple models to find the best one, and then fine-tune it for better performance."
87 | 91 |
88 |    | -Note: This system does not generate answers based solely on internal knowledge. It answers from the information provided in the user's current and previous inputs, and from explicitly referenced external sources.
   | 92 | +User: "What can you tell me about the latest realtime trends in AI?"
   | 93 | +AI Response: "I don't have that information right now. Is there something else I can help with?"
   | 94 | +
   | 95 | +Note: This system does not generate answers based solely on internal knowledge. It answers from the information provided in the user's current and previous inputs, and from the context.
89 | 96 | """
90 | 97 |
91 | 98 | # def get_llm(model: str,max_tokens=CHAT_MAX_TOKENS) -> Any:
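
The `rag_chain.invoke` call removed in the next hunk passes `messages`, `context`, and `input`, so SYSTEM_TEMPLATE is evidently rendered with a `{context}` variable alongside the chat history. The actual `get_rag_chain` body falls outside this diff; under those assumptions, a minimal sketch of such a chain using LangChain's `ChatPromptTemplate`:

```python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# Sketch only, not the repository's actual code. Assumes SYSTEM_TEMPLATE
# contains a {context} placeholder inside its <context> tags and that
# `llm` is any LangChain chat model.
def build_rag_chain(llm):
    prompt = ChatPromptTemplate.from_messages([
        ("system", SYSTEM_TEMPLATE),
        MessagesPlaceholder(variable_name="messages"),  # prior chat turns
        ("human", "{input}"),                           # current question
    ])
    return prompt | llm
```

Invoking this with `{"messages": messages[:-1], "context": formatted_docs, "input": question}` mirrors the call signature visible in the removed lines below.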
@@ -316,27 +323,12 @@ def QA_RAG(graph,model,question,session_id):
316 | 323 |             "messages":messages
317 | 324 |         }
318 | 325 |     )
319 |     | -    formatted_docs,sources = format_documents(docs)
320 |     | -    doc_retrieval_time = time.time() - start_time
321 |     | -    logging.info(f"Modified question and Documents retrieved in {doc_retrieval_time:.2f} seconds")
322 |     | -
323 |     | -    start_time = time.time()
324 |     | -    rag_chain = get_rag_chain(llm=llm)
325 |     | -    ai_response = rag_chain.invoke(
326 |     | -        {
327 |     | -            "messages" : messages[:-1],
328 |     | -            "context" : formatted_docs,
329 |     | -            "input" : question
330 |     | -        }
331 |     | -    )
332 |     | -    result = get_sources_and_chunks(sources,docs)
333 |     | -    content = ai_response.content
334 |     | -    if "Gemini" in model:
335 |     | -        total_tokens = ai_response.response_metadata['usage_metadata']['prompt_token_count']
336 |     | -    else:
337 |     | -        total_tokens = ai_response.response_metadata['token_usage']['total_tokens']
338 |     | -    predict_time = time.time() - start_time
339 |     | -    logging.info(f"Final Response predicted in {predict_time:.2f} seconds")
    | 326 | +    if docs:
    | 327 | +        # print(docs)
    | 328 | +        formatted_docs,sources = format_documents(docs)
    | 329 | +
    | 330 | +        doc_retrieval_time = time.time() - start_time
    | 331 | +        logging.info(f"Modified question and Documents retrieved in {doc_retrieval_time:.2f} seconds")
340 | 332 |
341 | 333 |     start_time = time.time()
342 | 334 |     messages.append(ai_response)