app.py
import time

import streamlit as st
from dotenv import load_dotenv
from langchain_core.messages import AIMessage, HumanMessage

from src.utils import get_text_from_files, get_text_chunks, get_vectorstore, get_conversation_chain

load_dotenv()
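
# Note: the helpers imported from src.utils live elsewhere in the repository. As a rough,
# hypothetical sketch (an assumption, not the actual implementation), they are expected to
# behave along these lines:
#
#   get_text_from_files(docs)              -> one concatenated string of text extracted from
#                                             the uploaded PDF/DOCX/JSON files
#   get_text_chunks(raw_text)              -> the text split into overlapping chunks,
#                                             e.g. via a recursive character splitter
#   get_vectorstore(text_chunks, api_key)  -> an embeddings-backed vector index over the
#                                             chunks (e.g. OpenAI embeddings + FAISS)
#   get_conversation_chain(store, api_key) -> a retrieval QA chain with chat memory that
#                                             answers questions against the index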

def handle_userinput(user_question):
    if not st.session_state.conversation:
        st.error("Please process documents first.")
        return "Please upload and process documents before asking questions."
    try:
        # Calling the chain object directly uses LangChain's legacy __call__ interface;
        # newer LangChain releases prefer .invoke({'question': ...}).
        response = st.session_state.conversation({'question': user_question})
        return response['answer']
    except Exception as e:
        st.error(f"Error processing question: {str(e)}")
        return "I encountered an error processing your question. Please try again."

def clear_chat():
    st.session_state.chat_history = [
        AIMessage(content="Hello, I'm MFBot. How can I help you?"),
    ]
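
# ui() builds the whole Streamlit page: a sidebar for the OpenAI API key plus document
# upload and processing, and a main chat area that replays the stored history and streams
# each new answer with a simulated typing effect.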
def ui():
    st.set_page_config(
        page_title="Multiple Docs QueryBot",
        page_icon=":books:",
        layout="wide"
    )

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = [
            AIMessage(content="Hello, I'm MFBot. How can I help you?"),
        ]

    st.header("Multi-Files QueryBot :books:")

    with st.sidebar:
        st.title("About Project")
        st.markdown("The **<u>Multi-Files QueryBot</u>** is a Python-based tool that allows users to interact with multiple document types, including `PDF`, `.docx`, and `.json` files, through natural language queries.\n* Users can ask questions based on the content of these documents, and the app provides accurate, context-aware responses.\n* It's designed to help users efficiently navigate and extract insights from large sets of documents.", unsafe_allow_html=True)
        st.markdown("<hr>", unsafe_allow_html=True)
        st.subheader("Documents")
        api_key = st.text_input(label="Enter OpenAI API key", type="password", placeholder="OPENAI_API_KEY")
        docs = st.file_uploader(
            "Upload your files here (`PDF`, `DOCX`, `JSON`) and click on `Process`",
            type=["pdf", "docx", "json"],
            accept_multiple_files=True
        )
        if st.button("Process"):
            if not api_key:
                st.error("Please enter your OpenAI API key.")
                return
            if not docs:
                st.error("Please upload at least one document.")
                return
            with st.status("Processing...", expanded=True) as status:
                try:
                    # Pipeline: extract text -> split into chunks -> embed into a vector
                    # store -> build the conversational retrieval chain.
                    st.write("Getting text from files...")
                    raw_text = get_text_from_files(docs)
                    if not raw_text:
                        status.update(label="Processing failed!", state="error")
                        return
                    st.write("Dividing text into chunks...")
                    text_chunks = get_text_chunks(raw_text)
                    if not text_chunks:
                        status.update(label="Processing failed!", state="error")
                        return
                    st.write("Creating vector store...")
                    vectorstore = get_vectorstore(text_chunks, api_key)
                    if not vectorstore:
                        status.update(label="Processing failed!", state="error")
                        return
                    st.session_state.conversation = get_conversation_chain(vectorstore, api_key)
                    if not st.session_state.conversation:
                        status.update(label="Processing failed!", state="error")
                        return
                    status.update(label="Processing complete! You can now ask questions.", state="complete")
                except Exception as e:
                    st.error(f"An error occurred: {str(e)}")
                    status.update(label="Processing failed!", state="error")

    # User input
    user_question = st.chat_input("Ask a question about your documents:")

    # Clear chat button
    if st.button("Clear Chat"):
        clear_chat()

    # Chat container
    chat_container = st.container()

    # Display chat history
    with chat_container:
        for message in st.session_state.chat_history:
            if isinstance(message, AIMessage):
                with st.chat_message("AI"):
                    st.write(message.content)
            elif isinstance(message, HumanMessage):
                with st.chat_message("Human"):
                    st.write(message.content)

    # Handle user input
    if user_question:
        with st.chat_message("Human"):
            st.write(user_question)
        st.session_state.chat_history.append(HumanMessage(content=user_question))

        with st.chat_message("AI"):
            message_placeholder = st.empty()
            full_response = handle_userinput(user_question)
            # Simulate typing effect
            for i in range(len(full_response)):
                message_placeholder.markdown(full_response[:i+1] + "▌")
                time.sleep(0.01)
            message_placeholder.markdown(full_response)
        st.session_state.chat_history.append(AIMessage(content=full_response))


if __name__ == '__main__':
    ui()
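
# To try the app locally (assuming the repository's dependencies are installed):
#   streamlit run app.py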