forked from build-on-aws/amazon-bedrock-quick-start
chat_bedrock_st.py
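"""ChatBedrock: a minimal Streamlit chat UI backed by Anthropic Claude v2 on Amazon Bedrock.

Run with: streamlit run chat_bedrock_st.py
"""
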
import time
import boto3
import streamlit as st
from langchain.chains import ConversationChain
from langchain.llms.bedrock import Bedrock
from langchain.memory import ConversationBufferMemory

st.title("ChatBedrock")

# Set up the Bedrock runtime client.
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name="us-east-1",
)
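
# Cache the conversation chain so it is built once and reused across Streamlit reruns.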
@st.cache_resource
def load_llm():
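    # Claude v2 on Bedrock; model_kwargs set the sampling temperature and the response length cap.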
    llm = Bedrock(client=bedrock_runtime, model_id="anthropic.claude-v2")
    llm.model_kwargs = {"temperature": 0.7, "max_tokens_to_sample": 2048}
    model = ConversationChain(llm=llm, verbose=True, memory=ConversationBufferMemory())
    return model


model = load_llm()
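
# Keep the chat history in session state so it survives Streamlit reruns.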
if "messages" not in st.session_state:
    st.session_state.messages = []
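
# Replay the conversation so far on every rerun.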
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
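
# Handle a new message from the chat input box.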
if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # prompt = prompt_fixer(prompt)
        result = model.predict(input=prompt)
        # Simulate a streaming response with a short delay between chunks
        for chunk in result.split(' '):  # fix for https://github.com/streamlit/streamlit/issues/868
            full_response += chunk + ' '
            if chunk.endswith('\n'):
                full_response += ' '
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})