forked from jennifermarsman/AIforChildhoodHunger
-
Notifications
You must be signed in to change notification settings - Fork 0
/
prototype.py
75 lines (60 loc) · 3.05 KB
/
prototype.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import os

import gradio as gr
from langchain.chat_models import AzureChatOpenAI
from langchain.prompts import (
    PromptTemplate,
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
# Constants for calling the Azure OpenAI service.
# Prefer environment variables over hard-coding credentials in source; the
# original TODO placeholders are kept as fallbacks so behavior is unchanged
# when the variables are not set.
openai_api_type = "azure"
# Your endpoint will look something like this: https://YOUR_AOAI_RESOURCE_NAME.openai.azure.com/
gpt_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "https://TODO.openai.azure.com/")
# Your key will look something like this: 00000000000000000000000000000000
gpt_api_key = os.getenv("AZURE_OPENAI_API_KEY", "TODO")
gpt_deployment_name = "gpt-35-turbo"
# Shared client instance for the Azure-hosted GPT chat model; reused by
# every call to call_gpt_model below.
gpt = AzureChatOpenAI(
    openai_api_type=openai_api_type,
    openai_api_base=gpt_endpoint,
    openai_api_key=gpt_api_key,
    openai_api_version="2023-03-15-preview",
    deployment_name=gpt_deployment_name,
)
def call_gpt_model(rag_from_bing, message):
    """Answer a user question with GPT, grounded in retrieved context.

    Parameters
    ----------
    rag_from_bing : str
        Context text (retrieved from Bing) used to ground the answer.
    message : str
        The user's question.

    Returns
    -------
    str
        The text content of the model's reply.
    """
    system_template = "You are a professional, helpful assistant to provide resources to combat childhood hunger. \n"
    system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)
    user_prompt = PromptTemplate(
        template="## Context \n {rag_from_bing} \n" +
                 "## Instructions \n Using the above context, answer the question below.\n" +
                 "## Question \n {message} \n",
        input_variables=["rag_from_bing", "message"],
    )
    human_message_prompt = HumanMessagePromptTemplate(prompt=user_prompt)
    chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

    # Get formatted messages for the chat completion.
    # BUG FIX: the original passed {rag_from_bing} and {message} — single-element
    # sets — so the prompt was rendered as "{'...text...'}" instead of the raw
    # text. Pass the strings themselves.
    messages = chat_prompt.format_prompt(rag_from_bing=rag_from_bing, message=message).to_messages()
    print("Messages")
    print(messages)

    # Call the model
    output = gpt(messages)
    print("Output")
    print(output)
    return output.content
def chat(message, history):
    """Gradio chat callback: answer `message` using retrieved context.

    `history` is supplied by gr.ChatInterface but is not used yet.
    """
    # TODO: determine the user's location.
    # TODO: pull information from trusted sources first, and decide whether we
    #       have sufficient trusted-source data or even need to call Bing.
    # TODO: real Bing logic here — set rag_from_bing from the Bing response
    #       rather than the hard-coded text below.
    rag_from_bing = "To determine your eligibility for WIC in Michigan, you can use the online prescreening tool available at the Michigan Department of Health and Human Services website. The tool will ask you a series of questions related to your household size, income, and other factors to determine if you may be eligible for WIC benefits. You can access the tool here: https://www.michigan.gov/mdhhs/0,5885,7-339-71551_2945_42592---,00.html"

    # Ground the GPT call in the (currently hard-coded) context.
    return call_gpt_model(rag_from_bing, message)
# Wire up and launch the Gradio chat UI, delegating each turn to chat().
chatbot = gr.Chatbot(bubble_full_width=False)
chat_interface = gr.ChatInterface(
    fn=chat,
    chatbot=chatbot,
    # title="Title here",
    # description="Description here",
)
chat_interface.launch()