
Commit

remove prompt from llm response
Ayan Bandyopadhyay authored and committed on Aug 3, 2023
1 parent d6b9518 commit d654043
Showing 1 changed file with 31 additions and 19 deletions.
server/llm/llm.py (50 changes: 31 additions & 19 deletions)
@@ -7,14 +7,17 @@
 import requests
 import json
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain.chains.question_answering import load_qa_chain
 from langchain.docstore.document import Document
 import openai
 
-embeddings = HuggingFaceEmbeddings(model_name=os.environ.get("embeddings_model") or "all-MiniLM-L6-v2")
+embeddings = HuggingFaceEmbeddings(
+    model_name=os.environ.get("embeddings_model") or "all-MiniLM-L6-v2"
+)
 embeddings_dimension = 384
 base_url = os.environ.get("LLM_URL") or "http://localhost"
 
+
 def get_selected_llm() -> LLM:
     llm_type = os.environ.get("LLM_TYPE") or "falcon7b"
     if llm_type == "gpt4all":
@@ -28,41 +31,46 @@ def get_selected_llm() -> LLM:
     else:
         raise Exception("Unknown LLM type: " + llm_type)
 
-class Gpt4AllLLM(LLM):
 
+class Gpt4AllLLM(LLM):
     llm: Optional[GPT4All] = None
 
     def __init__(self):
         super().__init__()
-        self.llm = GPT4All(model="llm/local/ggml-gpt4all-j-v1.3-groovy.bin", backend='gptj', n_batch=8, verbose=False)
+        self.llm = GPT4All(
+            model="llm/local/ggml-gpt4all-j-v1.3-groovy.bin",
+            backend="gptj",
+            n_batch=8,
+            verbose=False,
+        )
 
     async def ask(self, documents: List[PsychicDocument], question: str) -> str:
         # TODO: support streaming https://gist.github.com/jvelezmagic/03ddf4c452d011aae36b2a0f73d72f68
         callbacks = [StreamingStdOutCallbackHandler()]
 
         chain = load_qa_chain(self.llm, chain_type="stuff")
 
         docs = [
             Document(
-                page_content=doc.payload.get("content"),
-                metadata={**doc.payload.get("metadata")}
-            ) for doc in documents
+                page_content=doc.payload.get("content"),
+                metadata={**doc.payload.get("metadata")},
+            )
+            for doc in documents
         ]
         result = chain.run(input_documents=docs, question=question, callbacks=callbacks)
 
         return result
 
-class Falcon7BLLM(LLM):
 
+
+class Falcon7BLLM(LLM):
     def __init__(self):
         super().__init__()
 
     async def ask(self, documents: List[PsychicDocument], question: str) -> str:
-
         context_str = ""
 
-        for doc in documents:
-            context_str += f"{doc.title}: {doc.content}\n\n"
+        doc = documents[0]
+        context_str += f"{doc.title}: {doc.content}\n\n"
 
         prompt = (
             "Context: \n"
@@ -77,9 +85,11 @@ async def ask(self, documents: List[PsychicDocument], question: str) -> str:
         res = requests.post(f"{base_url}:8080/v1/models/model:predict", json=data)
 
         res_json = res.json()
+        print(res_json)
 
-        return res_json['data']['generated_text']
+        return res_json["data"]["generated_text"][len(prompt) :]
+
 
 class GPT(LLM):
     model_name: str = "gpt-3.5-turbo"
 
@@ -101,13 +111,15 @@ async def ask(self, documents: List[PsychicDocument], question: str) -> str:
             f"Given the above context and no other information, answer the question: {question}\n"
         )
 
-
         res = openai.ChatCompletion.create(
             model=self.model_name,
             messages=[
-                {"role": "system", "content": "You are a helpful assistant that answers questions about the documents provided. If you get messages that aren't related to the documents, ask about how you can help the user with the documents."},
+                {
+                    "role": "system",
+                    "content": "You are a helpful assistant that answers questions about the documents provided. If you get messages that aren't related to the documents, ask about how you can help the user with the documents.",
+                },
                 {"role": "user", "content": prompt},
-            ]
+            ],
         )
 
-        return res['choices'][0]['message']['content']
+        return res["choices"][0]["message"]["content"]
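
Why the new slice in Falcon7BLLM.ask: text-generation endpoints like this one commonly echo the prompt back at the start of generated_text, so returning generated_text[len(prompt):] leaves only the model's completion, which is what this commit fixes. A minimal sketch of the idea, assuming the response shape shown in the diff; the data payload and the startswith guard are illustrative additions, not part of this commit:

    import requests

    def ask_falcon(prompt: str, base_url: str = "http://localhost") -> str:
        # Hypothetical request body; the diff does not show how `data` is built.
        data = {"prompt": prompt}
        res = requests.post(f"{base_url}:8080/v1/models/model:predict", json=data)
        generated = res.json()["data"]["generated_text"]
        # Drop the echoed prompt only if the server actually prepended it;
        # a bare [len(prompt):] slice would truncate real output otherwise.
        if generated.startswith(prompt):
            return generated[len(prompt):]
        return generated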
