Skip to content

Commit

Permalink
Merge pull request #68 from eren23/only-prompt-output
Browse files Browse the repository at this point in the history
Only prompt output
  • Loading branch information
kaanozbudak authored Mar 29, 2023
2 parents fecb45d + 988ce81 commit 979ba77
Showing 1 changed file with 45 additions and 19 deletions.
64 changes: 45 additions & 19 deletions knowledgegpt/extractors/base_extractor.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,50 @@ def extract(self, query, max_tokens, load_index=False) -> tuple[str, str, list]:
param query: Query to answer
param max_tokens: Maximum number of tokens to generate
"""
self._handle_df_embeddings(load_index=load_index)

if len(self.messages) == 0 and self.is_turbo:
self.messages = [{"role": "system", "content": "you are a helpful assistant"}]

self.answer, self.prompt, self.messages = answer_query_with_context(
query=query,
df=self.df,
document_embeddings=self.embeddings,
embedding_type=self.embedding_extractor,
model_lang=self.model_lang,
is_turbo=self.is_turbo,
is_gpt4=self.is_gpt4,
verbose=self.verbose,
messages=self.messages,
max_tokens=max_tokens,
index_type=self.index_type,
prompt_template=self.prompt_template
)
if not self.verbose:
print("all_done!")
return self.answer, self.prompt, self.messages

def get_knowledge_prompt_only(self, query, max_tokens, load_index=False):
    """Build and return the context-augmented prompt without calling the LLM.

    Mirrors the retrieval half of ``extract``: it ensures the dataframe and
    embeddings are loaded, then constructs the prompt from the most relevant
    document sections — but stops short of requesting a completion.

    param query: Question to build the prompt for
    param max_tokens: Maximum number of tokens the prompt context may use
    param load_index: If True, load previously saved embeddings/index from disk
    :return: The constructed prompt string (also stored on ``self.prompt``)
    """
    # Imported locally rather than at module top level.
    # NOTE(review): presumably this avoids a circular import between
    # extractors and utils_completion — confirm before hoisting.
    from knowledgegpt.utils.utils_completion import construct_prompt

    # Populate self.df / self.embeddings (and optionally load a saved
    # index), exactly as extract() does before answering.
    self._handle_df_embeddings(load_index=load_index)

    self.prompt = construct_prompt(
        verbose=self.verbose,
        question=query,
        df=self.df,
        context_embeddings=self.embeddings,
        embedding_type=self.embedding_extractor,
        model_lang=self.model_lang,
        max_tokens=max_tokens,
        index_type=self.index_type,
        prompt_template=self.prompt_template,
    )

    return self.prompt

def _handle_df_embeddings(self, load_index=False):
if load_index:
if self.df is None or self.embeddings is None:
self.load_embeddings_indexes()
Expand All @@ -93,22 +137,4 @@ def extract(self, query, max_tokens, load_index=False) -> tuple[str, str, list]:
pickle.dump(self.embeddings, f)

if len(self.messages) == 0 and self.is_turbo:
self.messages = [{"role": "system", "content": "you are a helpful assistant"}]

self.answer, self.prompt, self.messages = answer_query_with_context(
query=query,
df=self.df,
document_embeddings=self.embeddings,
embedding_type=self.embedding_extractor,
model_lang=self.model_lang,
is_turbo=self.is_turbo,
is_gpt4=self.is_gpt4,
verbose=self.verbose,
messages=self.messages,
max_tokens=max_tokens,
index_type=self.index_type,
prompt_template=self.prompt_template
)
if not self.verbose:
print("all_done!")
return self.answer, self.prompt, self.messages
self.messages = [{"role": "system", "content": "you are a helpful assistant"}]

0 comments on commit 979ba77

Please sign in to comment.