# config.toml
[api_keys]
api_key = "sk-xxxx"  # OpenAI-compatible API key
api_base = "https://api.openai.com/v1"  # base URL of the OpenAI-compatible API
api_name = "gpt-4o"  # model name requested through the API
auth_token = "hf_xxx"  # Hugging Face access token

[settings]
llm = "openai" # openai, huggingface, ollama
ollama_model = "llama2:7b" # ollama model name
huggingface_model = "llama" # huggingface model name
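# Hedged sketch of a fully local backend (assumption: ollama_model is only read
# when llm = "ollama"; the values below are illustrative):
# llm = "ollama"
# ollama_model = "llama2:7b"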
embeddings = "BAAI/bge-large-en-v1.5"  # embedding model
split_type = "sentence"  # document splitting strategy
chunk_size = 128  # chunk size used when splitting documents
dataset = "hotpot_qa"  # evaluation dataset
# dataset_path = "data/xxx.json"  # If dataset is custom, set the path to the custom dataset
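# Hedged example of the custom-dataset case noted above (the "custom" value and
# the file path are hypothetical placeholders, not confirmed by this file):
# dataset = "custom"
# dataset_path = "data/my_qa_dataset.json"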
persist_dir = "storage"  # directory where the index is persisted
llamaIndexEvaluateModel = "Qwen/Qwen1.5-7B-Chat-GPTQ-Int8"  # model used by the LlamaIndex evaluator
deepEvalEvaluateModel = "Qwen/Qwen1.5-7B-Chat-GPTQ-Int8"  # model used by the DeepEval evaluator
upTrainEvaluateModel = "qwen:7b-chat-v1.5-q8_0"  # model used by the UpTrain evaluator
evaluateApiName = ""  # optional API model name for evaluation
evaluateApiKey = ""  # optional API key for evaluation
evaluateApiBase = ""  # optional API base URL for evaluation
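# Hedged example of pointing evaluation at an OpenAI-compatible API instead of
# the local models above (model name, key, and URL are illustrative placeholders):
# evaluateApiName = "gpt-4o-mini"
# evaluateApiKey = "sk-xxxx"
# evaluateApiBase = "https://api.openai.com/v1"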
output = ""  # output location for evaluation results
n = 100  # number of evaluation samples
# Document-count settings for building the test corpus:
test_init_total_number_documents = 20
extra_number_documents = 20
extra_rate_documents = 0.1
test_all_number_documents = 40
experiment_1 = false  # toggle for experiment 1
retriever = "BM25"  # retrieval method
retriever_mode = 0
postprocess_rerank = "long_context_reorder"  # post-retrieval reordering step
query_transform = "none"  # query transformation strategy
metrics = ["NLG_chrf", "NLG_meteor", "NLG_wer", "NLG_cer", "NLG_chrf_pp", "NLG_perplexity", "NLG_rouge_rouge1", "NLG_rouge_rouge2", "NLG_rouge_rougeL", "NLG_rouge_rougeLsum"]

[prompt]
text_qa_template_str = "Context information is below.\n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {query_str}\n"
refine_template_str = "We have the opportunity to refine the original answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question: {query_str}. If the context isn't useful, output the original answer again.\nOriginal Answer: {existing_answer}"