
Commit 75567b8

Test set generation paradigm (#150)
#136

TODO
- [ ] Add README with flowchart demonstration of paradigm
- [x] Add quickstart notebook
- [x] Fix type hinting
- [x] Reformat methods if needed
- [x] Add progress bar
- [ ] Async workflow

---------

Co-authored-by: jjmachan <jamesjithin97@gmail.com>
1 parent ed479d4 commit 75567b8

File tree

4 files changed: +690 −0 lines changed


docs/Testsetgeneration.ipynb

+182
@@ -0,0 +1,182 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "fe8644a7",
   "metadata": {},
   "source": [
    "### Import Required libraries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "5aab66fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "from llama_index import download_loader\n",
    "from ragas.testset import TestsetGenerator\n",
    "\n",
    "\n",
    "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-key\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "51dff76e",
   "metadata": {},
   "source": [
    "### Load documents using document loader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "792dafa8",
   "metadata": {},
   "outputs": [],
   "source": [
    "SemanticScholarReader = download_loader(\"SemanticScholarReader\")\n",
    "loader = SemanticScholarReader()\n",
    "# narrow down the search space\n",
    "query_space = \"large language models\"\n",
    "# increase limit to get more documents\n",
    "documents = loader.load_data(query=query_space, limit=10)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b1d0356b",
   "metadata": {},
   "source": [
    "### Generate test set using default configurations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "30caed35",
   "metadata": {},
   "outputs": [],
   "source": [
    "testsetgenerator = TestsetGenerator.from_default()\n",
    "test_size = 2  # Number of samples to generate\n",
    "testset = testsetgenerator.generate(documents, test_size=test_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "c8162006",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>question</th>\n",
       "      <th>context</th>\n",
       "      <th>answer</th>\n",
       "      <th>question_type</th>\n",
       "      <th>episode_done</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>What is the synthesis performance of large lan...</td>\n",
       "      <td>- Our benchmarks are designed to measure the a...</td>\n",
       "      <td>The synthesis performance of large language mo...</td>\n",
       "      <td>simple</td>\n",
       "      <td>True</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>How did ChatGPT fare on the USMLE exams in ter...</td>\n",
       "      <td>- ChatGPT performed at or near the passing thr...</td>\n",
       "      <td>ChatGPT performed well on the USMLE exams in t...</td>\n",
       "      <td>reasoning</td>\n",
       "      <td>True</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                            question  \\\n",
       "0  What is the synthesis performance of large lan...   \n",
       "1  How did ChatGPT fare on the USMLE exams in ter...   \n",
       "\n",
       "                                             context  \\\n",
       "0  - Our benchmarks are designed to measure the a...   \n",
       "1  - ChatGPT performed at or near the passing thr...   \n",
       "\n",
       "                                              answer question_type  \\\n",
       "0  The synthesis performance of large language mo...        simple   \n",
       "1  ChatGPT performed well on the USMLE exams in t...     reasoning   \n",
       "\n",
       "   episode_done  \n",
       "0          True  \n",
       "1          True  "
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "testset.to_pandas()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "34474b1a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ragas",
   "language": "python",
   "name": "ragas"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
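The last executed cell shows that testset.to_pandas() returns an ordinary pandas DataFrame with question, context, answer, question_type, and episode_done columns, so the generated samples can be persisted with standard pandas calls. A minimal sketch, assuming the notebook above has already run (the output filename is an arbitrary example, not part of the ragas API):

# Save the generated test set for later evaluation runs.
# "testset.csv" is an illustrative path, not a ragas convention.
testset.to_pandas().to_csv("testset.csv", index=False)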

src/ragas/testset/__init__.py

+3
@@ -0,0 +1,3 @@
from ragas.testset.testset_generator import TestsetGenerator


__all__ = ["TestsetGenerator"]
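This re-export defines the package's public surface: TestsetGenerator is importable from ragas.testset directly, which is the path the quickstart notebook uses. Both imports below resolve to the same class:

# Public import path, as used in docs/Testsetgeneration.ipynb
from ragas.testset import TestsetGenerator

# Equivalent import via the underlying module
from ragas.testset.testset_generator import TestsetGenerator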

src/ragas/testset/prompts.py

+148
@@ -0,0 +1,148 @@
from langchain.prompts import HumanMessagePromptTemplate


SEED_QUESTION = HumanMessagePromptTemplate.from_template(
    """\
Your task is to formulate a question from the given context satisfying the rules given below:
1. The question should make sense to humans even when read without the given context.
2. The question should be fully answerable from the given context.
3. The question should be framed from a part of the context that contains important information. It can also be from tables, code, etc.
4. The answer to the question should not contain any links.
5. The question should be of moderate difficulty.
6. The question must be reasonable and must be understood and responded to by humans.
7. Do not use phrases like 'provided context', etc. in the question.
8. Avoid framing questions using the word "and" that can be decomposed into more than one question.
9. The question should not contain more than 10 words; make use of abbreviations wherever possible.

context:{context}
"""  # noqa: E501
)


REASONING_QUESTION = HumanMessagePromptTemplate.from_template(
    """\
You are a prompt rewriter. You will be provided with a question and a long context. Your task is to complicate the given question to increase the difficulty of answering it.
You should complicate the question by rewriting it into a multi-hop reasoning question based on the provided context. The question should require the reader to make multiple logical connections or inferences using the information available in the given context.
Here are some strategies to create multi-hop questions:

- Bridge related entities: Identify information that relates specific entities and frame a question that can be answered only by analysing information about both entities.

- Use pronouns: Identify pronouns (he, she, it, they) that refer to the same entity or concept in the context, and ask questions that would require the reader to figure out what the pronouns refer to.

- Refer to specific details: Mention specific details or facts from different parts of the context, including tables, code, etc., and ask how they are related.

- Pose hypothetical scenarios: Present a hypothetical situation or scenario that requires combining different elements from the context to arrive at an answer.

Rules to follow when rewriting the question:
1. Ensure that the rewritten question can be answered entirely from the information present in the contexts.
2. Do not frame questions that contain more than 15 words. Use abbreviations wherever possible.
3. Make sure the question is clear and unambiguous.
4. Phrases like 'based on the provided context', 'according to the context', etc. are not allowed to appear in the question.

question: {question}
CONTEXTS:
{context}

Multi-hop Reasoning Question:
"""  # noqa: E501
)


MULTICONTEXT_QUESTION = HumanMessagePromptTemplate.from_template(
    """\
You are a prompt rewriter. You will be provided with a question and two sets of contexts, namely context1 and context2.
Your task is to complicate the given question in a way that answering it requires information derived from both context1 and context2.
Follow the rules given below while rewriting the question.
1. The rewritten question should not be very long. Use abbreviations wherever possible.
2. The rewritten question must be reasonable and must be understood and responded to by humans.
3. The rewritten question must be fully answerable from the information present in context1 and context2.
4. Read and understand both contexts and rewrite the question so that answering it requires insight from both context1 and context2.
5. Phrases like 'based on the provided context', 'according to the context?', etc. are not allowed to appear in the question.

question:\n{question}
context1:\n{context1}
context2:\n{context2}
"""  # noqa: E501
)


CONDITIONAL_QUESTION = HumanMessagePromptTemplate.from_template(
    """\
Rewrite the provided question to increase its complexity by introducing a conditional element.
The goal is to make the question more intricate by incorporating a scenario or condition that affects the context of the question.
Follow the rules given below while rewriting the question.
1. The rewritten question should not be longer than 25 words. Use abbreviations wherever possible.
2. The rewritten question must be reasonable and must be understood and responded to by humans.
3. The rewritten question must be fully answerable from the information present in the context.
4. Phrases like 'provided context', 'according to the context?', etc. are not allowed to appear in the question.
for example,
question: What are the general principles for designing prompts in LLMs?
Rewritten Question: How to apply prompt designing principles to improve LLMs' performance in reasoning tasks

question:{question}
context:\n{context}
Rewritten Question:
"""  # noqa: E501
)


COMPRESS_QUESTION = HumanMessagePromptTemplate.from_template(
    """\
Rewrite the following question to make it more indirect and shorter while retaining the essence of the original question. The goal is to create a question that conveys the same meaning but in a less direct manner.
The rewritten question should be shorter, so use abbreviations wherever possible.
Original Question:
{question}

Indirectly Rewritten Question:
"""  # noqa: E501
)


CONVERSATION_QUESTION = HumanMessagePromptTemplate.from_template(
    """\
Reformat the provided question into two separate questions as if they were part of a conversation. Each question should focus on a specific aspect or subtopic related to the original question.
question: What are the advantages and disadvantages of remote work?
Reformatted Questions for Conversation: What are the benefits of remote work?\nOn the flip side, what challenges are encountered when working remotely?
question:{question}

Reformatted Questions for Conversation:
"""  # noqa: E501
)


SCORE_CONTEXT = HumanMessagePromptTemplate.from_template(
    """Evaluate the provided context and assign a numerical score between 0 and 10 based on the following criteria:
1. Award a high score to context that thoroughly delves into and explains concepts.
2. Assign a lower score to context that contains excessive references, acknowledgments, external links, personal information, or other non-essential elements.
Output the score only.
Context:
{context}
Score:
"""  # noqa: E501
)


FILTER_QUESTION = HumanMessagePromptTemplate.from_template(
    """\
Determine whether the given question can be clearly understood even when presented without any additional context. Reason before arriving at the answer.
question: What is the keyword that best describes the paper's focus in natural language understanding tasks?
answer: The specific paper being referred to is not mentioned in the question. Hence, No.
question:{question}
answer:
"""  # noqa: E501
)


ANSWER_FORMULATE = HumanMessagePromptTemplate.from_template(
    """\
Answer the question using the information from the given context.
question:{question}
context:{context}
answer:
"""  # noqa: E501
)


CONTEXT_FORMULATE = HumanMessagePromptTemplate.from_template(
    """Please extract relevant sentences from the provided context that can potentially help answer the following question. While extracting candidate sentences you're not allowed to make any changes to sentences from the given context.

question:{question}
context:\n{context}
candidate sentences:\n
"""  # noqa: E501
)
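Each of these templates is a standard LangChain HumanMessagePromptTemplate, so filling it in yields a chat message rather than a bare string. A minimal sketch of rendering the seed-question prompt (the context string is made up for illustration):

from ragas.testset.prompts import SEED_QUESTION

# format() substitutes the template variable and returns a HumanMessage;
# its .content attribute holds the filled-in prompt text.
message = SEED_QUESTION.format(
    context="Large language models are trained on web-scale text corpora."
)
print(message.content)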
