Skip to content

Commit e13f95d

Browse files
authored
Set Env variables + prompt improvements (#10)
* feat: init functionality to allow developer of chat function to set the env vars for aws lambda * feat: more detailed question info * testing: env vars mocks for deployment * fix: synthetic conversation
1 parent 7dedfed commit e13f95d

File tree

5 files changed

+24
-10
lines changed

5 files changed

+24
-10
lines changed

.github/workflows/dev.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,8 @@ jobs:
7575
needs: test
7676
with:
7777
template-repository-name: "lambda-feedback/chat-function-boilerplate"
78+
# Allow the developer to specify the environment variables used by the deployed AWS Lambda. Defaults to mock values, which an admin can later update.
79+
deployed-environment-variables: '["OPENAI_API_KEY","OPENAI_MODEL","GOOGLE_AI_API_KEY","GOOGLE_AI_MODEL"]'
7880
permissions:
7981
contents: read
8082
id-token: write

.github/workflows/main.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,8 @@ jobs:
7575
needs: test
7676
with:
7777
template-repository-name: "lambda-feedback/chat-function-boilerplate"
78+
# Allow the developer to specify the environment variables used by the deployed AWS Lambda. Defaults to mock values, which an admin can later update.
79+
# deployed-environment-variables: '["OPENAI_API_KEY","OPENAI_MODEL","GOOGLE_AI_API_KEY","GOOGLE_AI_MODEL"]'
7880
permissions:
7981
contents: read
8082
id-token: write

src/agents/llm_factory.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
from langchain_openai import ChatOpenAI
88
from langchain_openai import OpenAIEmbeddings
99
from langchain_google_genai import ChatGoogleGenerativeAI
10+
from dotenv import load_dotenv
11+
load_dotenv()
1012

1113
class AzureLLMs:
1214
def __init__(self, temperature: int = 0):

src/agents/utils/parse_json_to_prompt.py

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
""" File not to be modified. This file contains the conversion logic between the agent API and the Lambda Feedback backend."""
2-
31
from typing import List, Optional, Union, Dict
42

53
# questionSubmissionSummary type
@@ -85,13 +83,21 @@ def __init__(
8583
class QuestionDetails:
8684
def __init__(
8785
self,
86+
setNumber: Optional[int] = None,
87+
setName: Optional[str] = None,
88+
setDescription: Optional[str] = None,
89+
questionNumber: Optional[int] = None,
8890
questionTitle: Optional[str] = None,
8991
questionGuidance: Optional[str] = None,
9092
questionContent: Optional[str] = None,
9193
durationLowerBound: Optional[int] = None,
9294
durationUpperBound: Optional[int] = None,
9395
parts: Optional[List[PartDetails]] = [],
9496
):
97+
self.setNumber = setNumber
98+
self.setName = setName
99+
self.setDescription = setDescription
100+
self.questionNumber = questionNumber
95101
self.questionTitle = questionTitle
96102
self.questionGuidance = questionGuidance
97103
self.questionContent = questionContent
@@ -161,7 +167,7 @@ def format_response_area_details(responseArea: ResponseAreaDetails, studentSumma
161167
{submissionDetails}"""
162168

163169
def format_part_details(part: PartDetails, currentPart: CurrentPart, summary: List[StudentWorkResponseArea]) -> str:
164-
if not part or not part.publishedResponseAreas:
170+
if not part:
165171
return ''
166172

167173
responseAreas = "\n".join(
@@ -187,9 +193,9 @@ def format_part_details(part: PartDetails, currentPart: CurrentPart, summary: Li
187193
"""
188194

189195
questionDetails = f"""This is the question I am currently working on. I am currently working on Part ({convert_index_to_lowercase_letter(questionAccessInformation.currentPart.position)}). Below, you'll find its details, including the parts of the question, my responses for each response area, and the feedback I received. This information highlights my efforts and progress so far. Use this information to inform your understanding about the question materials provided to me and my work on them.
190-
Maths equations are in KaTex format, preserve them the same.
191-
192-
# Question: {questionInformation.questionTitle};
196+
Maths equations are in KaTeX format; preserve them as they are. Use British English spellings.
197+
{f'# Question Set {questionInformation.setNumber + 1}: {questionInformation.setName};' if questionInformation.setName and questionInformation.setNumber else ''}
198+
# Question{f' {questionInformation.setNumber + 1}.{questionInformation.questionNumber + 1}' if questionInformation.setNumber and questionInformation.questionNumber else ''}: {questionInformation.questionTitle};
193199
Guidance to Solve the Question: {questionInformation.questionGuidance or 'None'};
194200
Description of Question: {questionInformation.questionContent};
195201
Expected Time to Complete the Question: {f'{questionInformation.durationLowerBound} - {questionInformation.durationUpperBound} min;' if questionInformation.durationLowerBound and questionInformation.durationUpperBound else 'No specified duration.'}

src/agents/utils/synthetic_conversation_generation.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,11 @@
2323
try:
2424
from ..student_agent.student_agent import invoke_student_agent
2525
from .parse_json_to_prompt import parse_json_to_prompt
26-
from ..base_agent import invoke_base_agent
26+
from ..base_agent.base_agent import invoke_base_agent
2727
except ImportError:
2828
from src.agents.student_agent.student_agent import invoke_student_agent
2929
from src.agents.utils.parse_json_to_prompt import parse_json_to_prompt
30-
from src.agents.base_agent import invoke_base_agent
30+
from src.agents.base_agent.base_agent import invoke_base_agent
3131
import os
3232

3333

@@ -70,11 +70,11 @@ def generate_synthetic_conversations(raw_text: str, num_turns: int, student_agen
7070
# Student starts
7171
student_response = invoke_student_agent(message, conversation_history[:-1], summary, student_agent_type, question_response_details_prompt, conversation_id)
7272
conversation_history.append({
73-
"role": "assistant",
73+
"role": "user",
7474
"content": student_response["output"]
7575
})
7676
else:
77-
tutor_response = invoke_tutor_agent(message, conversation_history[:-1], summary, conversational_style, question_response_details_prompt, conversation_id)
77+
tutor_response = invoke_tutor_agent(message, conversation_history, summary, conversational_style, question_response_details_prompt, conversation_id)
7878
conversation_history.append({
7979
"role": "assistant",
8080
"content": tutor_response["output"]
@@ -88,6 +88,8 @@ def generate_synthetic_conversations(raw_text: str, num_turns: int, student_agen
8888
# Save Conversation
8989
conversation_output = {
9090
"conversation_id": conversation_id+"_"+student_agent_type+"_"+tutor_agent_type+"_synthetic",
91+
"student_agent_type": student_agent_type,
92+
"tutor_agent_type": tutor_agent_type,
9193
"conversation": conversation_history
9294
}
9395
return conversation_output

0 commit comments

Comments
 (0)