Commit

WIP: refactor
yash-learner committed Mar 12, 2024
1 parent 9ada4e7 commit fd8fd05
Showing 4 changed files with 68 additions and 215 deletions.
157 changes: 25 additions & 132 deletions app/open_ai_client.rb
@@ -1,5 +1,6 @@
require "openai"
require "yaml"
require "json"

class OpenAIClient
def initialize
@@ -9,6 +10,7 @@ def initialize
@model = @config.fetch("OPEN_AI_MODEL", "gpt-3.5-turbo")
@temperature = @config.fetch("OPEN_AI_TEMPERATURE", 0.1).to_f
@system_prompt = @config.fetch("SYSTEM_PROMPT", system_prompt_default)
@submission = Submission.new
end

def extract_relevant_step_configuration
@@ -31,108 +33,6 @@ def extract_relevant_step_configuration
@config
end

def create_feedback_function
{
type: "function",
function: {
name: "create_feedback",
description: "Creates feedback for a student submission",
parameters: {
type: "object",
properties: {
feedback: {
type: "string",
description: "The feedback to be added to a student submission"
}
},
required: ["feedback"]
}
}
}
end

def allowed_grades
evaluation_criteria = Submission.new.evaluation_criteria
evaluation_criteria.map do |criteria|
{evaluation_criteria_id: criteria["id"], allowed_grades: (1..criteria["max_grade"]).to_a}.to_json
end
end

def create_grading_function
assign_grades = ENV.fetch("ASSIGN_GRADES", "false") == "true"
if assign_grades
{
type: "function",
function: {
name: "create_grading",
description: "Creates grading for a student submission",
parameters: {
type: "object",
properties: {
status: {
type: "string",
enum: ["accepted", "rejected"]
},
feedback: {
type: "string",
description: "The feedback to be added to a student submission"
},
grades: {
type: "array",
items: {
type: "object",
properties: {
evaluationCriterionId: {
type: "string",
enum: Submission.new.evaluation_criteria_ids,
description: "The Id of evaluation criteria"
},
grade: {
type: "integer",
description: "The grade value choosen from allowed grades array for choosen evaluatuion_criterion_id for a submission based on quality"
}
},
required: ["evaluationCriterionId", "grade"]
}
}
},
required: ["status", "feedback", "grades"]
}
}
}
else
{
type: "function",
function: {
name: "create_grading",
description: "Creates grading for a student submission",
parameters: {
type: "object",
properties: {
status: {
type: "string",
enum: ["accepted", "rejected"]
},
feedback: {
type: "string",
description: "The feedback to be added to a student submission"
}
},
required: ["feedback"]
}
}
}
end
end

def function
if ENV.fetch("SKIP_GRADING", "false") == "true"
create_feedback_function
else
create_grading_function
end
end

def ask
puts prompt
response = @client.chat(
@@ -141,35 +41,37 @@ def ask
messages: [
{role: "system", content: prompt}
],
tools: Reviewer.avilable_actions,
tools: Reviewer.new.available_tools,
temperature: @temperature
}
)
puts response
if message["role"] == "assistant" && message["function_call"]
function_name = message.dig("function_call", "name")
args =
JSON.parse(
message.dig("function_call", "arguments"),
{ symbolize_names: true },
)

{function_name: function_name, args: args}

message = response.dig("choices", 0, "message")
if message["role"] == "assistant" && message["tool_calls"]
message["tool_calls"].each do |tool_call|
function_name = tool_call.dig("function", "name")
args_json = tool_call.dig("function", "arguments")
begin
args = JSON.parse(args_json, symbolize_names: true)
return {function_name: function_name, args: args}
rescue JSON::ParserError => e
puts "Error parsing JSON arguments: #{e.message}"
end
end
else
{function_name: "errored", args: {}}
end

end

def prompt
@system_prompt
.gsub("${ROLE_PROMPT}", default_role_prompt)
.gsub("${INPUT_DESCRIPTION}", default_input_prompt)
.gsub("${USER_PROMPT}", default_user_prompt)
.gsub("${SUBMISSION}", "#{Submission.new.checklist}")
.gsub("${SUBMISSION}", "#{@submission.checklist}")
.gsub("${EC_PROMPT}", default_evaluation_criteria_prompt)
.gsub("${SUBMISSION_EC}", "#{allowed_grades}")
.gsub("${OUTPUT_DESCRIPTION}", default_output_prompt)
.gsub("${SUBMISSION_EC}", "#{@submission.evaluation_criteria}")
end

def system_prompt_default
@@ -181,8 +83,6 @@ def system_prompt_default
#{@config.fetch("USER_PROMPT", "${USER_PROMPT}")}
#{@config.fetch("EC_PROMPT", "${EC_PROMPT}")}
#{@config.fetch("OUTPUT_DESCRIPTION", "${OUTPUT_DESCRIPTION}")}
SYSTEM_PROMPT
end

@@ -214,26 +114,19 @@ def default_input_prompt
end

def default_evaluation_criteria_prompt
if ENV.fetch("ASSIGN_GRADES", "false") == "true"
if @submission.evaluation_criteria.present?
<<~EC_PROMPT
The following is an array of objects. Each object has two keys
- evaluation_criteria_id: This key stores the identifier for the evaluation criteria, which can be either a numeric value or a string. This identifier is unique for each set of criteria and is used to reference the specific evaluation criteria being described.
- allowed_grades: Associated with this key is an array of integers, which represents the set of permissible grades for the associated evaluation criterion (evaluation_criteria_id). These grades are predefined and indicate the possible outcomes or ratings that can be assigned based on the evaluation criterion.
The following is an array of objects. Each object has the following keys:
- id: This key stores the identifier for the evaluation criterion, which can be either a numeric value or a string.
- name: The name of the evaluation criterion.
- max_grade: The maximum grade that can be assigned for this criterion.
- grade_labels: An array of objects, each containing a 'grade' and a 'label'. 'grade' is an integer representing a possible grade for the criterion, and 'label' is a description of what this grade signifies.
The evaluation_criteria for this submission are:
The evaluation criteria for this submission are:
${SUBMISSION_EC}
EC_PROMPT
else
""
end
end
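
For concreteness, a hypothetical example of the evaluation criteria structure this prompt describes (field names follow the prompt above; the values are illustrative, not from this commit):

# Hypothetical data matching the shape described in the prompt.
evaluation_criteria = [
  {
    "id" => "42",
    "name" => "Code quality",
    "max_grade" => 3,
    "grade_labels" => [
      {"grade" => 1, "label" => "Needs improvement"},
      {"grade" => 2, "label" => "Good"},
      {"grade" => 3, "label" => "Excellent"}
    ]
  }
]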

def default_output_prompt
<<~OUTPUT_PROMPT
The following is the expected json schema for the response.
#{function}
If the student submission is not related to question, share generic feedback.
OUTPUT_PROMPT
end
end
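
For reference, a minimal sketch of the chat response shape the refactored ask method parses. The payload below is an assumption based on the OpenAI tools API, not output captured from this commit:

# Hypothetical response, illustrating what `ask` digs into.
response = {
  "choices" => [
    {
      "message" => {
        "role" => "assistant",
        "tool_calls" => [
          {
            "function" => {
              "name" => "create_grading",
              # Arguments arrive as a JSON string; `ask` parses them with
              # symbolize_names: true and returns {function_name:, args:}.
              "arguments" => '{"status":"accepted","feedback":"Well done!","grades":[]}'
            }
          }
        ]
      }
    }
  ]
}

message = response.dig("choices", 0, "message")
message["tool_calls"].first.dig("function", "name") # => "create_grading"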
17 changes: 5 additions & 12 deletions app/pupilfirst_api.rb
@@ -40,12 +40,12 @@ def initialize(submission = Submission.new)
end

def grade(result)
return puts "Unknown status: #{result["status"].inspect}. Skipping grading..." unless valid_status?(result["status"])
return puts "Unknown status: #{result[:status].inspect}. Skipping grading..." unless valid_status?(result[:status])

variables = {
submissionId: @submission.id,
checklist: @submission.checklist,
feedback: result["feedback"]
feedback: result[:feedback]
}

grades = grades_based_on(result)
@@ -61,7 +61,7 @@ def grade(result)
def add_feedback(result)
variables = {
submissionId: @submission.id,
feedback: result["feedback"]
feedback: result[:feedback]
}

log_variables(variables) if @test_mode
Expand All @@ -77,15 +77,8 @@ def valid_status?(status)
end

def grades_based_on(result)
if result["status"] == "accepted" && ENV.fetch("ASSIGN_GRADES", "false") == "true"
result["grades"]
elsif result["status"] == "accepted"
@submission.evaluation_criteria.map do |criteria|
{
evaluationCriterionId: criteria["id"],
grade: criteria["max_grade"]
}
end
if result[:status] == "accepted"
result[:grades]
else
[]
end
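
The switch from result["status"] to result[:status] above lines up with the symbolize_names: true JSON parsing in OpenAIClient#ask. A sketch of the result hash these methods now expect (values are illustrative):

# Illustrative only: `result` as returned by OpenAIClient#ask.
result = {
  status: "accepted",
  feedback: "Great work. All checklist items are covered.",
  grades: [
    {evaluationCriterionId: "42", grade: 3}
  ]
}

result[:status]  # => "accepted"
result["status"] # => nil, which is why the string-key lookups had to go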
79 changes: 31 additions & 48 deletions app/reviewer.rb
@@ -1,44 +1,4 @@
class Reviewer
def review
{
type: "function",
function: {
name: "create_feedback",
description: "Creates feedback for a student submission",
parameters: {
type: "object",
properties: {
feedback: {
type: "string",
description: "The feedback to be added to a student submission"
}
},
required: ["feedback"]
}
}
}
end

def reject
{
type: "function",
function: {
name: "create_feedback",
description: "Creates feedback for a student submission",
parameters: {
type: "object",
properties: {
feedback: {
type: "string",
description: "The feedback to be added to a student submission"
}
},
required: ["feedback"]
}
}
}
end

def create_feedback
{
type: "function",
@@ -50,7 +10,7 @@ def create_feedback
properties: {
feedback: {
type: "string",
description: "The feedback to be added to a student submission"
description: "The feedback for student submission in markdown."
}
},
required: ["feedback"]
@@ -59,27 +19,50 @@
}
end

def dynamic_grading
def grade
{
type: "function",
function: {
name: "create_feedback",
description: "Creates feedback for a student submission",
name: "create_grading",
description: "Creates grading for a student submission",
parameters: {
type: "object",
properties: {
status: {
type: "string",
enum: ["accepted", "rejected"]
},
feedback: {
type: "string",
description: "The feedback to be added to a student submission"
description: "The feedback for student submission in markdown."
},
grades: {
type: "array",
description: "The grades to be added to a student submission. This will be an empty array when a submission is rejected",
items: {
type: "object",
properties: {
evaluationCriterionId: {
type: "string",
enum: Submission.new.evaluation_criteria_ids,
description: "The Id of evaluation criteria"
},
grade: {
type: "integer",
description: "The grade value choosen from allowed grades array for choosen evaluatuion_criterion_id for a submission based on quality"
}
},
required: ["evaluationCriterionId", "grade"]
}
}
},
required: ["feedback"]
required: ["status", "feedback", "grades"]
}
}
}
end

def avilable_actions
[review, reject, create_feedback, dynamic_grading]
def available_tools
[create_feedback, grade]
end
end
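
As used in OpenAIClient#ask, the consolidated tool list feeds straight into the chat call. A minimal usage sketch, assuming a ruby-openai client configured as in app/open_ai_client.rb (the access-token env var name is an assumption):

require "openai"

# Hypothetical wiring, mirroring OpenAIClient#ask in this commit.
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))

client.chat(
  parameters: {
    model: "gpt-3.5-turbo",
    messages: [{role: "system", content: "You are a code reviewer."}],
    tools: Reviewer.new.available_tools, # [create_feedback, grade]
    temperature: 0.1
  }
)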