Skip to content

Commit

Permalink
Fix bug in evaluation metrics
Browse files Browse the repository at this point in the history
  • Loading branch information
ignorejjj committed Jul 31, 2024
1 parent 785a901 commit d0f403e
Showing 1 changed file with 4 additions and 3 deletions.
7 changes: 4 additions & 3 deletions flashrag/evaluator/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,16 +30,17 @@ def calculate_metric(self, data):
return {}, []

def get_dataset_answer(self, data):
    """Return the per-sample gold answers for *data* as a list of lists.

    Handles two dataset layouts:
      * Free-form QA: ``data.golden_answers`` already contains the answer
        strings for each sample, so it is returned as-is.
      * Multi-choice: ``data.golden_answers`` contains *indices* into the
        per-sample option lists in ``data.choices``; the indices are mapped
        back to the corresponding answer strings.

    Args:
        data: Dataset object exposing ``choices`` (list of per-sample option
            lists) and ``golden_answers`` (list of per-sample answers or
            answer indices). NOTE(review): assumes ``choices`` and
            ``golden_answers`` are aligned 1:1 by sample — confirm upstream.

    Returns:
        list[list]: one list of gold answer strings per sample.
    """
    if any(choice == [] for choice in data.choices):
        # At least one sample has no candidate options, so this is not a
        # multi-choice dataset: golden_answers already holds answer strings.
        golden_answers_list = data.golden_answers
    else:
        # multi-choice dataset: map each gold index back to its option text
        all_choices_list = data.choices
        golden_choice_idx_list = data.golden_answers
        golden_answers_list = [
            [choices[idx] for idx in idx_list]
            for choices, idx_list in zip(all_choices_list, golden_choice_idx_list)
        ]

    return golden_answers_list


Expand Down

0 comments on commit d0f403e

Please sign in to comment.