From 98f9b35ccde7dad7f3e6b9e259a201ee2784d15e Mon Sep 17 00:00:00 2001
From: Jason Dai
Date: Wed, 15 May 2024 09:21:18 -0700
Subject: [PATCH] fix: Fix the default value of response_column_name in
 EvalTask.evaluate()

PiperOrigin-RevId: 633978835
---
 vertexai/preview/evaluation/_eval_tasks.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/vertexai/preview/evaluation/_eval_tasks.py b/vertexai/preview/evaluation/_eval_tasks.py
index 9e24cc670a..cf4ea0c895 100644
--- a/vertexai/preview/evaluation/_eval_tasks.py
+++ b/vertexai/preview/evaluation/_eval_tasks.py
@@ -249,7 +249,7 @@ def _evaluate_with_experiment(
         model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
         prompt_template: Optional[str] = None,
         experiment_run_name: Optional[str] = None,
-        response_column_name: str = "response",
+        response_column_name: Optional[str] = None,
     ) -> EvalResult:
         """Runs an evaluation for the EvalTask with an experiment.
 
@@ -264,7 +264,7 @@ def _evaluate_with_experiment(
             to if an experiment is set for this EvalTask. If not provided, a
             random unique experiment run name is used.
           response_column_name: The column name of model response in the dataset. If
-            not set, default to `response`.
+            provided, this will override the `response_column_name` of the `EvalTask`.
 
         Returns:
             The evaluation result.
@@ -279,7 +279,7 @@ def _evaluate_with_experiment(
             prompt_template=prompt_template,
             content_column_name=self.content_column_name,
             reference_column_name=self.reference_column_name,
-            response_column_name=response_column_name or self.response_column_name,
+            response_column_name=response_column_name,
         )
         try:
             vertexai.preview.log_metrics(eval_result.summary_metrics)
@@ -293,7 +293,7 @@ def evaluate(
         model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
         prompt_template: Optional[str] = None,
         experiment_run_name: Optional[str] = None,
-        response_column_name: str = "response",
+        response_column_name: Optional[str] = None,
     ) -> EvalResult:
         """Runs an evaluation for the EvalTask.
 
@@ -308,7 +308,7 @@ def evaluate(
             to if an experiment is set for this EvalTask. If not provided, a
             random unique experiment run name is used.
           response_column_name: The column name of model response in the dataset. If
-            not set, default to `response`.
+            provided, this will override the `response_column_name` of the `EvalTask`.
 
         Returns:
             The evaluation result.
@@ -321,7 +321,7 @@ def evaluate(
                 "`vertexai.init(experiment='experiment_name')`for logging this"
                 " evaluation run."
             )
-
+        response_column_name = response_column_name or self.response_column_name
         experiment_run_name = experiment_run_name or f"{uuid.uuid4()}"
 
         if self.experiment and global_experiment_name:
@@ -354,7 +354,7 @@ def evaluate(
             prompt_template=prompt_template,
             content_column_name=self.content_column_name,
             reference_column_name=self.reference_column_name,
-            response_column_name=response_column_name or self.response_column_name,
+            response_column_name=response_column_name,
         )
 
         return eval_result
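
Usage sketch (not part of the patch): the snippet below illustrates the behavior change, assuming the preview evaluation API in the Vertex AI SDK and assuming EvalTask accepts response_column_name as a constructor argument, as the patched code's self.response_column_name suggests. The project ID, column names, and metric names are illustrative placeholders. Before this fix, evaluate() defaulted response_column_name to "response", which silently overrode a column name set on the EvalTask; with the default now None, the task-level setting is used unless an explicit override is passed.

import pandas as pd

import vertexai
from vertexai.preview.evaluation import EvalTask

# Placeholder project/location; replace with real values.
vertexai.init(project="my-project", location="us-central1")

# Bring-your-own-response dataset: candidate outputs live in "model_answer"
# (and a second candidate set in "model_answer_v2"), not in the default
# "response" column.
eval_dataset = pd.DataFrame(
    {
        "content": ["Summarize: the quick brown fox ...", "Summarize: ..."],
        "reference": ["A quick fox jumps.", "A short reference summary."],
        "model_answer": ["The fox jumps over the dog.", "Candidate summary."],
        "model_answer_v2": ["A fox leaps over a dog.", "Another candidate."],
    }
)

# Task-level response column; the metric names here are illustrative.
eval_task = EvalTask(
    dataset=eval_dataset,
    metrics=["exact_match", "rouge_l_sum"],
    response_column_name="model_answer",
)

# With this fix, evaluate() no longer defaults response_column_name to
# "response", so the task-level "model_answer" column is used here.
result = eval_task.evaluate()

# An explicit argument still overrides the task-level setting, e.g. to score
# the second set of candidate responses.
result_v2 = eval_task.evaluate(response_column_name="model_answer_v2")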