@@ -77,6 +77,9 @@ def _CreateEvaluationRunParameters_to_vertex(
   if getv(from_object, ["evaluation_config"]) is not None:
     setv(to_object, ["evaluationConfig"], getv(from_object, ["evaluation_config"]))
 
+  if getv(from_object, ["labels"]) is not None:
+    setv(to_object, ["labels"], getv(from_object, ["labels"]))
+
   if getv(from_object, ["config"]) is not None:
     setv(to_object, ["config"], getv(from_object, ["config"]))
 
@@ -236,6 +239,9 @@ def _EvaluationRun_from_vertex(
   if getv(from_object, ["inferenceConfigs"]) is not None:
     setv(to_object, ["inference_configs"], getv(from_object, ["inferenceConfigs"]))
 
+  if getv(from_object, ["labels"]) is not None:
+    setv(to_object, ["labels"], getv(from_object, ["labels"]))
+
   return to_object
 
 
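These two converters are symmetric: `_CreateEvaluationRunParameters_to_vertex` copies the snake_case `labels` field onto the camelCase wire payload, and `_EvaluationRun_from_vertex` copies it back. A minimal sketch of that round trip, assuming `getv`/`setv` behave as simple nested-dict path helpers (stand-ins here, not the SDK's real implementations):

```python
from typing import Any, Optional


def getv(obj: dict[str, Any], path: list[str]) -> Optional[Any]:
  """Simplified stand-in: walk `path` into a nested dict, None if absent."""
  for key in path:
    if not isinstance(obj, dict) or key not in obj:
      return None
    obj = obj[key]
  return obj


def setv(obj: dict[str, Any], path: list[str], value: Any) -> None:
  """Simplified stand-in: create intermediate dicts and set the leaf."""
  for key in path[:-1]:
    obj = obj.setdefault(key, {})
  obj[path[-1]] = value


# Round trip: client-side request -> Vertex payload -> client-side run.
request = {"labels": {"team": "eval", "env": "dev"}}
payload: dict[str, Any] = {}
if getv(request, ["labels"]) is not None:
  setv(payload, ["labels"], getv(request, ["labels"]))

run: dict[str, Any] = {}
if getv(payload, ["labels"]) is not None:
  setv(run, ["labels"], getv(payload, ["labels"]))

assert run["labels"] == {"team": "eval", "env": "dev"}
```

Because `labels` is already a flat `dict[str, str]`, no key renaming is needed in either direction, unlike `evaluation_config`/`evaluationConfig`.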
@@ -464,6 +470,7 @@ def _create_evaluation_run(
       display_name: Optional[str] = None,
       data_source: types.EvaluationRunDataSourceOrDict,
       evaluation_config: types.EvaluationRunConfigOrDict,
+      labels: Optional[dict[str, str]] = None,
       config: Optional[types.CreateEvaluationRunConfigOrDict] = None,
       inference_configs: Optional[
           dict[str, types.EvaluationRunInferenceConfigOrDict]
@@ -478,6 +485,7 @@ def _create_evaluation_run(
         display_name=display_name,
         data_source=data_source,
         evaluation_config=evaluation_config,
+        labels=labels,
         config=config,
         inference_configs=inference_configs,
     )
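The new `labels` parameter is a plain `dict[str, str]`, threaded through unchanged. This PR does no client-side validation; if the standard Cloud resource-label constraints apply here (lowercase keys up to 63 characters, restricted character set), a hypothetical pre-flight check, not part of this change, might look like:

```python
import re

# Assumed standard Cloud label constraints; the service is the source of truth.
_LABEL_KEY_RE = re.compile(r"^[a-z][a-z0-9_-]{0,62}$")
_LABEL_VALUE_RE = re.compile(r"^[a-z0-9_-]{0,63}$")


def _check_labels(labels: dict[str, str]) -> None:
  """Hypothetical helper: reject labels the service would likely refuse."""
  for key, value in labels.items():
    if not _LABEL_KEY_RE.match(key):
      raise ValueError(f"Invalid label key: {key!r}")
    if not _LABEL_VALUE_RE.match(value):
      raise ValueError(f"Invalid label value for {key!r}: {value!r}")


_check_labels({"team": "eval", "env": "dev"})  # passes
```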
@@ -1316,6 +1324,7 @@ def create_evaluation_run(
           list[types.EvaluationRunMetricOrDict]
       ] = None,  # TODO: Make required unified metrics available in prod.
       agent_info: Optional[types.AgentInfo] = None,
+      labels: Optional[dict[str, str]] = None,
       config: Optional[types.CreateEvaluationRunConfigOrDict] = None,
   ) -> types.EvaluationRun:
     """Creates an EvaluationRun."""
@@ -1353,13 +1362,25 @@ def create_evaluation_run(
               tools=agent_info.tool_declarations,
           )
       )
+      if (
+          not agent_info.agent
+          or len(agent_info.agent.split("reasoningEngines/")) != 2
+      ):
+        raise ValueError(
+            "agent_info.agent must be a valid reasoning engine resource name in the format projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}."
+        )
+      labels = labels or {}
+      labels["vertex-ai-evaluation-agent-engine-id"] = agent_info.agent.split(
+          "reasoningEngines/"
+      )[-1]
 
     return self._create_evaluation_run(  # type: ignore[no-any-return]
         name=name,
         display_name=display_name,
         data_source=dataset,
         evaluation_config=evaluation_config,
         inference_configs=inference_configs,
+        labels=labels,
         config=config,
     )
 
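For review context: when `agent_info` is supplied, the caller's labels are merged with an auto-generated `vertex-ai-evaluation-agent-engine-id` entry derived from the resource name. A valid name splits on `"reasoningEngines/"` into exactly two parts, the second being the engine ID; a sketch of that logic in isolation:

```python
# The split/validation logic from this hunk, demonstrated standalone.
agent = "projects/p/locations/us-central1/reasoningEngines/4567"
parts = agent.split("reasoningEngines/")
assert parts == ["projects/p/locations/us-central1/", "4567"]  # len == 2 -> valid

labels = {"team": "eval"}  # caller-supplied
labels["vertex-ai-evaluation-agent-engine-id"] = parts[-1]
assert labels == {
    "team": "eval",
    "vertex-ai-evaluation-agent-engine-id": "4567",
}

# A malformed name fails the length check before any request is sent:
# "" splits to 1 part, and a name with two markers splits to 3.
assert len("".split("reasoningEngines/")) == 1
assert len("a/reasoningEngines/1/reasoningEngines/2".split("reasoningEngines/")) == 3
```

One side effect worth noting: `labels = labels or {}` rebinds and then mutates, so a caller-supplied dict is modified in place whenever `agent_info` is set.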
@@ -1566,6 +1587,7 @@ async def _create_evaluation_run(
       display_name: Optional[str] = None,
       data_source: types.EvaluationRunDataSourceOrDict,
       evaluation_config: types.EvaluationRunConfigOrDict,
+      labels: Optional[dict[str, str]] = None,
       config: Optional[types.CreateEvaluationRunConfigOrDict] = None,
       inference_configs: Optional[
           dict[str, types.EvaluationRunInferenceConfigOrDict]
@@ -1580,6 +1602,7 @@ async def _create_evaluation_run(
         display_name=display_name,
         data_source=data_source,
         evaluation_config=evaluation_config,
+        labels=labels,
         config=config,
         inference_configs=inference_configs,
     )
@@ -2121,6 +2144,7 @@ async def create_evaluation_run(
           list[types.EvaluationRunMetricOrDict]
       ] = None,  # TODO: Make required unified metrics available in prod.
       agent_info: Optional[types.AgentInfo] = None,
+      labels: Optional[dict[str, str]] = None,
       config: Optional[types.CreateEvaluationRunConfigOrDict] = None,
   ) -> types.EvaluationRun:
     """Creates an EvaluationRun."""
@@ -2158,13 +2182,25 @@ async def create_evaluation_run(
               tools=agent_info.tool_declarations,
           )
       )
+      if (
+          not agent_info.agent
+          or len(agent_info.agent.split("reasoningEngines/")) != 2
+      ):
+        raise ValueError(
+            "agent_info.agent must be a valid reasoning engine resource name in the format projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}."
+        )
+      labels = labels or {}
+      labels["vertex-ai-evaluation-agent-engine-id"] = agent_info.agent.split(
+          "reasoningEngines/"
+      )[-1]
 
     result = await self._create_evaluation_run(  # type: ignore[no-any-return]
         name=name,
         display_name=display_name,
         data_source=dataset,
         evaluation_config=evaluation_config,
         inference_configs=inference_configs,
+        labels=labels,
         config=config,
     )
 
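The async surface mirrors the sync one exactly. A hedged end-to-end usage sketch, assuming a `vertexai.Client` entry point and `vertexai.types` export (both assumptions, not confirmed by this PR; the data source is a placeholder since its fields are out of scope here):

```python
import asyncio

import vertexai
from vertexai import types  # assumption: types module location

# Assumption: vertexai.Client is the entry point for this evals surface.
client = vertexai.Client(project="my-project", location="us-central1")

# Constructed elsewhere; data-source fields are not part of this change.
data_source: types.EvaluationRunDataSource = ...


async def main() -> None:
  run = await client.aio.evals.create_evaluation_run(
      display_name="demo-run",
      dataset=data_source,
      agent_info=types.AgentInfo(
          agent="projects/my-project/locations/us-central1/reasoningEngines/4567",
      ),
      labels={"team": "eval"},  # merged with the auto-added engine-id label
  )
  print(run.labels)  # expected to include "vertex-ai-evaluation-agent-engine-id": "4567"


asyncio.run(main())
```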