Skip to content

Commit 5b3010d

Browse files
vertex-sdk-bot authored and copybara-github committed
feat: Add support for referencing registered metrics by resource name in evaluation run API
PiperOrigin-RevId: 878604099
1 parent b3bae32 commit 5b3010d

3 files changed

Lines changed: 35 additions & 0 deletions

File tree

vertexai/_genai/_evals_common.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1329,6 +1329,15 @@ def _resolve_evaluation_run_metrics(
13291329
for metric_instance in metrics:
13301330
if isinstance(metric_instance, types.EvaluationRunMetric):
13311331
resolved_metrics_list.append(metric_instance)
1332+
elif isinstance(metric_instance, str) and metric_instance.startswith(
1333+
"projects/"
1334+
):
1335+
resolved_metrics_list.append(
1336+
types.EvaluationRunMetric(
1337+
metric=metric_instance.split("/")[-1],
1338+
metric_resource_name=metric_instance,
1339+
)
1340+
)
13321341
elif isinstance(
13331342
metric_instance, _evals_metric_loaders.LazyLoadedPrebuiltMetric
13341343
):

vertexai/_genai/evals.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -346,6 +346,13 @@ def _EvaluationRunMetric_from_vertex(
346346
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
347347
)
348348

349+
if getv(from_object, ["metricResourceName"]) is not None:
350+
setv(
351+
to_object,
352+
["metric_resource_name"],
353+
getv(from_object, ["metricResourceName"]),
354+
)
355+
349356
return to_object
350357

351358

@@ -364,6 +371,13 @@ def _EvaluationRunMetric_to_vertex(
364371
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
365372
)
366373

374+
if getv(from_object, ["metric_resource_name"]) is not None:
375+
setv(
376+
to_object,
377+
["metricResourceName"],
378+
getv(from_object, ["metric_resource_name"]),
379+
)
380+
367381
return to_object
368382

369383

vertexai/_genai/types/common.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2326,6 +2326,9 @@ class LLMBasedMetricSpec(_common.BaseModel):
23262326
default=None,
23272327
description="""Dynamically generate rubrics using this specification.""",
23282328
)
2329+
metric_resource_name: Optional[str] = Field(
2330+
default=None, description="""The resource name of the metric definition."""
2331+
)
23292332

23302333

23312334
class LLMBasedMetricSpecDict(TypedDict, total=False):
@@ -2350,6 +2353,9 @@ class LLMBasedMetricSpecDict(TypedDict, total=False):
23502353
rubric_generation_spec: Optional[RubricGenerationSpecDict]
23512354
"""Dynamically generate rubrics using this specification."""
23522355

2356+
metric_resource_name: Optional[str]
2357+
"""The resource name of the metric definition."""
2358+
23532359

23542360
LLMBasedMetricSpecOrDict = Union[LLMBasedMetricSpec, LLMBasedMetricSpecDict]
23552361

@@ -2482,6 +2488,9 @@ class EvaluationRunMetric(_common.BaseModel):
24822488
metric_config: Optional[UnifiedMetric] = Field(
24832489
default=None, description="""The unified metric used for evaluation run."""
24842490
)
2491+
metric_resource_name: Optional[str] = Field(
2492+
default=None, description="""The resource name of the metric definition."""
2493+
)
24852494

24862495

24872496
class EvaluationRunMetricDict(TypedDict, total=False):
@@ -2493,6 +2502,9 @@ class EvaluationRunMetricDict(TypedDict, total=False):
24932502
metric_config: Optional[UnifiedMetricDict]
24942503
"""The unified metric used for evaluation run."""
24952504

2505+
metric_resource_name: Optional[str]
2506+
"""The resource name of the metric definition."""
2507+
24962508

24972509
EvaluationRunMetricOrDict = Union[EvaluationRunMetric, EvaluationRunMetricDict]
24982510

0 commit comments

Comments (0)