[Evaluation] Rename parameter name project_scope to `azure_ai_project` (#37409)

* Rename project_scope to azure_ai_project

* fix the docstring

* try to fix the docstring again
ninghu authored Sep 16, 2024
1 parent 6e5a7d6 commit 9a8f379
Showing 14 changed files with 106 additions and 96 deletions.
6 changes: 6 additions & 0 deletions sdk/evaluation/azure-ai-evaluation/CHANGELOG.md
@@ -2,6 +2,12 @@
 
 ## 1.0.0b1 (Unreleased)
 
+
+### Breaking Changes
+
+- The parameter name `project_scope` in the content safety evaluators has been renamed to `azure_ai_project` for consistency with the evaluate API and simulators.
+
+
 ### Features Added
 
 - First preview
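For callers upgrading across this release, the rename is mechanical. A minimal before/after sketch, assuming `ViolenceEvaluator` is importable from `azure.ai.evaluation` as in the README below, with placeholder project values:

```python
# Migration sketch for the project_scope -> azure_ai_project rename.
# Assumes ViolenceEvaluator is exported from azure.ai.evaluation (as in the
# README example below); all project values are placeholders.
from azure.ai.evaluation import ViolenceEvaluator

azure_ai_project = {
    "subscription_id": "<subscription_id>",
    "resource_group_name": "<resource_group_name>",
    "project_name": "<project_name>",
}

# Before: ViolenceEvaluator(project_scope=azure_ai_project)  # now a TypeError
# After: the first parameter is named azure_ai_project.
violence_eval = ViolenceEvaluator(azure_ai_project)
result = violence_eval(question="What is the capital of France?", answer="Paris.")
```

Callers passing the dictionary positionally, as the README example does, are unaffected; only keyword uses of `project_scope=` need updating.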
4 changes: 2 additions & 2 deletions sdk/evaluation/azure-ai-evaluation/README.md
@@ -61,13 +61,13 @@ if __name__ == "__main__":
     # Content Safety Evaluator
 
     # Initialize Project Scope
-    project_scope = {
+    azure_ai_project = {
         "subscription_id": "e0fd569c-e34a-4249-8c24-e8d723c7f054",
         "resource_group_name": "rg-test",
         "project_name": "project-test",
     }
 
-    violence_eval = ViolenceEvaluator(project_scope)
+    violence_eval = ViolenceEvaluator(azure_ai_project)
     violence_score = violence_eval(question="What is the capital of France?", answer="Paris.")
     pprint(violence_score)
     # {'violence': 'Very low',
@@ -21,9 +21,9 @@ class ContentSafetyEvaluator:
     """
     Initialize a content safety evaluator configured to evaluate content safety metrics for the QA scenario.
 
-    :param project_scope: The scope of the Azure AI project.
+    :param azure_ai_project: The scope of the Azure AI project.
         It contains subscription id, resource group, and project name.
-    :type project_scope: dict
+    :type azure_ai_project: dict
     :param parallel: If True, use parallel execution for evaluators. Else, use sequential execution.
         Default is True.
     :param credential: The credential for connecting to Azure AI project.

@@ -35,12 +35,12 @@ class ContentSafetyEvaluator:
     .. code-block:: python
 
-        project_scope = {
+        azure_ai_project = {
            "subscription_id": "<subscription_id>",
            "resource_group_name": "<resource_group_name>",
            "project_name": "<project_name>",
        }
-        eval_fn = ContentSafetyEvaluator(project_scope)
+        eval_fn = ContentSafetyEvaluator(azure_ai_project)
        result = eval_fn(
            question="What is the capital of France?",
            answer="Paris.",

@@ -66,13 +66,13 @@ class ContentSafetyEvaluator:
     }
     """
 
-    def __init__(self, project_scope: dict, parallel: bool = True, credential=None):
+    def __init__(self, azure_ai_project: dict, parallel: bool = True, credential=None):
         self._parallel = parallel
         self._evaluators = [
-            ViolenceEvaluator(project_scope, credential),
-            SexualEvaluator(project_scope, credential),
-            SelfHarmEvaluator(project_scope, credential),
-            HateUnfairnessEvaluator(project_scope, credential),
+            ViolenceEvaluator(azure_ai_project, credential),
+            SexualEvaluator(azure_ai_project, credential),
+            SelfHarmEvaluator(azure_ai_project, credential),
+            HateUnfairnessEvaluator(azure_ai_project, credential),
         ]
 
     def __call__(self, *, question: str, answer: str, **kwargs):
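The composite evaluator above fans a single question/answer pair out to the four per-metric evaluators, in parallel by default. A minimal construction sketch with the renamed parameter, assuming `ContentSafetyEvaluator` is exported from `azure.ai.evaluation` and that `azure-identity` supplies the credential:

```python
# Sketch of the composite evaluator after the rename; the import paths and
# credential choice are assumptions, project values are placeholders.
from azure.ai.evaluation import ContentSafetyEvaluator
from azure.identity import DefaultAzureCredential

azure_ai_project = {
    "subscription_id": "<subscription_id>",
    "resource_group_name": "<resource_group_name>",
    "project_name": "<project_name>",
}

# parallel=False runs the violence, sexual, self-harm, and hate/unfairness
# sub-evaluators sequentially, which makes failures easier to attribute.
eval_fn = ContentSafetyEvaluator(
    azure_ai_project,
    parallel=False,
    credential=DefaultAzureCredential(),
)
result = eval_fn(question="What is the capital of France?", answer="Paris.")
```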
@@ -16,16 +16,16 @@ class ContentSafetyEvaluatorBase(ABC):
     :param metric: The metric to be evaluated.
     :type metric: ~azure.ai.evaluation.evaluators._content_safety.flow.constants.EvaluationMetrics
-    :param project_scope: The scope of the Azure AI project.
+    :param azure_ai_project: The scope of the Azure AI project.
         It contains subscription id, resource group, and project name.
-    :type project_scope: Dict
+    :type azure_ai_project: Dict
     :param credential: The credential for connecting to Azure AI project.
     :type credential: ~azure.core.credentials.TokenCredential
     """
 
-    def __init__(self, metric: EvaluationMetrics, project_scope: dict, credential=None):
+    def __init__(self, metric: EvaluationMetrics, azure_ai_project: dict, credential=None):
         self._metric = metric
-        self._project_scope = project_scope
+        self._azure_ai_project = azure_ai_project
         self._credential = credential
 
     async def __call__(self, *, question: str, answer: str, **kwargs):

@@ -51,7 +51,7 @@ async def __call__(self, *, question: str, answer: str, **kwargs):
             metric_name=self._metric,
             question=question,
             answer=answer,
-            project_scope=self._project_scope,
+            project_scope=self._azure_ai_project,
             credential=self._credential,
         )
         return result
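Note that the rename stops at the SDK surface: the base class stores the dictionary as `self._azure_ai_project` but still passes it to the evaluation backend under the service-facing keyword `project_scope`, as the second hunk shows. A self-contained toy sketch (hypothetical classes, not the SDK's) of how the renamed argument threads through:

```python
# Toy stand-ins (not the SDK classes) illustrating the threading: the public
# constructor argument is azure_ai_project, while the backend keyword the
# base class emits remains project_scope, exactly as in the hunk above.
class _ToyEvaluatorBase:
    def __init__(self, metric: str, azure_ai_project: dict, credential=None):
        self._metric = metric
        self._azure_ai_project = azure_ai_project
        self._credential = credential

    def _build_request(self) -> dict:
        # The wire-level contract is unchanged by the rename.
        return {"metric_name": self._metric, "project_scope": self._azure_ai_project}


class _ToyViolenceEvaluator(_ToyEvaluatorBase):
    def __init__(self, azure_ai_project: dict, credential=None):
        super().__init__(metric="violence", azure_ai_project=azure_ai_project, credential=credential)


print(_ToyViolenceEvaluator({"project_name": "demo"})._build_request())
# {'metric_name': 'violence', 'project_scope': {'project_name': 'demo'}}
```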
@@ -27,9 +27,9 @@ class ContentSafetyChatEvaluator:
     """
     Initialize a content safety chat evaluator configured to evaluate content safety metrics for the chat scenario.
 
-    :param project_scope: The scope of the Azure AI project.
+    :param azure_ai_project: The scope of the Azure AI project.
         It contains subscription id, resource group, and project name.
-    :type project_scope: dict
+    :type azure_ai_project: dict
     :param eval_last_turn: Set to True to evaluate only the most recent exchange in the dialogue,
         focusing on the latest user inquiry and the assistant's corresponding response. Defaults to False.
     :type eval_last_turn: bool

@@ -45,12 +45,16 @@ class ContentSafetyChatEvaluator:
     .. code-block:: python
 
-        eval_fn = ContentSafetyChatEvaluator(model_config)
-        conversation = [
+        azure_ai_project = {
+            "subscription_id": "<subscription_id>",
+            "resource_group_name": "<resource_group_name>",
+            "project_name": "<project_name>",
+        }
+        eval_fn = ContentSafetyChatEvaluator(azure_ai_project)
+        result = eval_fn(conversation=[
             {"role": "user", "content": "What is the value of 2 + 2?"},
             {"role": "assistant", "content": "2 + 2 = 4"}
-        ]
-        result = ContentSafetyChatEvaluator(conversation=conversation)
+        ])
 
     **Output format**

@@ -82,14 +86,14 @@ class ContentSafetyChatEvaluator:
     }
     """
 
-    def __init__(self, project_scope: dict, eval_last_turn: bool = False, parallel: bool = True, credential=None):
+    def __init__(self, azure_ai_project: dict, eval_last_turn: bool = False, parallel: bool = True, credential=None):
         self._eval_last_turn = eval_last_turn
         self._parallel = parallel
         self._evaluators = [
-            ViolenceEvaluator(project_scope, credential),
-            SexualEvaluator(project_scope, credential),
-            SelfHarmEvaluator(project_scope, credential),
-            HateUnfairnessEvaluator(project_scope, credential),
+            ViolenceEvaluator(azure_ai_project, credential),
+            SexualEvaluator(azure_ai_project, credential),
+            SelfHarmEvaluator(azure_ai_project, credential),
+            HateUnfairnessEvaluator(azure_ai_project, credential),
         ]
 
     def __call__(self, *, conversation, **kwargs):
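Besides the rename, this hunk repairs the docstring example, which previously called the class itself with `conversation=` instead of the constructed evaluator. A minimal usage sketch with the `eval_last_turn` option from the signature above, assuming `ContentSafetyChatEvaluator` is exported from `azure.ai.evaluation`:

```python
# Chat evaluator sketch after the rename; the import path is an assumption
# and project values are placeholders.
from azure.ai.evaluation import ContentSafetyChatEvaluator

azure_ai_project = {
    "subscription_id": "<subscription_id>",
    "resource_group_name": "<resource_group_name>",
    "project_name": "<project_name>",
}

# eval_last_turn=True scores only the final user/assistant exchange.
chat_eval = ContentSafetyChatEvaluator(azure_ai_project, eval_last_turn=True)
result = chat_eval(conversation=[
    {"role": "user", "content": "What is the value of 2 + 2?"},
    {"role": "assistant", "content": "2 + 2 = 4"},
])
```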
@@ -11,12 +11,12 @@
 
 
 class _AsyncHateUnfairnessEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, project_scope: dict, credential=None):
+    def __init__(self, azure_ai_project: dict, credential=None):
         # Hate_fairness is the actual backend metric name. Which, uh, doesn't sound great.
         # so invert the name.
         super().__init__(
             metric=EvaluationMetrics.HATE_FAIRNESS,
-            project_scope=project_scope,
+            azure_ai_project=azure_ai_project,
             credential=credential,
         )

@@ -28,22 +28,22 @@ class HateUnfairnessEvaluator:
     """
     Initialize a hate-unfairness evaluator for hate unfairness score.
 
-    :param project_scope: The scope of the Azure AI project.
+    :param azure_ai_project: The scope of the Azure AI project.
         It contains subscription id, resource group, and project name.
-    :type project_scope: dict
+    :type azure_ai_project: dict
     :param credential: The credential for connecting to Azure AI project.
     :type credential: ~azure.core.credentials.TokenCredential
 
     **Usage**
 
     .. code-block:: python
 
-        project_scope = {
+        azure_ai_project = {
            "subscription_id": "<subscription_id>",
            "resource_group_name": "<resource_group_name>",
            "project_name": "<project_name>",
        }
-        eval_fn = HateUnfairnessEvaluator(project_scope)
+        eval_fn = HateUnfairnessEvaluator(azure_ai_project)
        result = eval_fn(question="What is the capital of France?", answer="Paris.")
 
     **Output format**

@@ -57,8 +57,8 @@ class HateUnfairnessEvaluator:
     }
     """
 
-    def __init__(self, project_scope: dict, credential=None) -> None:
-        self._async_evaluator = _AsyncHateUnfairnessEvaluator(project_scope, credential)
+    def __init__(self, azure_ai_project: dict, credential=None) -> None:
+        self._async_evaluator = _AsyncHateUnfairnessEvaluator(azure_ai_project, credential)
 
     def __call__(self, *, question: str, answer: str, **kwargs):
@@ -11,10 +11,10 @@
 
 
 class _AsyncSelfHarmEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, project_scope: dict, credential=None):
+    def __init__(self, azure_ai_project: dict, credential=None):
         super().__init__(
             metric=EvaluationMetrics.SELF_HARM,
-            project_scope=project_scope,
+            azure_ai_project=azure_ai_project,
             credential=credential,
         )

@@ -26,22 +26,22 @@ class SelfHarmEvaluator:
     """
     Initialize a self harm evaluator for self harm score.
 
-    :param project_scope: The scope of the Azure AI project.
+    :param azure_ai_project: The scope of the Azure AI project.
         It contains subscription id, resource group, and project name.
-    :type project_scope: dict
+    :type azure_ai_project: dict
     :param credential: The credential for connecting to Azure AI project.
     :type credential: ~azure.core.credentials.TokenCredential
 
     **Usage**
 
     .. code-block:: python
 
-        project_scope = {
+        azure_ai_project = {
            "subscription_id": "<subscription_id>",
            "resource_group_name": "<resource_group_name>",
            "project_name": "<project_name>",
        }
-        eval_fn = SelfHarmEvaluator(project_scope)
+        eval_fn = SelfHarmEvaluator(azure_ai_project)
        result = eval_fn(question="What is the capital of France?", answer="Paris.")
 
     **Output format**

@@ -55,8 +55,8 @@ class SelfHarmEvaluator:
     }
     """
 
-    def __init__(self, project_scope: dict, credential=None):
-        self._async_evaluator = _AsyncSelfHarmEvaluator(project_scope, credential)
+    def __init__(self, azure_ai_project: dict, credential=None):
+        self._async_evaluator = _AsyncSelfHarmEvaluator(azure_ai_project, credential)
 
     def __call__(self, *, question: str, answer: str, **kwargs):
@@ -11,10 +11,10 @@
 
 
 class _AsyncSexualEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, project_scope: dict, credential=None):
+    def __init__(self, azure_ai_project: dict, credential=None):
         super().__init__(
             metric=EvaluationMetrics.SEXUAL,
-            project_scope=project_scope,
+            azure_ai_project=azure_ai_project,
             credential=credential,
         )

@@ -26,22 +26,22 @@ class SexualEvaluator:
     """
     Initialize a sexual evaluator for sexual score.
 
-    :param project_scope: The scope of the Azure AI project.
+    :param azure_ai_project: The scope of the Azure AI project.
         It contains subscription id, resource group, and project name.
-    :type project_scope: dict
+    :type azure_ai_project: dict
     :param credential: The credential for connecting to Azure AI project.
     :type credential: ~azure.core.credentials.TokenCredential
 
     **Usage**
 
     .. code-block:: python
 
-        project_scope = {
+        azure_ai_project = {
            "subscription_id": "<subscription_id>",
            "resource_group_name": "<resource_group_name>",
            "project_name": "<project_name>",
        }
-        eval_fn = SexualEvaluator(project_scope)
+        eval_fn = SexualEvaluator(azure_ai_project)
        result = eval_fn(question="What is the capital of France?", answer="Paris.")
 
     **Output format**

@@ -55,8 +55,8 @@ class SexualEvaluator:
     }
     """
 
-    def __init__(self, project_scope: dict, credential=None):
-        self._async_evaluator = _AsyncSexualEvaluator(project_scope, credential)
+    def __init__(self, azure_ai_project: dict, credential=None):
+        self._async_evaluator = _AsyncSexualEvaluator(azure_ai_project, credential)
 
     def __call__(self, *, question: str, answer: str, **kwargs):
@@ -11,10 +11,10 @@
 
 
 class _AsyncViolenceEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, project_scope: dict, credential=None):
+    def __init__(self, azure_ai_project: dict, credential=None):
         super().__init__(
             metric=EvaluationMetrics.VIOLENCE,
-            project_scope=project_scope,
+            azure_ai_project=azure_ai_project,
             credential=credential,
         )

@@ -26,22 +26,22 @@ class ViolenceEvaluator:
     """
     Initialize a violence evaluator for violence score.
 
-    :param project_scope: The scope of the Azure AI project.
+    :param azure_ai_project: The scope of the Azure AI project.
         It contains subscription id, resource group, and project name.
-    :type project_scope: dict
+    :type azure_ai_project: dict
    :param credential: The credential for connecting to Azure AI project.
    :type credential: ~azure.core.credentials.TokenCredential
 
     **Usage**
 
     .. code-block:: python
 
-        project_scope = {
+        azure_ai_project = {
            "subscription_id": "<subscription_id>",
            "resource_group_name": "<resource_group_name>",
            "project_name": "<project_name>",
        }
-        eval_fn = ViolenceEvaluator(project_scope)
+        eval_fn = ViolenceEvaluator(azure_ai_project)
        result = eval_fn(question="What is the capital of France?", answer="Paris.")
 
     **Output format**

@@ -55,8 +55,8 @@ class ViolenceEvaluator:
     }
     """
 
-    def __init__(self, project_scope: dict, credential=None):
-        self._async_evaluator = _AsyncViolenceEvaluator(project_scope, credential)
+    def __init__(self, azure_ai_project: dict, credential=None):
+        self._async_evaluator = _AsyncViolenceEvaluator(azure_ai_project, credential)
 
     def __call__(self, *, question: str, answer: str, **kwargs):
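All four per-metric evaluators share the same shape: a public synchronous class that delegates to a private async evaluator, so the rename only has to touch each wrapper's `__init__`. A self-contained toy sketch of that wrapper pattern (hypothetical names; the SDK's actual sync-over-async mechanism is not shown here):

```python
# Toy illustration (not the SDK classes) of the sync wrapper delegating to a
# private async evaluator; asyncio.run stands in for the SDK's own bridging.
import asyncio


class _AsyncToySafetyEvaluator:
    def __init__(self, azure_ai_project: dict, credential=None):
        self._azure_ai_project = azure_ai_project
        self._credential = credential

    async def __call__(self, *, question: str, answer: str, **kwargs):
        # A real evaluator would call the annotation service here.
        return {"toy_metric": "Very low", "toy_metric_score": 0}


class ToySafetyEvaluator:
    def __init__(self, azure_ai_project: dict, credential=None):
        self._async_evaluator = _AsyncToySafetyEvaluator(azure_ai_project, credential)

    def __call__(self, *, question: str, answer: str, **kwargs):
        # Drive the async evaluator to completion from synchronous code.
        return asyncio.run(self._async_evaluator(question=question, answer=answer, **kwargs))


print(ToySafetyEvaluator({"project_name": "demo"})(question="2 + 2?", answer="4"))
```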