Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Python: Bedrock connector #9100

Merged
merged 21 commits into from
Oct 10, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
cc42162
Add Bedrock packages
TaoChenOSU Sep 25, 2024
009682a
Chat & text completion Skeleton
TaoChenOSU Sep 26, 2024
3085a0d
Move model provider to a separate file
TaoChenOSU Sep 26, 2024
ac3a8e1
Text completion and some streaming text completion; next finish strea…
TaoChenOSU Sep 27, 2024
d7d338c
chat completion non-streaming partially done; next finish it
TaoChenOSU Sep 30, 2024
28b36ba
chat completion non-streaming partially done; next add function calli…
TaoChenOSU Oct 1, 2024
e180142
chat completion non-streaming done; next add chat completion streaming
TaoChenOSU Oct 3, 2024
34e1209
Merge branch 'main' into local-branch-bedrock
TaoChenOSU Oct 4, 2024
337e64f
Streaming done; next embedding and documentation, and unit tests
TaoChenOSU Oct 4, 2024
79a667f
Text embedding
TaoChenOSU Oct 4, 2024
13319ee
Add model check
TaoChenOSU Oct 4, 2024
b759c97
Rearrange imports
TaoChenOSU Oct 4, 2024
9ccc082
Unit tests
TaoChenOSU Oct 7, 2024
cccabe8
More comprehensive integration tests for Bedrock
TaoChenOSU Oct 8, 2024
fb846fc
Fix integration tests
TaoChenOSU Oct 8, 2024
8625c1e
Samples and ReadMe
TaoChenOSU Oct 8, 2024
08b2e15
Set up AWS in workflow
TaoChenOSU Oct 8, 2024
c96ae7e
Fix unit tests
TaoChenOSU Oct 8, 2024
1a7ab1f
Merge branch 'main' into taochen/python-bedrock-connector
TaoChenOSU Oct 9, 2024
794b32c
Merge branch 'main' into taochen/python-bedrock-connector
TaoChenOSU Oct 10, 2024
8de07a4
Merge branch 'main' into taochen/python-bedrock-connector
TaoChenOSU Oct 10, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Chat & text completion Skeleton
  • Loading branch information
TaoChenOSU committed Sep 26, 2024
commit 009682ac1eb1c235270cfecdbc56775429323882
1 change: 1 addition & 0 deletions python/.cspell.json
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
"aiplatform",
"azuredocindex",
"azuredocs",
"boto",
"contentvector",
"contoso",
"datamodel",
Expand Down
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Copyright (c) Microsoft. All rights reserved.


from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings


class BedrockPromptExecutionSettings(PromptExecutionSettings):
    """Bedrock Prompt Execution Settings.

    Base class for the Bedrock chat and text execution settings below.
    """
    # The docstring is a valid class body, so the redundant ``...``
    # placeholder has been removed.


class BedrockChatPromptExecutionSettings(BedrockPromptExecutionSettings):
    """Bedrock Chat Prompt Execution Settings."""
    # The docstring is a valid class body, so the redundant ``...``
    # placeholder has been removed.


class BedrockTextPromptExecutionSettings(BedrockPromptExecutionSettings):
    """Bedrock Text Prompt Execution Settings."""
    # The docstring is a valid class body, so the redundant ``...``
    # placeholder has been removed.


class BedrockEmbeddingPromptExecutionSettings(PromptExecutionSettings):
    """Bedrock Embedding Prompt Execution Settings."""
    # NOTE(review): unlike the chat/text variants, this derives directly from
    # PromptExecutionSettings rather than BedrockPromptExecutionSettings —
    # confirm the asymmetry is deliberate.
    # The docstring is a valid class body, so the redundant ``...``
    # placeholder has been removed.
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Copyright (c) Microsoft. All rights reserved.

from typing import ClassVar

from semantic_kernel.kernel_pydantic import KernelBaseSettings
from semantic_kernel.utils.experimental_decorator import experimental_class


@experimental_class
class BedrockSettings(KernelBaseSettings):
    """Amazon Bedrock service settings.

    The settings are first loaded from environment variables with
    the prefix 'BEDROCK_'.
    If the environment variables are not found, the settings can
    be loaded from a .env file with the encoding 'utf-8'.
    If the settings are not found in the .env file, the settings
    are ignored; however, validation will fail alerting that the
    settings are missing.

    Optional settings for prefix 'BEDROCK_' are:
    - chat_model_id: str | None - The Amazon Bedrock chat model ID to use.
                (Env var BEDROCK_CHAT_MODEL_ID)
    - text_model_id: str | None - The Amazon Bedrock text model ID to use.
                (Env var BEDROCK_TEXT_MODEL_ID)
    - embedding_model_id: str | None - The Amazon Bedrock embedding model ID to use.
                (Env var BEDROCK_EMBEDDING_MODEL_ID)
    """

    # Prefix prepended to every field name when resolving env vars.
    env_prefix: ClassVar[str] = "BEDROCK_"

    # All three model IDs are optional here; each service class validates
    # that the one it needs is present and raises if it is missing.
    chat_model_id: str | None = None
    text_model_id: str | None = None
    embedding_model_id: str | None = None
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# Copyright (c) Microsoft. All rights reserved.

from abc import ABC
from enum import Enum
from typing import Any

import boto3

from semantic_kernel.kernel_pydantic import KernelBaseModel


class BedrockModelProvider(Enum):
    """Amazon Bedrock Model Provider Enum.

    This list contains the providers of all base models available on Amazon Bedrock.
    """

    # NOTE(review): the values appear to be the provider segment of Bedrock
    # model IDs (e.g. "anthropic.claude-...") — confirm against usage.
    AI21LABS = "ai21"
    AMAZON = "amazon"
    ANTHROPIC = "anthropic"
    COHERE = "cohere"
    META = "meta"
    MISTRALAI = "mistral"
    STABILITYAI = "stability"


class BedrockBase(KernelBaseModel, ABC):
    """Amazon Bedrock Service Base Class."""

    # Provider name reported by the telemetry/tracing decorators.
    MODEL_PROVIDER_NAME: str = "bedrock"

    # The boto3 Bedrock runtime client used by all derived services.
    bedrock_client: Any

    def __init__(
        self,
        client: Any | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize the Amazon Bedrock Service Base Class.

        Args:
            client: The Amazon Bedrock client to use. When omitted, a default
                boto3 "bedrock-runtime" client is created.
            **kwargs: Additional keyword arguments (e.g. ai_model_id forwarded
                by the concrete service classes) passed to the pydantic model.
        """
        # Route everything through the pydantic initializer: assigning
        # self.bedrock_client directly would bypass model construction
        # (the model is never initialized) and would silently drop the
        # extra kwargs such as ai_model_id passed in by subclasses.
        super().__init__(
            bedrock_client=client or boto3.client("bedrock-runtime"),
            **kwargs,
        )
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
# Copyright (c) Microsoft. All rights reserved.

import sys
from collections.abc import AsyncGenerator
from typing import TYPE_CHECKING, Any

if sys.version_info >= (3, 12):
from typing import override # pragma: no cover
else:
from typing_extensions import override # pragma: no cover

from pydantic import ValidationError

from semantic_kernel.connectors.ai.bedrock.bedrock_prompt_execution_settings import BedrockChatPromptExecutionSettings
from semantic_kernel.connectors.ai.bedrock.bedrock_settings import BedrockSettings
from semantic_kernel.connectors.ai.bedrock.services.bedrock_base import BedrockBase
from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
from semantic_kernel.utils.telemetry.model_diagnostics.decorators import (
trace_chat_completion,
trace_streaming_chat_completion,
)

if TYPE_CHECKING:
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent


class BedrockChatCompletion(BedrockBase, ChatCompletionClientBase):
    """Amazon Bedrock Chat Completion Service."""

    def __init__(self, model_id: str | None = None, client: Any | None = None) -> None:
        """Initialize the Amazon Bedrock Chat Completion Service.

        Args:
            model_id: The Amazon Bedrock chat model ID to use.
            client: The Amazon Bedrock client to use.

        Raises:
            ServiceInitializationError: If the settings fail validation or no
                chat model ID is resolved from the argument or environment.
        """
        try:
            # Falls back to the BEDROCK_CHAT_MODEL_ID env var / .env file
            # when model_id is not supplied explicitly.
            bedrock_settings = BedrockSettings.create(chat_model_id=model_id)
        except ValidationError as e:
            raise ServiceInitializationError("Failed to initialize the Amazon Bedrock Chat Completion Service.") from e

        if bedrock_settings.chat_model_id is None:
            raise ServiceInitializationError("The Amazon Bedrock Chat Model ID is missing.")

        super().__init__(ai_model_id=bedrock_settings.chat_model_id, client=client)

    # region Overriding base class methods

    # Override from AIServiceClientBase
    @override
    def get_prompt_execution_settings_class(self) -> type["PromptExecutionSettings"]:
        return BedrockChatPromptExecutionSettings

    @override
    @trace_chat_completion(BedrockBase.MODEL_PROVIDER_NAME)
    async def _inner_get_chat_message_contents(
        self,
        chat_history: "ChatHistory",
        settings: "PromptExecutionSettings",
    ) -> list["ChatMessageContent"]:
        # Coerce generic settings into the Bedrock-specific settings type.
        if not isinstance(settings, BedrockChatPromptExecutionSettings):
            settings = self.get_prompt_execution_settings_from_settings(settings)
        assert isinstance(settings, BedrockChatPromptExecutionSettings)  # nosec

        # NOTE(review): skeleton — always returns an empty list; the actual
        # Bedrock API call is not implemented in this commit.
        return []

    @override
    @trace_streaming_chat_completion(BedrockBase.MODEL_PROVIDER_NAME)
    async def _inner_get_streaming_chat_message_contents(
        self,
        chat_history: "ChatHistory",
        settings: "PromptExecutionSettings",
    ) -> AsyncGenerator[list["StreamingChatMessageContent"], Any]:
        # Coerce generic settings into the Bedrock-specific settings type.
        if not isinstance(settings, BedrockChatPromptExecutionSettings):
            settings = self.get_prompt_execution_settings_from_settings(settings)
        assert isinstance(settings, BedrockChatPromptExecutionSettings)  # nosec

        # NOTE(review): skeleton — yields a single empty chunk; streaming
        # support is not implemented in this commit.
        yield []

    # endregion
Original file line number Diff line number Diff line change
@@ -1,9 +1,84 @@
# Copyright (c) Microsoft. All rights reserved.

import sys
from collections.abc import AsyncGenerator
from typing import TYPE_CHECKING, Any

from semantic_kernel.connectors.ai.bedrock.bedrock_prompt_execution_settings import BedrockTextPromptExecutionSettings
from semantic_kernel.contents.streaming_text_content import StreamingTextContent
from semantic_kernel.contents.text_content import TextContent

if sys.version_info >= (3, 12):
from typing import override # pragma: no cover
else:
from typing_extensions import override # pragma: no cover

from pydantic import ValidationError

from semantic_kernel.connectors.ai.bedrock.bedrock_settings import BedrockSettings
from semantic_kernel.connectors.ai.bedrock.services.bedrock_base import BedrockBase
from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase
from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
from semantic_kernel.utils.telemetry.model_diagnostics.decorators import (
trace_streaming_text_completion,
trace_text_completion,
)

if TYPE_CHECKING:
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings

class BedrockTextCompletion(BedrockBase, TextCompletionClientBase):
    """Amazon Bedrock Text Completion Service."""

    def __init__(self, model_id: str | None = None, client: Any | None = None) -> None:
        """Initialize the Amazon Bedrock Text Completion Service.

        Args:
            model_id: The Amazon Bedrock text model ID to use.
            client: The Amazon Bedrock client to use.

        Raises:
            ServiceInitializationError: If the settings fail validation or no
                text model ID is resolved from the argument or environment.
        """
        try:
            settings = BedrockSettings.create(text_model_id=model_id)
        except ValidationError as e:
            raise ServiceInitializationError("Failed to initialize the Amazon Bedrock Text Completion Service.") from e

        if settings.text_model_id is None:
            raise ServiceInitializationError("The Amazon Bedrock Text Model ID is missing.")

        super().__init__(ai_model_id=settings.text_model_id, client=client)

    # region Overriding base class methods

    # Override from AIServiceClientBase
    @override
    def get_prompt_execution_settings_class(self) -> type["PromptExecutionSettings"]:
        return BedrockTextPromptExecutionSettings

    @override
    @trace_text_completion(BedrockBase.MODEL_PROVIDER_NAME)
    async def _inner_get_text_contents(
        self,
        prompt: str,
        settings: "PromptExecutionSettings",
    ) -> list[TextContent]:
        # Normalize to the Bedrock-specific settings type before use.
        settings = (
            settings
            if isinstance(settings, BedrockTextPromptExecutionSettings)
            else self.get_prompt_execution_settings_from_settings(settings)
        )
        assert isinstance(settings, BedrockTextPromptExecutionSettings)  # nosec

        # Skeleton: no Bedrock call yet; returns no contents.
        return []

    @override
    @trace_streaming_text_completion(BedrockBase.MODEL_PROVIDER_NAME)
    async def _inner_get_streaming_text_contents(
        self,
        prompt: str,
        settings: "PromptExecutionSettings",
    ) -> AsyncGenerator[list[StreamingTextContent], Any]:
        # Normalize to the Bedrock-specific settings type before use.
        settings = (
            settings
            if isinstance(settings, BedrockTextPromptExecutionSettings)
            else self.get_prompt_execution_settings_from_settings(settings)
        )
        assert isinstance(settings, BedrockTextPromptExecutionSettings)  # nosec

        # Skeleton: no Bedrock call yet; yields one empty chunk.
        yield []

    # endregion