
Commit d9d1264

tests: add tests for litellm message conversion
1 parent 36fcaf9

1 file changed: +161 -0


tests/integrations/litellm/test_litellm.py

Lines changed: 161 additions & 0 deletions
@@ -1,3 +1,4 @@
+import base64
 import json
 import pytest
 import time
@@ -23,6 +24,7 @@ async def __call__(self, *args, **kwargs):
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations.litellm import (
     LiteLLMIntegration,
+    _convert_message_parts,
     _input_callback,
     _success_callback,
     _failure_callback,
@@ -753,3 +755,162 @@ def test_litellm_message_truncation(sentry_init, capture_events):
     assert "small message 4" in str(parsed_messages[0])
     assert "small message 5" in str(parsed_messages[1])
     assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5
+
+
+IMAGE_DATA = b"fake_image_data_12345"
+IMAGE_B64 = base64.b64encode(IMAGE_DATA).decode("utf-8")
+IMAGE_DATA_URI = f"data:image/png;base64,{IMAGE_B64}"
+
+
+def test_binary_content_encoding_image_url(sentry_init, capture_events):
+    sentry_init(
+        integrations=[LiteLLMIntegration(include_prompts=True)],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+    events = capture_events()
+
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "Look at this image:"},
+                {
+                    "type": "image_url",
+                    "image_url": {"url": IMAGE_DATA_URI, "detail": "high"},
+                },
+            ],
+        }
+    ]
+    mock_response = MockCompletionResponse()
+
+    with start_transaction(name="litellm test"):
+        kwargs = {"model": "gpt-4-vision-preview", "messages": messages}
+        _input_callback(kwargs)
+        _success_callback(kwargs, mock_response, datetime.now(), datetime.now())
+
+    (event,) = events
+    (span,) = event["spans"]
+    messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
+
+    blob_item = next(
+        (
+            item
+            for msg in messages_data
+            if "content" in msg
+            for item in msg["content"]
+            if item.get("type") == "blob"
+        ),
+        None,
+    )
+    assert blob_item is not None
+    assert blob_item["modality"] == "image"
+    assert blob_item["mime_type"] == "data:image/png"
+    assert IMAGE_B64 in blob_item["content"] or "[Filtered]" in str(
+        blob_item["content"]
+    )
+
+
+def test_binary_content_encoding_mixed_content(sentry_init, capture_events):
+    sentry_init(
+        integrations=[LiteLLMIntegration(include_prompts=True)],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+    events = capture_events()
+
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "Here is an image:"},
+                {
+                    "type": "image_url",
+                    "image_url": {"url": IMAGE_DATA_URI},
+                },
+                {"type": "text", "text": "What do you see?"},
+            ],
+        }
+    ]
+    mock_response = MockCompletionResponse()
+
+    with start_transaction(name="litellm test"):
+        kwargs = {"model": "gpt-4-vision-preview", "messages": messages}
+        _input_callback(kwargs)
+        _success_callback(kwargs, mock_response, datetime.now(), datetime.now())
+
+    (event,) = events
+    (span,) = event["spans"]
+    messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
+
+    content_items = [
+        item for msg in messages_data if "content" in msg for item in msg["content"]
+    ]
+    assert any(item.get("type") == "text" for item in content_items)
+    assert any(item.get("type") == "blob" for item in content_items)
+
+
+def test_binary_content_encoding_uri_type(sentry_init, capture_events):
+    sentry_init(
+        integrations=[LiteLLMIntegration(include_prompts=True)],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+    events = capture_events()
+
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "image_url",
+                    "image_url": {"url": "https://example.com/image.jpg"},
+                }
+            ],
+        }
+    ]
+    mock_response = MockCompletionResponse()
+
+    with start_transaction(name="litellm test"):
+        kwargs = {"model": "gpt-4-vision-preview", "messages": messages}
+        _input_callback(kwargs)
+        _success_callback(kwargs, mock_response, datetime.now(), datetime.now())
+
+    (event,) = events
+    (span,) = event["spans"]
+    messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
+
+    uri_item = next(
+        (
+            item
+            for msg in messages_data
+            if "content" in msg
+            for item in msg["content"]
+            if item.get("type") == "uri"
+        ),
+        None,
+    )
+    assert uri_item is not None
+    assert uri_item["uri"] == "https://example.com/image.jpg"
+
+
+def test_convert_message_parts_direct():
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "Hello"},
+                {
+                    "type": "image_url",
+                    "image_url": {"url": IMAGE_DATA_URI},
+                },
+            ],
+        }
+    ]
+    converted = _convert_message_parts(messages)
+    blob_item = next(
+        item for item in converted[0]["content"] if item.get("type") == "blob"
+    )
+    assert blob_item["modality"] == "image"
+    assert blob_item["mime_type"] == "data:image/png"
+    assert IMAGE_B64 in blob_item["content"]
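
Note: the four tests above pin down the conversion contract end to end. As a reading aid, here is a minimal sketch of a transform that would satisfy those assertions. It is reconstructed from the test expectations alone, not the SDK's actual _convert_message_parts code; the helper name convert_message_parts and the exact data-URI parsing are assumptions. Image parts carrying a base64 data URI become "blob" entries (keeping the base64 payload), plain URLs become "uri" entries, and everything else passes through unchanged.

import base64


def convert_message_parts(messages):
    # Hypothetical stand-in inferred from the tests above; not SDK code.
    converted = []
    for message in messages:
        content = message.get("content")
        if not isinstance(content, list):
            converted.append(message)
            continue
        new_content = []
        for part in content:
            if part.get("type") == "image_url":
                url = part["image_url"]["url"]
                if url.startswith("data:") and ";base64," in url:
                    # "data:image/png;base64,<payload>" -> blob part; note the
                    # tests expect mime_type to keep its "data:" prefix.
                    mime_type, payload = url.split(";base64,", 1)
                    new_content.append(
                        {
                            "type": "blob",
                            "modality": "image",
                            "mime_type": mime_type,
                            "content": payload,
                        }
                    )
                else:
                    # Remote images are referenced by URL, not inlined.
                    new_content.append({"type": "uri", "uri": url})
            else:
                new_content.append(part)
        converted.append({**message, "content": new_content})
    return converted


# Mirrors test_convert_message_parts_direct and the uri-type test.
b64 = base64.b64encode(b"fake_image_data_12345").decode("utf-8")
out = convert_message_parts(
    [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Hello"},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
                {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}},
            ],
        }
    ]
)[0]["content"]
assert out[0] == {"type": "text", "text": "Hello"}
assert out[1] == {"type": "blob", "modality": "image", "mime_type": "data:image/png", "content": b64}
assert out[2] == {"type": "uri", "uri": "https://example.com/image.jpg"}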

0 commit comments
