| 
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
 | 14 | + | 
 | 15 | +import pytest  | 
 | 16 | +from openai import OpenAI  | 
 | 17 | + | 
 | 18 | +from opentelemetry.semconv._incubating.attributes import (  | 
 | 19 | +    gen_ai_attributes as GenAIAttributes,  | 
 | 20 | +)  | 
 | 21 | + | 
 | 22 | + | 
 | 23 | +@pytest.mark.vcr()  | 
 | 24 | +def test_responses_create_with_content(  | 
 | 25 | +    span_exporter, log_exporter, openai_client, instrument_with_content  | 
 | 26 | +):  | 
 | 27 | +    llm_model_value = "gpt-4o-mini"  | 
 | 28 | +    input_value = "Say this is a test"  | 
 | 29 | + | 
 | 30 | +    response = openai_client.responses.create(  | 
 | 31 | +        input=input_value, model=llm_model_value  | 
 | 32 | +    )  | 
 | 33 | + | 
 | 34 | +    spans = span_exporter.get_finished_spans()  | 
 | 35 | +    assert len(spans) == 1  | 
 | 36 | +      | 
 | 37 | +    span = spans[0]  | 
 | 38 | +    assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat"  | 
 | 39 | +    assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai"  | 
 | 40 | +    assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == llm_model_value  | 
 | 41 | +    assert span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] == response.model  | 
 | 42 | +    assert span.attributes[GenAIAttributes.GEN_AI_RESPONSE_ID] == response.id  | 
 | 43 | + | 
 | 44 | +    # Check usage tokens if available  | 
 | 45 | +    if response.usage:  | 
 | 46 | +        assert GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS in span.attributes  | 
 | 47 | +        assert GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS in span.attributes  | 
 | 48 | + | 
 | 49 | +    logs = log_exporter.get_finished_logs()  | 
 | 50 | +    # At least input message should be logged  | 
 | 51 | +    assert len(logs) >= 1  | 
 | 52 | + | 
 | 53 | + | 
 | 54 | +@pytest.mark.vcr()  | 
 | 55 | +def test_responses_create_no_content(  | 
 | 56 | +    span_exporter, log_exporter, openai_client, instrument_no_content  | 
 | 57 | +):  | 
 | 58 | +    llm_model_value = "gpt-4o-mini"  | 
 | 59 | +    input_value = "Say this is a test"  | 
 | 60 | + | 
 | 61 | +    response = openai_client.responses.create(  | 
 | 62 | +        input=input_value, model=llm_model_value  | 
 | 63 | +    )  | 
 | 64 | + | 
 | 65 | +    spans = span_exporter.get_finished_spans()  | 
 | 66 | +    assert len(spans) == 1  | 
 | 67 | +      | 
 | 68 | +    span = spans[0]  | 
 | 69 | +    assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat"  | 
 | 70 | +    assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai"  | 
 | 71 | +    assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == llm_model_value  | 
 | 72 | + | 
 | 73 | +    # No content should be captured in logs when capture_content is False  | 
 | 74 | +    logs = log_exporter.get_finished_logs()  | 
 | 75 | +    for log in logs:  | 
 | 76 | +        if log.body and isinstance(log.body, dict):  | 
 | 77 | +            assert "content" not in log.body or not log.body.get("content")  | 
0 commit comments