diff --git a/ddtrace/llmobs/_integrations/openai.py b/ddtrace/llmobs/_integrations/openai.py
index 1344c1ea84a..1f0e61e5f6e 100644
--- a/ddtrace/llmobs/_integrations/openai.py
+++ b/ddtrace/llmobs/_integrations/openai.py
@@ -98,11 +98,13 @@ def _logs_tags(cls, span: Span) -> str:
 
     @classmethod
     def _metrics_tags(cls, span: Span) -> List[str]:
+        model_name = span.get_tag("openai.request.model") or ""
         tags = [
             "version:%s" % (config.version or ""),
             "env:%s" % (config.env or ""),
             "service:%s" % (span.service or ""),
-            "openai.request.model:%s" % (span.get_tag("openai.request.model") or ""),
+            "openai.request.model:%s" % model_name,
+            "model:%s" % model_name,
             "openai.request.endpoint:%s" % (span.get_tag("openai.request.endpoint") or ""),
             "openai.request.method:%s" % (span.get_tag("openai.request.method") or ""),
             "openai.organization.id:%s" % (span.get_tag("openai.organization.id") or ""),
diff --git a/releasenotes/notes/openai-model-tag-2482b3d5b2905db9.yaml b/releasenotes/notes/openai-model-tag-2482b3d5b2905db9.yaml
new file mode 100644
index 00000000000..deb2be5ac17
--- /dev/null
+++ b/releasenotes/notes/openai-model-tag-2482b3d5b2905db9.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    openai: This introduces a `model` tag for OpenAI integration metrics, for consistency with the OpenAI SaaS integration. It has the same value as `openai.request.model`.
\ No newline at end of file
diff --git a/tests/contrib/openai/test_openai_v0.py b/tests/contrib/openai/test_openai_v0.py
index db15db897bc..631e80f67d0 100644
--- a/tests/contrib/openai/test_openai_v0.py
+++ b/tests/contrib/openai/test_openai_v0.py
@@ -151,6 +151,7 @@ def test_completion(
         "env:",
         "service:",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -220,6 +221,7 @@ async def test_acompletion(
         "env:",
         "service:",
         "openai.request.model:curie",
+        "model:curie",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -321,6 +323,7 @@ def test_global_tags(openai_vcr, ddtrace_config_openai, openai, mock_metrics, mo
         "env:staging",
         "version:1234",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.name:datadog-4",
@@ -1285,6 +1288,7 @@ def test_completion_stream(openai, openai_vcr, mock_metrics, mock_tracer):
         "env:",
         "service:",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -1325,6 +1329,7 @@ async def test_completion_async_stream(openai, openai_vcr, mock_metrics, mock_tr
         "env:",
         "service:",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -1372,6 +1377,7 @@ def test_chat_completion_stream(openai, openai_vcr, mock_metrics, snapshot_trace
         "env:",
         "service:",
         "openai.request.model:gpt-3.5-turbo",
+        "model:gpt-3.5-turbo",
         "openai.request.endpoint:/v1/chat/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -1422,6 +1428,7 @@ async def test_chat_completion_async_stream(openai, openai_vcr, mock_metrics, sn
         "env:",
         "service:",
         "openai.request.model:gpt-3.5-turbo",
+        "model:gpt-3.5-turbo",
         "openai.request.endpoint:/v1/chat/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
diff --git a/tests/contrib/openai/test_openai_v1.py b/tests/contrib/openai/test_openai_v1.py
index a1bc06aaf4e..c51584cda60 100644
--- a/tests/contrib/openai/test_openai_v1.py
+++ b/tests/contrib/openai/test_openai_v1.py
@@ -168,6 +168,7 @@ def test_completion(
         "env:",
         "service:",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -237,6 +238,7 @@ async def test_acompletion(
         "env:",
         "service:",
         "openai.request.model:curie",
+        "model:curie",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -344,6 +346,7 @@ def test_global_tags(openai_vcr, ddtrace_config_openai, openai, mock_metrics, mo
         "env:staging",
         "version:1234",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.name:datadog-4",
@@ -941,6 +944,7 @@ def test_completion_stream(openai, openai_vcr, mock_metrics, mock_tracer):
         "env:",
         "service:",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -979,6 +983,7 @@ async def test_completion_async_stream(openai, openai_vcr, mock_metrics, mock_tr
         "env:",
         "service:",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -1021,6 +1026,7 @@ def test_completion_stream_context_manager(openai, openai_vcr, mock_metrics, moc
         "env:",
         "service:",
         "openai.request.model:ada",
+        "model:ada",
         "openai.request.endpoint:/v1/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -1067,6 +1073,7 @@ def test_chat_completion_stream(openai, openai_vcr, mock_metrics, snapshot_trace
         "env:",
         "service:",
         "openai.request.model:gpt-3.5-turbo",
+        "model:gpt-3.5-turbo",
         "openai.request.endpoint:/v1/chat/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -1115,6 +1122,7 @@ async def test_chat_completion_async_stream(openai, openai_vcr, mock_metrics, sn
         "env:",
         "service:",
         "openai.request.model:gpt-3.5-turbo",
+        "model:gpt-3.5-turbo",
         "openai.request.endpoint:/v1/chat/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
@@ -1170,6 +1178,7 @@ async def test_chat_completion_async_stream_context_manager(openai, openai_vcr,
         "env:",
         "service:",
         "openai.request.model:gpt-3.5-turbo",
+        "model:gpt-3.5-turbo",
         "openai.request.endpoint:/v1/chat/completions",
         "openai.request.method:POST",
         "openai.organization.id:",
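
For reference, here is a minimal sketch (not part of the patch) of the tag list the updated `_metrics_tags` emits. The `FakeSpan` class and `metrics_tags` helper are hypothetical stand-ins for ddtrace's `Span` and the integration's classmethod, used only to illustrate that the same model value now appears under both `openai.request.model` and the new `model` key.

```python
# Hypothetical illustration of the behavior added by this patch; FakeSpan and
# metrics_tags are invented stand-ins, not ddtrace APIs.
from typing import Dict, List, Optional


class FakeSpan:
    """Stand-in exposing only the attributes the tag builder reads."""

    def __init__(self, service: str, tags: Dict[str, str]) -> None:
        self.service = service
        self._tags = tags

    def get_tag(self, key: str) -> Optional[str]:
        return self._tags.get(key)


def metrics_tags(span: FakeSpan) -> List[str]:
    # Mirrors the change above: the model name is read once and emitted under
    # both the existing openai.request.model key and the new model key.
    model_name = span.get_tag("openai.request.model") or ""
    return [
        "service:%s" % (span.service or ""),
        "openai.request.model:%s" % model_name,
        "model:%s" % model_name,  # new tag, same value as openai.request.model
        "openai.request.endpoint:%s" % (span.get_tag("openai.request.endpoint") or ""),
    ]


print(metrics_tags(FakeSpan("svc", {"openai.request.model": "ada", "openai.request.endpoint": "/v1/completions"})))
# ['service:svc', 'openai.request.model:ada', 'model:ada', 'openai.request.endpoint:/v1/completions']
```

The test updates in both `test_openai_v0.py` and `test_openai_v1.py` simply extend the expected tag lists with this duplicated `model:<name>` entry.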