Skip to content

Commit

Permalink
add function name to stored message for non-streamed cases
Browse files — browse the repository at this point in the history
  • Loading branch information
Yun-Kim committed Apr 15, 2024
1 parent 60763e1 commit 587c96d
Show file tree
Hide file tree
Showing 4 changed files with 57 additions and 29 deletions.
16 changes: 13 additions & 3 deletions ddtrace/contrib/openai/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,24 +118,34 @@ def _construct_message_from_streamed_chunks(streamed_chunks: List[Any]) -> Dict[
"""
message = {}
content = ""
formatted_content = ""
idx = None
for chunk in streamed_chunks:
chunk_content = getattr(chunk.delta, "content", "")
if chunk_content:
content += chunk_content
elif getattr(chunk.delta, "function_call", None):
content += chunk.delta.function_call.arguments
if idx is None:
formatted_content += "\n\n[function: {}]\n\n".format(getattr(chunk.delta.function_call, "name", ""))
idx = chunk.index
function_args = getattr(chunk.delta.function_call, "arguments", "")
content += "{}".format(function_args)
formatted_content += "{}".format(function_args)
elif getattr(chunk.delta, "tool_calls", None):
for tool_call in chunk.delta.tool_calls:
if tool_call.index != idx:
content += "\n\n{}\n\n".format(getattr(tool_call.function, "name", ""))
formatted_content += "\n\n[tool: {}]\n\n".format(getattr(tool_call.function, "name", ""))
idx = tool_call.index
content += "{}".format(tool_call.function.arguments)
function_args = getattr(tool_call.function, "arguments", "")
content += "{}".format(function_args)
formatted_content += "{}".format(function_args)

message["role"] = streamed_chunks[0].delta.role or "assistant"
if streamed_chunks[-1].finish_reason is not None:
message["finish_reason"] = streamed_chunks[-1].finish_reason
message["content"] = content.strip()
if formatted_content:
message["formatted_content"] = formatted_content.strip()
return message


Expand Down
56 changes: 33 additions & 23 deletions ddtrace/llmobs/_integrations/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,12 +152,13 @@ def _llmobs_set_meta_tags_from_completion(
span.set_tag_str(INPUT_PARAMETERS, json.dumps(parameters))
if err is not None:
span.set_tag_str(OUTPUT_MESSAGES, json.dumps([{"content": ""}]))
elif streamed_completions:
return
if streamed_completions:
span.set_tag_str(
OUTPUT_MESSAGES, json.dumps([{"content": choice["text"]} for choice in streamed_completions])
)
else:
span.set_tag_str(OUTPUT_MESSAGES, json.dumps([{"content": choice.text} for choice in resp.choices]))
return
span.set_tag_str(OUTPUT_MESSAGES, json.dumps([{"content": choice.text} for choice in resp.choices]))

@staticmethod
def _llmobs_set_meta_tags_from_chat(
Expand All @@ -168,33 +169,42 @@ def _llmobs_set_meta_tags_from_chat(
for m in kwargs.get("messages", []):
if isinstance(m, dict):
input_messages.append({"content": str(m.get("content", "")), "role": str(m.get("role", ""))})
else:
input_messages.append({"content": str(getattr(m, "content", "")), "role": str(getattr(m, "role", ""))})
continue
input_messages.append({"content": str(getattr(m, "content", "")), "role": str(getattr(m, "role", ""))})
span.set_tag_str(INPUT_MESSAGES, json.dumps(input_messages))
parameters = {"temperature": kwargs.get("temperature", 0)}
if kwargs.get("max_tokens"):
parameters["max_tokens"] = kwargs.get("max_tokens")
span.set_tag_str(INPUT_PARAMETERS, json.dumps(parameters))
if err is not None:
span.set_tag_str(OUTPUT_MESSAGES, json.dumps([{"content": ""}]))
elif streamed_messages:
span.set_tag_str(
OUTPUT_MESSAGES, json.dumps([{"content": m["content"], "role": m["role"]} for m in streamed_messages])
)
else:
output_messages = []
for idx, choice in enumerate(resp.choices):
content = getattr(choice.message, "content", "")
if getattr(choice.message, "function_call", None):
content = choice.message.function_call.arguments
elif getattr(choice.message, "tool_calls", None):
content = ""
for tool_call in choice.message.tool_calls:
content += "\n[tool: {}]\n\n{}\n".format(
getattr(tool_call.function, "name", ""), tool_call.function.arguments
)
output_messages.append({"content": str(content).strip(), "role": choice.message.role})
span.set_tag_str(OUTPUT_MESSAGES, json.dumps(output_messages))
return
if streamed_messages:
messages = []
for message in streamed_messages:
if "formatted_content" in message:
messages.append({"content": message["formatted_content"], "role": message["role"]})
continue
messages.append({"content": message["content"], "role": message["role"]})
span.set_tag_str(OUTPUT_MESSAGES, json.dumps(messages))
return
output_messages = []
for idx, choice in enumerate(resp.choices):
content = getattr(choice.message, "content", "")
if getattr(choice.message, "function_call", None):
content = "[function: {}]\n\n{}".format(
getattr(choice.message.function_call, "name", ""),
getattr(choice.message.function_call, "arguments", ""),
)
elif getattr(choice.message, "tool_calls", None):
content = ""
for tool_call in choice.message.tool_calls:
content += "\n[tool: {}]\n\n{}\n".format(
getattr(tool_call.function, "name", ""),
getattr(tool_call.function, "arguments", ""),
)
output_messages.append({"content": str(content).strip(), "role": choice.message.role})
span.set_tag_str(OUTPUT_MESSAGES, json.dumps(output_messages))

@staticmethod
def _set_llmobs_metrics_tags(span: Span, resp: Any, streamed: bool = False) -> Dict[str, Any]:
Expand Down
8 changes: 6 additions & 2 deletions tests/contrib/openai/test_openai_v0.py
Original file line number Diff line number Diff line change
Expand Up @@ -2340,6 +2340,10 @@ def test_llmobs_chat_completion_function_call(
function_call="auto",
user="ddtrace-test",
)
expected_output = "[function: {}]\n\n{}".format(
resp.choices[0].message.function_call.name,
resp.choices[0].message.function_call.arguments,
)
span = mock_tracer.pop_traces()[0][0]
assert mock_llmobs_writer.enqueue.call_count == 1
mock_llmobs_writer.enqueue.assert_called_with(
Expand All @@ -2348,7 +2352,7 @@ def test_llmobs_chat_completion_function_call(
model_name=resp.model,
model_provider="openai",
input_messages=[{"content": chat_completion_input_description, "role": "user"}],
output_messages=[{"content": resp.choices[0].message.function_call.arguments, "role": "assistant"}],
output_messages=[{"content": expected_output, "role": "assistant"}],
parameters={"temperature": 0},
token_metrics={"prompt_tokens": 157, "completion_tokens": 57, "total_tokens": 214},
tags={"ml_app": "<ml-app-name>"},
Expand Down Expand Up @@ -2381,7 +2385,7 @@ def test_llmobs_chat_completion_function_call_stream(
for chunk in resp:
resp_model = chunk.model

expected_output = '{"name":"David Nguyen","major":"Computer Science","school":"Stanford University","grades":3.8,"clubs":["Chess Club","South Asian Student Association"]}' # noqa: E501
expected_output = '[function: extract_student_info]\n\n{"name":"David Nguyen","major":"Computer Science","school":"Stanford University","grades":3.8,"clubs":["Chess Club","South Asian Student Association"]}' # noqa: E501
span = mock_tracer.pop_traces()[0][0]
assert mock_llmobs_writer.enqueue.call_count == 1
mock_llmobs_writer.enqueue.assert_called_with(
Expand Down
6 changes: 5 additions & 1 deletion tests/contrib/openai/test_openai_v1.py
Original file line number Diff line number Diff line change
Expand Up @@ -2025,6 +2025,10 @@ def test_llmobs_chat_completion_function_call(
function_call="auto",
user="ddtrace-test",
)
expected_output = "[function: {}]\n\n{}".format(
resp.choices[0].message.function_call.name,
resp.choices[0].message.function_call.arguments,
)
span = mock_tracer.pop_traces()[0][0]
assert mock_llmobs_writer.enqueue.call_count == 1
mock_llmobs_writer.enqueue.assert_called_with(
Expand All @@ -2033,7 +2037,7 @@ def test_llmobs_chat_completion_function_call(
model_name=resp.model,
model_provider="openai",
input_messages=[{"content": chat_completion_input_description, "role": "user"}],
output_messages=[{"content": resp.choices[0].message.function_call.arguments, "role": "assistant"}],
output_messages=[{"content": expected_output, "role": "assistant"}],
parameters={"temperature": 0},
token_metrics={"prompt_tokens": 157, "completion_tokens": 57, "total_tokens": 214},
tags={"ml_app": "<ml-app-name>"},
Expand Down

0 comments on commit 587c96d

Please sign in to comment.