Fix the situation where output_tokens/input_tokens may be None in response.usage (#10728)
AAlexDing authored Nov 19, 2024
1 parent 904ea05 commit 3087913
15 changes: 7 additions & 8 deletions api/core/model_runtime/model_providers/anthropic/llm/llm.py
@@ -325,14 +325,13 @@ def _handle_chat_generate_response(
                 assistant_prompt_message.tool_calls.append(tool_call)
 
         # calculate num tokens
-        if response.usage:
-            # transform usage
-            prompt_tokens = response.usage.input_tokens
-            completion_tokens = response.usage.output_tokens
-        else:
-            # calculate num tokens
-            prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-            completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
+        prompt_tokens = (response.usage and response.usage.input_tokens) or self.get_num_tokens(
+            model, credentials, prompt_messages
+        )
+
+        completion_tokens = (response.usage and response.usage.output_tokens) or self.get_num_tokens(
+            model, credentials, [assistant_prompt_message]
+        )
 
         # transform usage
         usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
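For context: the old code read token counts directly from response.usage whenever the usage object itself was present, so a usage object whose input_tokens or output_tokens fields were None slipped past the if response.usage check and produced None token counts downstream. The new (a and b) or fallback expression falls back to a local estimate when either the usage object or the individual field is missing. A minimal, self-contained sketch of that pattern follows; the Usage dataclass and estimate_tokens helper are illustrative stand-ins, not Dify's actual classes:

    # Sketch of the fallback pattern used in the diff above.
    # Usage and estimate_tokens are hypothetical stand-ins.
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Usage:
        input_tokens: Optional[int] = None
        output_tokens: Optional[int] = None

    def estimate_tokens() -> int:
        # Stand-in for self.get_num_tokens(model, credentials, ...)
        return 42

    for usage in (None, Usage(), Usage(input_tokens=100)):
        # Falls back to the estimate when usage is None
        # OR when usage.input_tokens is None.
        prompt_tokens = (usage and usage.input_tokens) or estimate_tokens()
        print(usage, "->", prompt_tokens)

    # None -> 42
    # Usage(input_tokens=None, output_tokens=None) -> 42
    # Usage(input_tokens=100, output_tokens=None) -> 100

One caveat of the pattern itself: because or tests truthiness rather than None-ness, a legitimate reported count of 0 would also trigger the fallback estimate; an explicit is not None check would distinguish those cases.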
