Fix the situation where output_tokens/input_tokens may be None in response.usage

Ding Jiatong authored on 2024-11-15 11:47:27 +08:00, committed by GitHub
parent 4b2abf8ac2
commit 75aa0220dd


@@ -325,14 +325,12 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
                 assistant_prompt_message.tool_calls.append(tool_call)
         # calculate num tokens
-        if response.usage:
-            # transform usage
-            prompt_tokens = response.usage.input_tokens
-            completion_tokens = response.usage.output_tokens
-        else:
-            # calculate num tokens
-            prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-            completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
+        prompt_tokens = (response.usage and response.usage.input_tokens) or \
+            self.get_num_tokens(model, credentials, prompt_messages)
+        completion_tokens = (response.usage and response.usage.output_tokens) or \
+            self.get_num_tokens(model, credentials, [assistant_prompt_message])
         # transform usage
         usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
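
For reference, a minimal standalone sketch (not the Dify source itself; `Usage`, `estimate_tokens`, and the sample values are made up for illustration) of how the `(usage and usage.field) or fallback` expression in the new lines behaves: the `and` returns None when either `usage` or the reported field is None, so the `or` short-circuits to the locally computed estimate in exactly those cases.

    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class Usage:
        # Stand-in for response.usage; fields may be None, mirroring the bug being fixed.
        input_tokens: Optional[int] = None
        output_tokens: Optional[int] = None


    def estimate_tokens(text: str) -> int:
        # Crude stand-in for self.get_num_tokens(): roughly 4 characters per token.
        return max(1, len(text) // 4)


    def count_prompt_tokens(usage: Optional[Usage], prompt_text: str) -> int:
        # Mirrors: (response.usage and response.usage.input_tokens) or self.get_num_tokens(...)
        # Note: a reported value of 0 would also fall through to the estimate,
        # a side effect of using `or` for the fallback.
        return (usage and usage.input_tokens) or estimate_tokens(prompt_text)


    print(count_prompt_tokens(Usage(input_tokens=42), "hello world"))    # 42, API-reported value
    print(count_prompt_tokens(Usage(input_tokens=None), "hello world"))  # 2, falls back to estimate
    print(count_prompt_tokens(None, "hello world"))                      # 2, falls back to estimate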