mirror of
https://github.com/langgenius/dify.git
synced 2024-11-16 03:32:23 +08:00
Fix handling of cases where response.usage.output_tokens/input_tokens may be None
This commit is contained in:
parent
4b2abf8ac2
commit
75aa0220dd
|
@@ -325,14 +325,12 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
                 assistant_prompt_message.tool_calls.append(tool_call)
 
-        # calculate num tokens
-        if response.usage:
-            # transform usage
-            prompt_tokens = response.usage.input_tokens
-            completion_tokens = response.usage.output_tokens
-        else:
-            # calculate num tokens
-            prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-            completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
+        prompt_tokens = (response.usage and response.usage.input_tokens) or \
+            self.get_num_tokens(model, credentials, prompt_messages)
+
+        completion_tokens = (response.usage and response.usage.output_tokens) or \
+            self.get_num_tokens(model, credentials, [assistant_prompt_message])
 
         # transform usage
         usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
|
Loading…
Reference in New Issue
Block a user