Mirror of https://github.com/langgenius/dify.git, synced 2024-11-16 11:42:29 +08:00
fix: Ollama model feature set vision, and an exception occurred at the… (#8783)
parent 62406991df
commit 128a66f7fe
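Why the exception occurred (a hedged reconstruction, not text from the commit): when the Ollama model's feature set includes vision, prompt_messages[0].content is a list of content parts rather than a plain str, and the old token-counting fallback fed that list straight into self._get_num_tokens_by_gpt2. A minimal stand-alone reproduction, with dict parts and a whitespace counter standing in for Dify's real content types and tokenizer:

# Stub standing in for Dify's self._get_num_tokens_by_gpt2, which expects a str.
def get_num_tokens_by_gpt2(text: str) -> int:
    return len(text.split())

# With vision enabled, message content is a list of parts, not a string.
vision_content = [
    {"type": "text", "data": "Describe this image."},
    {"type": "image", "data": "data:image/png;base64,..."},  # payload elided
]

try:
    get_num_tokens_by_gpt2(vision_content)  # the pre-patch fallback path
except AttributeError as exc:
    # 'list' object has no attribute 'split' -- the exception this commit fixes
    print(f"pre-patch fallback raises: {exc}")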
@@ -364,14 +364,21 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
             if chunk_json["done"]:
                 # calculate num tokens
-                if "prompt_eval_count" in chunk_json and "eval_count" in chunk_json:
-                    # transform usage
+                if "prompt_eval_count" in chunk_json:
                     prompt_tokens = chunk_json["prompt_eval_count"]
-                    completion_tokens = chunk_json["eval_count"]
                 else:
-                    # calculate num tokens
-                    prompt_tokens = self._get_num_tokens_by_gpt2(prompt_messages[0].content)
-                    completion_tokens = self._get_num_tokens_by_gpt2(full_text)
+                    prompt_message_content = prompt_messages[0].content
+                    if isinstance(prompt_message_content, str):
+                        prompt_tokens = self._get_num_tokens_by_gpt2(prompt_message_content)
+                    else:
+                        content_text = ""
+                        for message_content in prompt_message_content:
+                            if message_content.type == PromptMessageContentType.TEXT:
+                                message_content = cast(TextPromptMessageContent, message_content)
+                                content_text += message_content.data
+                        prompt_tokens = self._get_num_tokens_by_gpt2(content_text)
+
+                completion_tokens = chunk_json.get("eval_count", self._get_num_tokens_by_gpt2(full_text))

                 # transform usage
                 usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
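In short, the patch decouples the two counters: prompt_tokens prefers Ollama's prompt_eval_count and otherwise tokenizes the prompt locally, now handling both plain-string content and vision-style lists of content parts, while completion_tokens uses chunk_json.get("eval_count", ...) so a missing key falls back to counting the generated text. A minimal, self-contained sketch of the new prompt-side fallback (the enum, dataclass, and whitespace tokenizer below are simplified stand-ins for Dify's PromptMessageContentType, TextPromptMessageContent, and _get_num_tokens_by_gpt2):

from dataclasses import dataclass
from enum import Enum
from typing import Union

class PromptMessageContentType(Enum):  # simplified stand-in
    TEXT = "text"
    IMAGE = "image"

@dataclass
class PromptMessageContent:  # simplified stand-in for Dify's content parts
    type: PromptMessageContentType
    data: str

def get_num_tokens_by_gpt2(text: str) -> int:
    return len(text.split())  # whitespace count standing in for GPT-2 BPE

def count_prompt_tokens(content: Union[str, list[PromptMessageContent]]) -> int:
    # Plain string: count it directly, as before the patch.
    if isinstance(content, str):
        return get_num_tokens_by_gpt2(content)
    # Vision-style list: concatenate only the TEXT parts, then count.
    content_text = ""
    for part in content:
        if part.type == PromptMessageContentType.TEXT:
            content_text += part.data
    return get_num_tokens_by_gpt2(content_text)

# Usage: both content shapes now take the same code path without raising.
print(count_prompt_tokens("What is in this picture?"))  # 5
print(count_prompt_tokens([
    PromptMessageContent(PromptMessageContentType.TEXT, "What is in this picture?"),
    PromptMessageContent(PromptMessageContentType.IMAGE, "data:image/png;base64,..."),
]))  # 5 -- the image part is skipped

Note that image parts deliberately contribute nothing to the fallback count, matching the diff: only TEXT parts are concatenated before the GPT-2 estimate.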