Mirror of https://github.com/langgenius/dify.git (synced 2024-11-15 19:22:36 +08:00)
fix(node): handle empty text segments gracefully
Ensure that messages are created only from non-empty text segments, preventing issues with empty content.

test: add scenario for file variable handling

Introduce a test case for prompt templates that reference file variables, particularly images, to improve reliability and test coverage. Update `LLMNodeTestScenario` to use `Sequence` and `Mapping` for more flexible configurations.

Closes #123, relates to #456.
Commit: 6872b32c7d
Parent: 97fab7649b
```diff
@@ -868,8 +868,10 @@ class LLMNode(BaseNode[LLMNodeData]):
                         image_contents.append(image_content)
 
                 # Create message with text from all segments
-                prompt_message = _combine_text_message_with_role(text=segment_group.text, role=message.role)
-                prompt_messages.append(prompt_message)
+                plain_text = segment_group.text
+                if plain_text:
+                    prompt_message = _combine_text_message_with_role(text=plain_text, role=message.role)
+                    prompt_messages.append(prompt_message)
 
                 if image_contents:
                     # Create message with image contents
```
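Before this change, a template whose only content is a file variable (e.g. just `{{#input.image#}}`) left `segment_group.text` empty, so an empty text message was still emitted. A minimal sketch of the guard's effect, with `UserPromptMessage` and `_combine_text_message_with_role` written as stand-ins assumed from the diff, not the real implementations:

```python
from dataclasses import dataclass


@dataclass
class UserPromptMessage:
    # Stand-in for dify's UserPromptMessage; shape assumed for illustration.
    content: str


def _combine_text_message_with_role(*, text: str, role: str) -> UserPromptMessage:
    # Stand-in for the helper named in the diff: wraps text for the given role.
    assert role == "user"
    return UserPromptMessage(content=text)


def build_text_messages(segment_text: str) -> list[UserPromptMessage]:
    prompt_messages: list[UserPromptMessage] = []
    plain_text = segment_text
    # The fix: only emit a text message when the segment text is non-empty.
    if plain_text:
        prompt_messages.append(_combine_text_message_with_role(text=plain_text, role="user"))
    return prompt_messages


assert build_text_messages("Describe this image:") == [UserPromptMessage(content="Describe this image:")]
assert build_text_messages("") == []  # previously produced UserPromptMessage(content="")
```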
```diff
@@ -363,11 +363,49 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
                 ),
             ],
         ),
+        LLMNodeTestScenario(
+            description="Prompt template with variable selector of File",
+            user_query=fake_query,
+            user_files=[],
+            vision_enabled=True,
+            vision_detail=fake_vision_detail,
+            features=[ModelFeature.VISION],
+            window_size=fake_window_size,
+            prompt_template=[
+                LLMNodeChatModelMessage(
+                    text="{{#input.image#}}",
+                    role=PromptMessageRole.USER,
+                    edition_type="basic",
+                ),
+            ],
+            expected_messages=[
+                UserPromptMessage(
+                    content=[
+                        ImagePromptMessageContent(data=fake_remote_url, detail=fake_vision_detail),
+                    ]
+                ),
+            ]
+            + mock_history[fake_window_size * -2 :]
+            + [UserPromptMessage(content=fake_query)],
+            file_variables={
+                "input.image": File(
+                    tenant_id="test",
+                    type=FileType.IMAGE,
+                    filename="test1.jpg",
+                    transfer_method=FileTransferMethod.REMOTE_URL,
+                    remote_url=fake_remote_url,
+                )
+            },
+        ),
     ]
 
     for scenario in test_scenarios:
         model_config.model_schema.features = scenario.features
 
+        for k, v in scenario.file_variables.items():
+            selector = k.split(".")
+            llm_node.graph_runtime_state.variable_pool.add(selector, v)
+
         # Call the method under test
         prompt_messages, _ = llm_node._fetch_prompt_messages(
             user_query=scenario.user_query,
```
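One detail worth noting in the expected messages: the memory window is counted in dialogue turns, and each turn contributes a user and an assistant message, hence the `fake_window_size * -2` slice. A quick arithmetic check with illustrative values, not the test's actual fixtures:

```python
# Each turn = one user + one assistant message, so a window of N turns
# keeps the last 2 * N entries of the flattened history.
fake_window_size = 2
mock_history = ["u1", "a1", "u2", "a2", "u3", "a3"]

windowed = mock_history[fake_window_size * -2 :]
assert windowed == ["u2", "a2", "u3", "a3"]  # last 2 turns, 4 messages
```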
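The `file_variables` keys are dotted paths that the test splits into variable-pool selectors, the same form the `{{#input.image#}}` template reference resolves against. A rough sketch of that resolution, with a plain dict standing in for dify's variable pool (the pool API shown here is assumed, not the real one):

```python
import re

# A plain dict stands in for dify's variable pool; the real pool API differs.
variable_pool: dict[tuple[str, ...], object] = {}


def add(selector: list[str], value: object) -> None:
    variable_pool[tuple(selector)] = value


def resolve_template(text: str) -> list[object]:
    # "{{#input.image#}}" -> selector ("input", "image") -> pooled value
    refs = re.findall(r"\{\{#([\w.]+)#\}\}", text)
    return [variable_pool[tuple(ref.split("."))] for ref in refs]


add("input.image".split("."), "<File test1.jpg>")
assert resolve_template("{{#input.image#}}") == ["<File test1.jpg>"]
```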
```diff
@@ -1,3 +1,5 @@
+from collections.abc import Mapping, Sequence
+
 from pydantic import BaseModel, Field
 
 from core.file import File
```
```diff
@@ -11,10 +13,13 @@ class LLMNodeTestScenario(BaseModel):
 
     description: str = Field(..., description="Description of the test scenario")
     user_query: str = Field(..., description="User query input")
-    user_files: list[File] = Field(default_factory=list, description="List of user files")
+    user_files: Sequence[File] = Field(default_factory=list, description="List of user files")
     vision_enabled: bool = Field(default=False, description="Whether vision is enabled")
     vision_detail: str | None = Field(None, description="Vision detail level if vision is enabled")
-    features: list[ModelFeature] = Field(default_factory=list, description="List of model features")
+    features: Sequence[ModelFeature] = Field(default_factory=list, description="List of model features")
     window_size: int = Field(..., description="Window size for memory")
-    prompt_template: list[LLMNodeChatModelMessage] = Field(..., description="Template for prompt messages")
-    expected_messages: list[PromptMessage] = Field(..., description="Expected messages after processing")
+    prompt_template: Sequence[LLMNodeChatModelMessage] = Field(..., description="Template for prompt messages")
+    file_variables: Mapping[str, File | Sequence[File]] = Field(
+        default_factory=dict, description="List of file variables"
+    )
+    expected_messages: Sequence[PromptMessage] = Field(..., description="Expected messages after processing")
```
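Widening the annotations from `list`/`dict` to `Sequence`/`Mapping` lets scenarios be built from tuples or other read-only containers and documents that the harness never mutates them. A standalone sketch of the same pattern (a toy model, not the real `LLMNodeTestScenario`):

```python
from collections.abc import Mapping, Sequence

from pydantic import BaseModel, Field


class Scenario(BaseModel):
    # Sequence/Mapping accept tuples and other read-only containers,
    # and signal that consumers should not mutate these fields.
    features: Sequence[str] = Field(default_factory=list)
    file_variables: Mapping[str, str] = Field(default_factory=dict)


s = Scenario(features=("vision",), file_variables={"input.image": "test1.jpg"})
print(list(s.features), dict(s.file_variables))
```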