Mirror of https://github.com/langgenius/dify.git (synced 2024-11-16 03:32:23 +08:00)

Merge branch 'feat/license-supports' into license-testing

Commit 7051f35b75
.github/actions/setup-poetry/action.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
+name: Setup Poetry and Python
+
+inputs:
+  python-version:
+    description: Python version to use and the Poetry installed with
+    required: true
+    default: '3.10'
+  poetry-version:
+    description: Poetry version to set up
+    required: true
+    default: '1.8.4'
+  poetry-lockfile:
+    description: Path to the Poetry lockfile to restore cache from
+    required: true
+    default: ''
+
+runs:
+  using: composite
+  steps:
+    - name: Set up Python ${{ inputs.python-version }}
+      uses: actions/setup-python@v5
+      with:
+        python-version: ${{ inputs.python-version }}
+        cache: pip
+
+    - name: Install Poetry
+      shell: bash
+      run: pip install poetry==${{ inputs.poetry-version }}
+
+    - name: Restore Poetry cache
+      if: ${{ inputs.poetry-lockfile != '' }}
+      uses: actions/setup-python@v5
+      with:
+        python-version: ${{ inputs.python-version }}
+        cache: poetry
+        cache-dependency-path: ${{ inputs.poetry-lockfile }}

.github/workflows/api-tests.yml (10 changed lines)
@@ -28,15 +28,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4

-      - name: Install Poetry
-        uses: abatilo/actions-poetry@v3
-
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
+      - name: Setup Poetry and Python ${{ matrix.python-version }}
+        uses: ./.github/actions/setup-poetry
         with:
           python-version: ${{ matrix.python-version }}
-          cache: poetry
-          cache-dependency-path: api/poetry.lock
+          poetry-lockfile: api/poetry.lock

      - name: Check Poetry lockfile
        run: |

.github/workflows/db-migration-test.yml (17 changed lines)
@@ -6,6 +6,7 @@ on:
       - main
     paths:
       - api/migrations/**
+      - .github/workflows/db-migration-test.yml

 concurrency:
   group: db-migration-test-${{ github.ref }}

@@ -14,25 +15,15 @@ concurrency:
 jobs:
   db-migration-test:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.10"

     steps:
       - name: Checkout code
         uses: actions/checkout@v4

-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
-        with:
-          python-version: ${{ matrix.python-version }}
-          cache-dependency-path: |
-            api/pyproject.toml
-            api/poetry.lock
-
-      - name: Install Poetry
-        uses: abatilo/actions-poetry@v3
+      - name: Setup Poetry and Python
+        uses: ./.github/actions/setup-poetry
+        with:
+          poetry-lockfile: api/poetry.lock

       - name: Install dependencies
         run: poetry install -C api

.github/workflows/style.yml (24 changed lines)
@@ -22,34 +22,28 @@ jobs:
         id: changed-files
         uses: tj-actions/changed-files@v45
         with:
-          files: api/**
+          files: |
+            api/**
+            .github/workflows/style.yml

-      - name: Install Poetry
+      - name: Setup Poetry and Python
         if: steps.changed-files.outputs.any_changed == 'true'
-        uses: abatilo/actions-poetry@v3
+        uses: ./.github/actions/setup-poetry

-      - name: Set up Python
-        uses: actions/setup-python@v5
-        if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          python-version: '3.10'
-
-      - name: Python dependencies
+      - name: Install dependencies
         if: steps.changed-files.outputs.any_changed == 'true'
         run: poetry install -C api --only lint

       - name: Ruff check
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: poetry run -C api ruff check ./api
+        run: |
+          poetry run -C api ruff check ./api
+          poetry run -C api ruff format --check ./api

       - name: Dotenv check
         if: steps.changed-files.outputs.any_changed == 'true'
         run: poetry run -C api dotenv-linter ./api/.env.example ./web/.env.example

-      - name: Ruff formatter check
-        if: steps.changed-files.outputs.any_changed == 'true'
-        run: poetry run -C api ruff format --check ./api
-
       - name: Lint hints
         if: failure()
         run: echo "Please run 'dev/reformat' to fix the fixable linting errors."

.github/workflows/vdb-tests.yml (10 changed lines)
@@ -28,15 +28,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4

-      - name: Install Poetry
-        uses: abatilo/actions-poetry@v3
-
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
+      - name: Setup Poetry and Python ${{ matrix.python-version }}
+        uses: ./.github/actions/setup-poetry
         with:
           python-version: ${{ matrix.python-version }}
-          cache: poetry
-          cache-dependency-path: api/poetry.lock
+          poetry-lockfile: api/poetry.lock

      - name: Check Poetry lockfile
        run: |

@@ -177,3 +177,4 @@ To protect your privacy, please avoid posting security issues on GitHub. Instead

 ## License

+This repository is available under the [Dify Open Source License](LICENSE), which is essentially Apache 2.0 with a few additional restrictions.

@@ -285,8 +285,9 @@ UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
 UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
 UPLOAD_AUDIO_FILE_SIZE_LIMIT=50

-# Model Configuration
+# Model configuration
 MULTIMODAL_SEND_IMAGE_FORMAT=base64
+MULTIMODAL_SEND_VIDEO_FORMAT=base64
 PROMPT_GENERATION_MAX_TOKENS=512
 CODE_GENERATION_MAX_TOKENS=1024

@@ -324,10 +325,10 @@ UNSTRUCTURED_API_KEY=
 SSRF_PROXY_HTTP_URL=
 SSRF_PROXY_HTTPS_URL=
 SSRF_DEFAULT_MAX_RETRIES=3
-SSRF_DEFAULT_TIME_OUT=
-SSRF_DEFAULT_CONNECT_TIME_OUT=
-SSRF_DEFAULT_READ_TIME_OUT=
-SSRF_DEFAULT_WRITE_TIME_OUT=
+SSRF_DEFAULT_TIME_OUT=5
+SSRF_DEFAULT_CONNECT_TIME_OUT=5
+SSRF_DEFAULT_READ_TIME_OUT=5
+SSRF_DEFAULT_WRITE_TIME_OUT=5

 BATCH_UPLOAD_LIMIT=10
 KEYWORD_DATA_SOURCE_TYPE=database

@@ -366,6 +367,10 @@ LOG_FILE=
 LOG_FILE_MAX_SIZE=20
 # Log file max backup count
 LOG_FILE_BACKUP_COUNT=5
+# Log dateformat
+LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
+# Log Timezone
+LOG_TZ=UTC

 # Indexing configuration
 INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=1000

@@ -395,3 +400,5 @@ POSITION_PROVIDER_EXCLUDES=

 # Reset password token expiry minutes
 RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
+
+CREATE_TIDB_SERVICE_JOB_ENABLED=false

@@ -4,7 +4,7 @@ FROM python:3.10-slim-bookworm AS base
 WORKDIR /app/api

 # Install Poetry
-ENV POETRY_VERSION=1.8.3
+ENV POETRY_VERSION=1.8.4

 # if you located in China, you can use aliyun mirror to speed up
 # RUN pip install --no-cache-dir poetry==${POETRY_VERSION} -i https://mirrors.aliyun.com/pypi/simple/

@@ -55,7 +55,7 @@ RUN apt-get update \
     && echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \
     && apt-get update \
     # For Security
-    && apt-get install -y --no-install-recommends expat=2.6.3-2 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-6 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
+    && apt-get install -y --no-install-recommends expat=2.6.3-2 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-7 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
     # install a chinese font to support the use of tools like matplotlib
     && apt-get install -y fonts-noto-cjk \
     && apt-get autoremove -y \

@@ -1,8 +1,9 @@
 import os
+import sys

-if os.environ.get("DEBUG", "false").lower() != "true":
+from configs import dify_config
+
+if not dify_config.DEBUG:
     from gevent import monkey

     monkey.patch_all()

@@ -29,6 +30,9 @@ from models import account, dataset, model, source, task, tool, tools, web  # noqa

 # DO NOT REMOVE ABOVE

+if sys.version_info[:2] == (3, 10):
+    print("Warning: Python 3.10 will not be supported in the next version.")
+

 warnings.simplefilter("ignore", ResourceWarning)

@@ -49,7 +53,6 @@ if dify_config.TESTING:
 @app.after_request
 def after_request(response):
     """Add Version headers to the response."""
-    response.set_cookie("remember_token", "", expires=0)
     response.headers.add("X-Version", dify_config.CURRENT_VERSION)
     response.headers.add("X-Env", dify_config.DEPLOY_ENV)
     return response

@@ -1,6 +1,8 @@
 import os

-if os.environ.get("DEBUG", "false").lower() != "true":
+from configs import dify_config
+
+if not dify_config.DEBUG:
     from gevent import monkey

     monkey.patch_all()

@@ -276,6 +276,16 @@ class HttpConfig(BaseSettings):
         default=1 * 1024 * 1024,
     )

+    SSRF_DEFAULT_MAX_RETRIES: PositiveInt = Field(
+        description="Maximum number of retries for network requests (SSRF)",
+        default=3,
+    )
+
+    SSRF_PROXY_ALL_URL: Optional[str] = Field(
+        description="Proxy URL for HTTP or HTTPS requests to prevent Server-Side Request Forgery (SSRF)",
+        default=None,
+    )
+
     SSRF_PROXY_HTTP_URL: Optional[str] = Field(
         description="Proxy URL for HTTP requests to prevent Server-Side Request Forgery (SSRF)",
         default=None,

@@ -366,7 +376,7 @@ class LoggingConfig(BaseSettings):

     LOG_TZ: Optional[str] = Field(
         description="Timezone for log timestamps (e.g., 'America/New_York')",
-        default=None,
+        default="UTC",
     )

@@ -601,6 +611,11 @@ class DataSetConfig(BaseSettings):
         default=500,
     )

+    CREATE_TIDB_SERVICE_JOB_ENABLED: bool = Field(
+        description="Enable or disable create tidb service job",
+        default=False,
+    )
+

 class WorkspaceConfig(BaseSettings):
     """

@@ -624,12 +639,17 @@ class IndexingConfig(BaseSettings):
     )


-class ImageFormatConfig(BaseSettings):
+class VisionFormatConfig(BaseSettings):
     MULTIMODAL_SEND_IMAGE_FORMAT: Literal["base64", "url"] = Field(
         description="Format for sending images in multimodal contexts ('base64' or 'url'), default is base64",
         default="base64",
     )

+    MULTIMODAL_SEND_VIDEO_FORMAT: Literal["base64", "url"] = Field(
+        description="Format for sending videos in multimodal contexts ('base64' or 'url'), default is base64",
+        default="base64",
+    )
+

 class CeleryBeatConfig(BaseSettings):
     CELERY_BEAT_SCHEDULER_TIME: int = Field(

@@ -732,7 +752,7 @@ class FeatureConfig(
     FileAccessConfig,
     FileUploadConfig,
     HttpConfig,
-    ImageFormatConfig,
+    VisionFormatConfig,
     InnerAPIConfig,
     IndexingConfig,
     LoggingConfig,

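The new SSRF fields follow the same pydantic-settings pattern as the rest of the feature config: each Field default can be overridden by an environment variable of the same name. A minimal standalone sketch (not Dify code) of that mechanism:

    from pydantic import Field, PositiveInt
    from pydantic_settings import BaseSettings

    class HttpConfigSketch(BaseSettings):
        # Same shape as the fields added above; environment variables of the
        # same name override these defaults when the settings object is built.
        SSRF_DEFAULT_MAX_RETRIES: PositiveInt = Field(default=3)
        SSRF_DEFAULT_TIME_OUT: float = Field(default=5)

    config = HttpConfigSketch()
    print(config.SSRF_DEFAULT_MAX_RETRIES)  # 3 unless SSRF_DEFAULT_MAX_RETRIES is set
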
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):

     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="0.11.0",
+        default="0.11.1",
     )

     COMMIT_SHA: str = Field(

@@ -317,8 +317,11 @@ class DatasetInitApi(Resource):
             raise ValueError("embedding model and embedding model provider are required for high quality indexing.")
         try:
             model_manager = ModelManager()
-            model_manager.get_default_model_instance(
-                tenant_id=current_user.current_tenant_id, model_type=ModelType.TEXT_EMBEDDING
+            model_manager.get_model_instance(
+                tenant_id=current_user.current_tenant_id,
+                provider=args["embedding_model_provider"],
+                model_type=ModelType.TEXT_EMBEDDING,
+                model=args["embedding_model"],
             )
         except InvokeAuthorizationError:
             raise ProviderNotInitializeError(

@@ -945,7 +948,7 @@ class DocumentRetryApi(DocumentResource):
                     raise DocumentAlreadyFinishedError()
                 retry_documents.append(document)
             except Exception as e:
-                logging.error(f"Document {document_id} retry failed: {str(e)}")
+                logging.exception(f"Document {document_id} retry failed: {str(e)}")
                 continue
         # retry document
         DocumentService.retry_document(dataset_id, retry_documents)

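The logging.error-to-logging.exception swap above recurs throughout this commit. The difference is that logging.exception logs at ERROR level and also appends the active traceback, which logging.error alone does not. A runnable illustration:

    import logging

    try:
        1 / 0
    except Exception:
        # Logs the message *and* the ZeroDivisionError traceback.
        logging.exception("Document retry failed")
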
@@ -7,7 +7,11 @@ from controllers.service_api import api
 from controllers.service_api.app.error import NotChatAppError
 from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
 from core.app.entities.app_invoke_entities import InvokeFrom
-from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
+from fields.conversation_fields import (
+    conversation_delete_fields,
+    conversation_infinite_scroll_pagination_fields,
+    simple_conversation_fields,
+)
 from libs.helper import uuid_value
 from models.model import App, AppMode, EndUser
 from services.conversation_service import ConversationService

@@ -49,7 +53,7 @@ class ConversationApi(Resource):

 class ConversationDetailApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
-    @marshal_with(simple_conversation_fields)
+    @marshal_with(conversation_delete_fields)
     def delete(self, app_model: App, end_user: EndUser, c_id):
         app_mode = AppMode.value_of(app_model.mode)
         if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:

@@ -10,6 +10,7 @@ from controllers.service_api.app.error import NotChatAppError
 from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
 from core.app.entities.app_invoke_entities import InvokeFrom
 from fields.conversation_fields import message_file_fields
+from fields.raws import FilesContainedField
 from libs.helper import TimestampField, uuid_value
 from models.model import App, AppMode, EndUser
 from services.errors.message import SuggestedQuestionsAfterAnswerDisabledError

@@ -55,7 +56,7 @@ class MessageListApi(Resource):
         "id": fields.String,
         "conversation_id": fields.String,
         "parent_message_id": fields.String,
-        "inputs": fields.Raw,
+        "inputs": FilesContainedField,
         "query": fields.String,
         "answer": fields.String(attribute="re_sign_file_url_answer"),
         "message_files": fields.List(fields.Nested(message_file_fields)),

@ -30,6 +30,7 @@ from core.model_runtime.entities import (
|
|||
ToolPromptMessage,
|
||||
UserPromptMessage,
|
||||
)
|
||||
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
|
||||
from core.model_runtime.entities.model_entities import ModelFeature
|
||||
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
|
||||
from core.model_runtime.utils.encoders import jsonable_encoder
|
||||
|
@ -65,7 +66,7 @@ class BaseAgentRunner(AppRunner):
|
|||
prompt_messages: Optional[list[PromptMessage]] = None,
|
||||
variables_pool: Optional[ToolRuntimeVariablePool] = None,
|
||||
db_variables: Optional[ToolConversationVariables] = None,
|
||||
model_instance: ModelInstance = None,
|
||||
model_instance: ModelInstance | None = None,
|
||||
) -> None:
|
||||
self.tenant_id = tenant_id
|
||||
self.application_generate_entity = application_generate_entity
|
||||
|
@ -508,24 +509,27 @@ class BaseAgentRunner(AppRunner):
|
|||
|
||||
def organize_agent_user_prompt(self, message: Message) -> UserPromptMessage:
|
||||
files = db.session.query(MessageFile).filter(MessageFile.message_id == message.id).all()
|
||||
if files:
|
||||
file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
|
||||
|
||||
if file_extra_config:
|
||||
file_objs = file_factory.build_from_message_files(
|
||||
message_files=files, tenant_id=self.tenant_id, config=file_extra_config
|
||||
)
|
||||
else:
|
||||
file_objs = []
|
||||
|
||||
if not file_objs:
|
||||
return UserPromptMessage(content=message.query)
|
||||
else:
|
||||
prompt_message_contents: list[PromptMessageContent] = []
|
||||
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
|
||||
for file_obj in file_objs:
|
||||
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
|
||||
|
||||
return UserPromptMessage(content=prompt_message_contents)
|
||||
else:
|
||||
if not files:
|
||||
return UserPromptMessage(content=message.query)
|
||||
file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
|
||||
if not file_extra_config:
|
||||
return UserPromptMessage(content=message.query)
|
||||
|
||||
image_detail_config = file_extra_config.image_config.detail if file_extra_config.image_config else None
|
||||
image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
|
||||
|
||||
file_objs = file_factory.build_from_message_files(
|
||||
message_files=files, tenant_id=self.tenant_id, config=file_extra_config
|
||||
)
|
||||
if not file_objs:
|
||||
return UserPromptMessage(content=message.query)
|
||||
prompt_message_contents: list[PromptMessageContent] = []
|
||||
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
|
||||
for file in file_objs:
|
||||
prompt_message_contents.append(
|
||||
file_manager.to_prompt_message_content(
|
||||
file,
|
||||
image_detail_config=image_detail_config,
|
||||
)
|
||||
)
|
||||
return UserPromptMessage(content=prompt_message_contents)
|
||||
|
|
|
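The refactor above flattens nested branches into early returns and threads an image-detail setting through to file_manager.to_prompt_message_content. A standalone sketch of the fallback logic, using stand-in types for the config objects (the real ones live in core.file):

    from dataclasses import dataclass
    from enum import Enum
    from typing import Optional

    class Detail(Enum):
        LOW = "low"
        HIGH = "high"

    @dataclass
    class ImageCfg:
        detail: Optional[Detail] = None

    @dataclass
    class UploadCfg:
        image_config: Optional[ImageCfg] = None

    def resolve_detail(cfg: UploadCfg) -> Detail:
        # Prefer the app-configured detail; fall back to LOW, as in the diff.
        detail = cfg.image_config.detail if cfg.image_config else None
        return detail or Detail.LOW

    assert resolve_detail(UploadCfg()) is Detail.LOW
    assert resolve_detail(UploadCfg(ImageCfg(Detail.HIGH))) is Detail.HIGH
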
@ -10,6 +10,7 @@ from core.model_runtime.entities import (
|
|||
TextPromptMessageContent,
|
||||
UserPromptMessage,
|
||||
)
|
||||
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
|
||||
from core.model_runtime.utils.encoders import jsonable_encoder
|
||||
|
||||
|
||||
|
@ -36,8 +37,24 @@ class CotChatAgentRunner(CotAgentRunner):
|
|||
if self.files:
|
||||
prompt_message_contents: list[PromptMessageContent] = []
|
||||
prompt_message_contents.append(TextPromptMessageContent(data=query))
|
||||
for file_obj in self.files:
|
||||
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
|
||||
|
||||
# get image detail config
|
||||
image_detail_config = (
|
||||
self.application_generate_entity.file_upload_config.image_config.detail
|
||||
if (
|
||||
self.application_generate_entity.file_upload_config
|
||||
and self.application_generate_entity.file_upload_config.image_config
|
||||
)
|
||||
else None
|
||||
)
|
||||
image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
|
||||
for file in self.files:
|
||||
prompt_message_contents.append(
|
||||
file_manager.to_prompt_message_content(
|
||||
file,
|
||||
image_detail_config=image_detail_config,
|
||||
)
|
||||
)
|
||||
|
||||
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
|
||||
else:
|
||||
|
|
|
@ -22,6 +22,7 @@ from core.model_runtime.entities import (
|
|||
ToolPromptMessage,
|
||||
UserPromptMessage,
|
||||
)
|
||||
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
|
||||
from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform
|
||||
from core.tools.entities.tool_entities import ToolInvokeMeta
|
||||
from core.tools.tool_engine import ToolEngine
|
||||
|
@ -397,8 +398,24 @@ class FunctionCallAgentRunner(BaseAgentRunner):
|
|||
if self.files:
|
||||
prompt_message_contents: list[PromptMessageContent] = []
|
||||
prompt_message_contents.append(TextPromptMessageContent(data=query))
|
||||
for file_obj in self.files:
|
||||
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
|
||||
|
||||
# get image detail config
|
||||
image_detail_config = (
|
||||
self.application_generate_entity.file_upload_config.image_config.detail
|
||||
if (
|
||||
self.application_generate_entity.file_upload_config
|
||||
and self.application_generate_entity.file_upload_config.image_config
|
||||
)
|
||||
else None
|
||||
)
|
||||
image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
|
||||
for file in self.files:
|
||||
prompt_message_contents.append(
|
||||
file_manager.to_prompt_message_content(
|
||||
file,
|
||||
image_detail_config=image_detail_config,
|
||||
)
|
||||
)
|
||||
|
||||
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
|
||||
else:
|
||||
|
|
|
@@ -4,7 +4,7 @@ from typing import Any, Optional

 from pydantic import BaseModel, Field, field_validator

-from core.file import FileExtraConfig, FileTransferMethod, FileType
+from core.file import FileTransferMethod, FileType, FileUploadConfig
 from core.model_runtime.entities.message_entities import PromptMessageRole
 from models.model import AppMode


@@ -211,7 +211,7 @@ class TracingConfigEntity(BaseModel):


 class AppAdditionalFeatures(BaseModel):
-    file_upload: Optional[FileExtraConfig] = None
+    file_upload: Optional[FileUploadConfig] = None
     opening_statement: Optional[str] = None
     suggested_questions: list[str] = []
     suggested_questions_after_answer: bool = False

@@ -1,7 +1,7 @@
 from collections.abc import Mapping
 from typing import Any

-from core.file import FileExtraConfig
+from core.file import FileUploadConfig


 class FileUploadConfigManager:

@@ -29,19 +29,18 @@ class FileUploadConfigManager:
         if is_vision:
             data["image_config"]["detail"] = file_upload_dict.get("image", {}).get("detail", "low")

-        return FileExtraConfig.model_validate(data)
+        return FileUploadConfig.model_validate(data)

     @classmethod
-    def validate_and_set_defaults(cls, config: dict, is_vision: bool = True) -> tuple[dict, list[str]]:
+    def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
         """
         Validate and set defaults for file upload feature

         :param config: app model config args
-        :param is_vision: if True, the feature is vision feature
         """
         if not config.get("file_upload"):
             config["file_upload"] = {}
         else:
-            FileExtraConfig.model_validate(config["file_upload"])
+            FileUploadConfig.model_validate(config["file_upload"])

         return config, ["file_upload"]

@@ -52,9 +52,7 @@ class AdvancedChatAppConfigManager(BaseAppConfigManager):
         related_config_keys = []

         # file upload validation
-        config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(
-            config=config, is_vision=False
-        )
+        config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(config=config)
         related_config_keys.extend(current_related_config_keys)

         # opening_statement

@@ -1,6 +1,5 @@
 import contextvars
 import logging
-import os
 import threading
 import uuid
 from collections.abc import Generator

@@ -10,6 +9,7 @@ from flask import Flask, current_app
 from pydantic import ValidationError

 import contexts
+from configs import dify_config
 from constants import UUID_NIL
 from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
 from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfigManager

@@ -26,7 +26,6 @@ from core.ops.ops_trace_manager import TraceQueueManager
 from extensions.ext_database import db
 from factories import file_factory
 from models.account import Account
-from models.enums import CreatedByRole
 from models.model import App, Conversation, EndUser, Message
 from models.workflow import Workflow

@@ -98,13 +97,10 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
         # parse files
         files = args["files"] if args.get("files") else []
         file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False)
-        role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
         if file_extra_config:
             file_objs = file_factory.build_from_mappings(
                 mappings=files,
                 tenant_id=app_model.tenant_id,
-                user_id=user.id,
-                role=role,
                 config=file_extra_config,
             )
         else:

@@ -127,10 +123,11 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
         application_generate_entity = AdvancedChatAppGenerateEntity(
             task_id=str(uuid.uuid4()),
             app_config=app_config,
+            file_upload_config=file_extra_config,
             conversation_id=conversation.id if conversation else None,
             inputs=conversation.inputs
             if conversation
-            else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
+            else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
             query=query,
             files=file_objs,
             parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,

@@ -317,7 +314,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
             logger.exception("Validation Error when generating")
             queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
         except (ValueError, InvokeError) as e:
-            if os.environ.get("DEBUG", "false").lower() == "true":
+            if dify_config.DEBUG:
                 logger.exception("Error when generating")
             queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
         except Exception as e:

@@ -242,7 +242,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleManage):
                     start_listener_time = time.time()
                     yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id)
             except Exception as e:
-                logger.error(e)
+                logger.exception(e)
                 break
         if tts_publisher:
             yield MessageAudioEndStreamResponse(audio="", task_id=task_id)

|
|||
import logging
|
||||
import os
|
||||
import threading
|
||||
import uuid
|
||||
from collections.abc import Generator
|
||||
|
@ -8,6 +7,7 @@ from typing import Any, Literal, Union, overload
|
|||
from flask import Flask, current_app
|
||||
from pydantic import ValidationError
|
||||
|
||||
from configs import dify_config
|
||||
from constants import UUID_NIL
|
||||
from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter
|
||||
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
|
||||
|
@ -23,7 +23,6 @@ from core.ops.ops_trace_manager import TraceQueueManager
|
|||
from extensions.ext_database import db
|
||||
from factories import file_factory
|
||||
from models import Account, App, EndUser
|
||||
from models.enums import CreatedByRole
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -103,8 +102,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
|
|||
# always enable retriever resource in debugger mode
|
||||
override_model_config_dict["retriever_resource"] = {"enabled": True}
|
||||
|
||||
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
|
||||
|
||||
# parse files
|
||||
files = args.get("files") or []
|
||||
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
|
||||
|
@ -112,8 +109,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
|
|||
file_objs = file_factory.build_from_mappings(
|
||||
mappings=files,
|
||||
tenant_id=app_model.tenant_id,
|
||||
user_id=user.id,
|
||||
role=role,
|
||||
config=file_extra_config,
|
||||
)
|
||||
else:
|
||||
|
@ -135,10 +130,11 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
|
|||
task_id=str(uuid.uuid4()),
|
||||
app_config=app_config,
|
||||
model_conf=ModelConfigConverter.convert(app_config),
|
||||
file_upload_config=file_extra_config,
|
||||
conversation_id=conversation.id if conversation else None,
|
||||
inputs=conversation.inputs
|
||||
if conversation
|
||||
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
|
||||
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
|
||||
query=query,
|
||||
files=file_objs,
|
||||
parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,
|
||||
|
@ -230,7 +226,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
|
|||
logger.exception("Validation Error when generating")
|
||||
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
|
||||
except (ValueError, InvokeError) as e:
|
||||
if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true":
|
||||
if dify_config.DEBUG:
|
||||
logger.exception("Error when generating")
|
||||
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
|
||||
except Exception as e:
|
||||
|
|
|
@@ -2,12 +2,11 @@ from collections.abc import Mapping
 from typing import TYPE_CHECKING, Any, Optional

 from core.app.app_config.entities import VariableEntityType
-from core.file import File, FileExtraConfig
+from core.file import File, FileUploadConfig
 from factories import file_factory

 if TYPE_CHECKING:
     from core.app.app_config.entities import AppConfig, VariableEntity
-    from models.enums import CreatedByRole


 class BaseAppGenerator:

@@ -16,8 +15,6 @@ class BaseAppGenerator:
         *,
         user_inputs: Optional[Mapping[str, Any]],
         app_config: "AppConfig",
-        user_id: str,
-        role: "CreatedByRole",
     ) -> Mapping[str, Any]:
         user_inputs = user_inputs or {}
         # Filter input variables from form configuration, handle required fields, default values, and option values

@@ -34,9 +31,7 @@ class BaseAppGenerator:
             k: file_factory.build_from_mapping(
                 mapping=v,
                 tenant_id=app_config.tenant_id,
-                user_id=user_id,
-                role=role,
-                config=FileExtraConfig(
+                config=FileUploadConfig(
                     allowed_file_types=entity_dictionary[k].allowed_file_types,
                     allowed_extensions=entity_dictionary[k].allowed_file_extensions,
                     allowed_upload_methods=entity_dictionary[k].allowed_file_upload_methods,

@@ -50,9 +45,7 @@ class BaseAppGenerator:
             k: file_factory.build_from_mappings(
                 mappings=v,
                 tenant_id=app_config.tenant_id,
-                user_id=user_id,
-                role=role,
-                config=FileExtraConfig(
+                config=FileUploadConfig(
                     allowed_file_types=entity_dictionary[k].allowed_file_types,
                     allowed_extensions=entity_dictionary[k].allowed_file_extensions,
                     allowed_upload_methods=entity_dictionary[k].allowed_file_upload_methods,

|
|||
import logging
|
||||
import os
|
||||
import threading
|
||||
import uuid
|
||||
from collections.abc import Generator
|
||||
|
@ -8,6 +7,7 @@ from typing import Any, Literal, Union, overload
|
|||
from flask import Flask, current_app
|
||||
from pydantic import ValidationError
|
||||
|
||||
from configs import dify_config
|
||||
from constants import UUID_NIL
|
||||
from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter
|
||||
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
|
||||
|
@ -23,7 +23,6 @@ from core.ops.ops_trace_manager import TraceQueueManager
|
|||
from extensions.ext_database import db
|
||||
from factories import file_factory
|
||||
from models.account import Account
|
||||
from models.enums import CreatedByRole
|
||||
from models.model import App, EndUser
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
@ -101,8 +100,6 @@ class ChatAppGenerator(MessageBasedAppGenerator):
|
|||
# always enable retriever resource in debugger mode
|
||||
override_model_config_dict["retriever_resource"] = {"enabled": True}
|
||||
|
||||
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
|
||||
|
||||
# parse files
|
||||
files = args["files"] if args.get("files") else []
|
||||
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
|
||||
|
@ -110,8 +107,6 @@ class ChatAppGenerator(MessageBasedAppGenerator):
|
|||
file_objs = file_factory.build_from_mappings(
|
||||
mappings=files,
|
||||
tenant_id=app_model.tenant_id,
|
||||
user_id=user.id,
|
||||
role=role,
|
||||
config=file_extra_config,
|
||||
)
|
||||
else:
|
||||
|
@ -133,10 +128,11 @@ class ChatAppGenerator(MessageBasedAppGenerator):
|
|||
task_id=str(uuid.uuid4()),
|
||||
app_config=app_config,
|
||||
model_conf=ModelConfigConverter.convert(app_config),
|
||||
file_upload_config=file_extra_config,
|
||||
conversation_id=conversation.id if conversation else None,
|
||||
inputs=conversation.inputs
|
||||
if conversation
|
||||
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
|
||||
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
|
||||
query=query,
|
||||
files=file_objs,
|
||||
parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,
|
||||
|
@ -227,7 +223,7 @@ class ChatAppGenerator(MessageBasedAppGenerator):
|
|||
logger.exception("Validation Error when generating")
|
||||
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
|
||||
except (ValueError, InvokeError) as e:
|
||||
if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true":
|
||||
if dify_config.DEBUG:
|
||||
logger.exception("Error when generating")
|
||||
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
|
||||
except Exception as e:
|
||||
|
|
|
@@ -1,5 +1,4 @@
 import logging
-import os
 import threading
 import uuid
 from collections.abc import Generator

@@ -8,6 +7,7 @@ from typing import Any, Literal, Union, overload
 from flask import Flask, current_app
 from pydantic import ValidationError

+from configs import dify_config
 from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter
 from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
 from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskStoppedError, PublishFrom

@@ -22,7 +22,6 @@ from core.ops.ops_trace_manager import TraceQueueManager
 from extensions.ext_database import db
 from factories import file_factory
 from models import Account, App, EndUser, Message
-from models.enums import CreatedByRole
 from services.errors.app import MoreLikeThisDisabledError
 from services.errors.message import MessageNotExistsError

@@ -88,8 +87,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
             tenant_id=app_model.tenant_id, config=args.get("model_config")
         )

-        role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
-
         # parse files
         files = args["files"] if args.get("files") else []
         file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())

@@ -97,8 +94,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
             file_objs = file_factory.build_from_mappings(
                 mappings=files,
                 tenant_id=app_model.tenant_id,
-                user_id=user.id,
-                role=role,
                 config=file_extra_config,
             )
         else:

@@ -110,7 +105,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
         )

         # get tracing instance
-        user_id = user.id if isinstance(user, Account) else user.session_id
         trace_manager = TraceQueueManager(app_model.id)

         # init application generate entity

@@ -118,7 +112,8 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
             task_id=str(uuid.uuid4()),
             app_config=app_config,
             model_conf=ModelConfigConverter.convert(app_config),
-            inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
+            file_upload_config=file_extra_config,
+            inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
             query=query,
             files=file_objs,
             user_id=user.id,

@@ -203,7 +198,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
             logger.exception("Validation Error when generating")
             queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
         except (ValueError, InvokeError) as e:
-            if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true":
+            if dify_config.DEBUG:
                 logger.exception("Error when generating")
             queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
         except Exception as e:

@@ -259,14 +254,11 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
             override_model_config_dict["model"] = model_dict

         # parse files
-        role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
         file_extra_config = FileUploadConfigManager.convert(override_model_config_dict)
         if file_extra_config:
             file_objs = file_factory.build_from_mappings(
                 mappings=message.message_files,
                 tenant_id=app_model.tenant_id,
-                user_id=user.id,
-                role=role,
                 config=file_extra_config,
             )
         else:

@@ -46,9 +46,7 @@ class WorkflowAppConfigManager(BaseAppConfigManager):
         related_config_keys = []

         # file upload validation
-        config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(
-            config=config, is_vision=False
-        )
+        config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(config=config)
         related_config_keys.extend(current_related_config_keys)

         # text_to_speech

@@ -1,6 +1,5 @@
 import contextvars
 import logging
-import os
 import threading
 import uuid
 from collections.abc import Generator, Mapping, Sequence

@@ -10,6 +9,7 @@ from flask import Flask, current_app
 from pydantic import ValidationError

 import contexts
+from configs import dify_config
 from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
 from core.app.apps.base_app_generator import BaseAppGenerator
 from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskStoppedError, PublishFrom

@@ -25,7 +25,6 @@ from core.ops.ops_trace_manager import TraceQueueManager
 from extensions.ext_database import db
 from factories import file_factory
 from models import Account, App, EndUser, Workflow
-from models.enums import CreatedByRole

 logger = logging.getLogger(__name__)

@@ -70,15 +69,11 @@ class WorkflowAppGenerator(BaseAppGenerator):
     ):
         files: Sequence[Mapping[str, Any]] = args.get("files") or []

-        role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
-
         # parse files
         file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False)
         system_files = file_factory.build_from_mappings(
             mappings=files,
             tenant_id=app_model.tenant_id,
-            user_id=user.id,
-            role=role,
             config=file_extra_config,
         )

@@ -100,7 +95,8 @@ class WorkflowAppGenerator(BaseAppGenerator):
         application_generate_entity = WorkflowAppGenerateEntity(
             task_id=str(uuid.uuid4()),
             app_config=app_config,
-            inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
+            file_upload_config=file_extra_config,
+            inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
             files=system_files,
             user_id=user.id,
             stream=stream,

@@ -261,7 +257,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
             logger.exception("Validation Error when generating")
             queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
         except (ValueError, InvokeError) as e:
-            if os.environ.get("DEBUG") and os.environ.get("DEBUG", "false").lower() == "true":
+            if dify_config.DEBUG:
                 logger.exception("Error when generating")
             queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
         except Exception as e:

@@ -216,7 +216,7 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleManage):
                 else:
                     yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id)
             except Exception as e:
-                logger.error(e)
+                logger.exception(e)
                 break
         if tts_publisher:
             yield MessageAudioEndStreamResponse(audio="", task_id=task_id)

@@ -361,6 +361,7 @@ class WorkflowBasedAppRunner(AppRunner):
                     node_run_index=workflow_entry.graph_engine.graph_runtime_state.node_run_steps,
                     output=event.pre_iteration_output,
                     parallel_mode_run_id=event.parallel_mode_run_id,
+                    duration=event.duration,
                 )
             )
         elif isinstance(event, (IterationRunSucceededEvent | IterationRunFailedEvent)):

@@ -7,7 +7,7 @@ from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
 from constants import UUID_NIL
 from core.app.app_config.entities import AppConfig, EasyUIBasedAppConfig, WorkflowUIBasedAppConfig
 from core.entities.provider_configuration import ProviderModelBundle
-from core.file.models import File
+from core.file import File, FileUploadConfig
 from core.model_runtime.entities.model_entities import AIModelEntity
 from core.ops.ops_trace_manager import TraceQueueManager


@@ -80,6 +80,7 @@ class AppGenerateEntity(BaseModel):

     # app config
     app_config: AppConfig
+    file_upload_config: Optional[FileUploadConfig] = None

     inputs: Mapping[str, Any]
     files: Sequence[File]

@@ -111,6 +111,7 @@ class QueueIterationNextEvent(AppQueueEvent):
     """iteration run in parallel mode run id"""
     node_run_index: int
     output: Optional[Any] = None  # output for the current iteration
+    duration: Optional[float] = None

     @field_validator("output", mode="before")
     @classmethod

@@ -307,6 +308,8 @@ class QueueNodeSucceededEvent(AppQueueEvent):
     execution_metadata: Optional[dict[NodeRunMetadataKey, Any]] = None

     error: Optional[str] = None
+    """single iteration duration map"""
+    iteration_duration_map: Optional[dict[str, float]] = None


 class QueueNodeInIterationFailedEvent(AppQueueEvent):

@@ -434,6 +434,7 @@ class IterationNodeNextStreamResponse(StreamResponse):
         parallel_id: Optional[str] = None
         parallel_start_node_id: Optional[str] = None
         parallel_mode_run_id: Optional[str] = None
+        duration: Optional[float] = None

     event: StreamEvent = StreamEvent.ITERATION_NEXT
     workflow_run_id: str

@@ -624,6 +624,7 @@ class WorkflowCycleManage:
                 parallel_id=event.parallel_id,
                 parallel_start_node_id=event.parallel_start_node_id,
                 parallel_mode_run_id=event.parallel_mode_run_id,
+                duration=event.duration,
             ),
         )

@@ -2,13 +2,13 @@ from .constants import FILE_MODEL_IDENTITY
 from .enums import ArrayFileAttribute, FileAttribute, FileBelongsTo, FileTransferMethod, FileType
 from .models import (
     File,
-    FileExtraConfig,
+    FileUploadConfig,
     ImageConfig,
 )

 __all__ = [
     "FileType",
-    "FileExtraConfig",
+    "FileUploadConfig",
     "FileTransferMethod",
     "FileBelongsTo",
     "File",

@@ -3,7 +3,7 @@ import base64
 from configs import dify_config
 from core.file import file_repository
 from core.helper import ssrf_proxy
-from core.model_runtime.entities import AudioPromptMessageContent, ImagePromptMessageContent
+from core.model_runtime.entities import AudioPromptMessageContent, ImagePromptMessageContent, VideoPromptMessageContent
 from extensions.ext_database import db
 from extensions.ext_storage import storage

@@ -33,25 +33,28 @@ def get_attr(*, file: File, attr: FileAttribute):
             raise ValueError(f"Invalid file attribute: {attr}")


-def to_prompt_message_content(f: File, /):
+def to_prompt_message_content(
+    f: File,
+    /,
+    *,
+    image_detail_config: ImagePromptMessageContent.DETAIL = ImagePromptMessageContent.DETAIL.LOW,
+):
     """
-    Convert a File object to an ImagePromptMessageContent object.
+    Convert a File object to an ImagePromptMessageContent or AudioPromptMessageContent object.

-    This function takes a File object and converts it to an ImagePromptMessageContent
-    object, which can be used as a prompt for image-based AI models.
+    This function takes a File object and converts it to an appropriate PromptMessageContent
+    object, which can be used as a prompt for image or audio-based AI models.

     Args:
-        file (File): The File object to convert. Must be of type FileType.IMAGE.
+        f (File): The File object to convert.
+        detail (Optional[ImagePromptMessageContent.DETAIL]): The detail level for image prompts.
+            If not provided, defaults to ImagePromptMessageContent.DETAIL.LOW.

     Returns:
-        ImagePromptMessageContent: An object containing the image data and detail level.
+        Union[ImagePromptMessageContent, AudioPromptMessageContent]: An object containing the file data and detail level

     Raises:
-        ValueError: If the file is not an image or if the file data is missing.
-
-    Note:
-        The detail level of the image prompt is determined by the file's extra_config.
-        If not specified, it defaults to ImagePromptMessageContent.DETAIL.LOW.
+        ValueError: If the file type is not supported or if required data is missing.
     """
     match f.type:
         case FileType.IMAGE:

@@ -60,19 +63,20 @@ def to_prompt_message_content(f: File, /):
             else:
                 data = _to_base64_data_string(f)

-            if f._extra_config and f._extra_config.image_config and f._extra_config.image_config.detail:
-                detail = f._extra_config.image_config.detail
-            else:
-                detail = ImagePromptMessageContent.DETAIL.LOW
-
-            return ImagePromptMessageContent(data=data, detail=detail)
+            return ImagePromptMessageContent(data=data, detail=image_detail_config)
         case FileType.AUDIO:
             encoded_string = _file_to_encoded_string(f)
             if f.extension is None:
                 raise ValueError("Missing file extension")
             return AudioPromptMessageContent(data=encoded_string, format=f.extension.lstrip("."))
+        case FileType.VIDEO:
+            if dify_config.MULTIMODAL_SEND_VIDEO_FORMAT == "url":
+                data = _to_url(f)
+            else:
+                data = _to_base64_data_string(f)
+            return VideoPromptMessageContent(data=data, format=f.extension.lstrip("."))
         case _:
-            raise ValueError(f"file type {f.type} is not supported")
+            raise ValueError("file type f.type is not supported")


 def download(f: File, /):

@@ -112,7 +116,7 @@ def _download_file_content(path: str, /):
 def _get_encoded_string(f: File, /):
     match f.transfer_method:
         case FileTransferMethod.REMOTE_URL:
-            response = ssrf_proxy.get(f.remote_url)
+            response = ssrf_proxy.get(f.remote_url, follow_redirects=True)
             response.raise_for_status()
             content = response.content
             encoded_string = base64.b64encode(content).decode("utf-8")

@@ -140,6 +144,8 @@ def _file_to_encoded_string(f: File, /):
     match f.type:
         case FileType.IMAGE:
             return _to_base64_data_string(f)
+        case FileType.VIDEO:
+            return _to_base64_data_string(f)
         case FileType.AUDIO:
             return _get_encoded_string(f)
         case _:

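The new signature makes the detail level an explicit keyword-only parameter instead of reading it from the file's private _extra_config. A runnable sketch of the same calling convention, with a stand-in type in place of File:

    from enum import Enum

    class Detail(Enum):
        LOW = "low"
        HIGH = "high"

    def to_content(f: str, /, *, image_detail_config: Detail = Detail.LOW) -> tuple[str, Detail]:
        # `/` makes f positional-only and `*` makes the detail keyword-only,
        # mirroring the signature introduced above.
        return (f, image_detail_config)

    assert to_content("photo.png") == ("photo.png", Detail.LOW)
    assert to_content("photo.png", image_detail_config=Detail.HIGH)[1] is Detail.HIGH
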
@@ -21,7 +21,7 @@ class ImageConfig(BaseModel):
     detail: ImagePromptMessageContent.DETAIL | None = None


-class FileExtraConfig(BaseModel):
+class FileUploadConfig(BaseModel):
     """
     File Upload Entity.
     """

@@ -46,7 +46,6 @@ class File(BaseModel):
     extension: Optional[str] = Field(default=None, description="File extension, should contains dot")
     mime_type: Optional[str] = None
     size: int = -1
-    _extra_config: FileExtraConfig | None = None

     def to_dict(self) -> Mapping[str, str | int | None]:
         data = self.model_dump(mode="json")

@@ -107,34 +106,4 @@ class File(BaseModel):
             case FileTransferMethod.TOOL_FILE:
                 if not self.related_id:
                     raise ValueError("Missing file related_id")
-
-        # Validate the extra config.
-        if not self._extra_config:
-            return self
-
-        if self._extra_config.allowed_file_types:
-            if self.type not in self._extra_config.allowed_file_types and self.type != FileType.CUSTOM:
-                raise ValueError(f"Invalid file type: {self.type}")
-
-        if self._extra_config.allowed_extensions and self.extension not in self._extra_config.allowed_extensions:
-            raise ValueError(f"Invalid file extension: {self.extension}")
-
-        if (
-            self._extra_config.allowed_upload_methods
-            and self.transfer_method not in self._extra_config.allowed_upload_methods
-        ):
-            raise ValueError(f"Invalid transfer method: {self.transfer_method}")
-
-        match self.type:
-            case FileType.IMAGE:
-                # NOTE: This part of validation is deprecated, but still used in app features "Image Upload".
-                if not self._extra_config.image_config:
-                    return self
-                # TODO: skip check if transfer_methods is empty, because many test cases are not setting this field
-                if (
-                    self._extra_config.image_config.transfer_methods
-                    and self.transfer_method not in self._extra_config.image_config.transfer_methods
-                ):
-                    raise ValueError(f"Invalid transfer method: {self.transfer_method}")

         return self

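With the rename to FileUploadConfig, the File model no longer carries a private _extra_config, and the per-file checks shown as removed above go away with it. For reference, a standalone sketch of the extension rule the removed validator enforced:

    def check_extension(extension: str, allowed: list[str]) -> None:
        # Same rule as the removed validator: an empty allow-list permits
        # everything; otherwise the extension must be on the list.
        if allowed and extension not in allowed:
            raise ValueError(f"Invalid file extension: {extension}")

    check_extension(".png", [".png", ".jpg"])  # passes silently
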
@@ -0,0 +1,3 @@
+from .code_executor import CodeExecutor, CodeLanguage
+
+__all__ = ["CodeExecutor", "CodeLanguage"]

@@ -1,7 +1,8 @@
 import logging
+from collections.abc import Mapping
 from enum import Enum
 from threading import Lock
-from typing import Optional
+from typing import Any, Optional

 from httpx import Timeout, post
 from pydantic import BaseModel

@@ -117,7 +118,7 @@ class CodeExecutor:
         return response.data.stdout or ""

     @classmethod
-    def execute_workflow_code_template(cls, language: CodeLanguage, code: str, inputs: dict) -> dict:
+    def execute_workflow_code_template(cls, language: CodeLanguage, code: str, inputs: Mapping[str, Any]) -> dict:
         """
         Execute code
         :param language: code language

@@ -2,6 +2,8 @@ import json
 import re
 from abc import ABC, abstractmethod
 from base64 import b64encode
+from collections.abc import Mapping
+from typing import Any


 class TemplateTransformer(ABC):

@@ -10,7 +12,7 @@ class TemplateTransformer(ABC):
     _result_tag: str = "<<RESULT>>"

     @classmethod
-    def transform_caller(cls, code: str, inputs: dict) -> tuple[str, str]:
+    def transform_caller(cls, code: str, inputs: Mapping[str, Any]) -> tuple[str, str]:
         """
         Transform code to python runner
         :param code: code

@@ -48,13 +50,13 @@ class TemplateTransformer(ABC):
         pass

     @classmethod
-    def serialize_inputs(cls, inputs: dict) -> str:
+    def serialize_inputs(cls, inputs: Mapping[str, Any]) -> str:
         inputs_json_str = json.dumps(inputs, ensure_ascii=False).encode()
         input_base64_encoded = b64encode(inputs_json_str).decode("utf-8")
         return input_base64_encoded

     @classmethod
-    def assemble_runner_script(cls, code: str, inputs: dict) -> str:
+    def assemble_runner_script(cls, code: str, inputs: Mapping[str, Any]) -> str:
         # assemble runner script
         script = cls.get_runner_script()
         script = script.replace(cls._code_placeholder, code)

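For context on serialize_inputs, the encoding shown in the diff is a JSON-then-base64 round trip, so the runner script can receive arbitrary inputs without quoting issues. A runnable sketch of the same transformation:

    import json
    from base64 import b64decode, b64encode

    inputs = {"name": "dify", "count": 3}
    # Encode exactly as serialize_inputs does above.
    encoded = b64encode(json.dumps(inputs, ensure_ascii=False).encode()).decode("utf-8")
    # The runner decodes it back to the original mapping.
    assert json.loads(b64decode(encoded)) == inputs
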
@@ -3,26 +3,20 @@ Proxy requests to avoid SSRF
 """

 import logging
-import os
 import time

 import httpx

-SSRF_PROXY_ALL_URL = os.getenv("SSRF_PROXY_ALL_URL", "")
-SSRF_PROXY_HTTP_URL = os.getenv("SSRF_PROXY_HTTP_URL", "")
-SSRF_PROXY_HTTPS_URL = os.getenv("SSRF_PROXY_HTTPS_URL", "")
-SSRF_DEFAULT_MAX_RETRIES = int(os.getenv("SSRF_DEFAULT_MAX_RETRIES", "3"))
-SSRF_DEFAULT_TIME_OUT = float(os.getenv("SSRF_DEFAULT_TIME_OUT", "5"))
-SSRF_DEFAULT_CONNECT_TIME_OUT = float(os.getenv("SSRF_DEFAULT_CONNECT_TIME_OUT", "5"))
-SSRF_DEFAULT_READ_TIME_OUT = float(os.getenv("SSRF_DEFAULT_READ_TIME_OUT", "5"))
-SSRF_DEFAULT_WRITE_TIME_OUT = float(os.getenv("SSRF_DEFAULT_WRITE_TIME_OUT", "5"))
+from configs import dify_config
+
+SSRF_DEFAULT_MAX_RETRIES = dify_config.SSRF_DEFAULT_MAX_RETRIES

 proxy_mounts = (
     {
-        "http://": httpx.HTTPTransport(proxy=SSRF_PROXY_HTTP_URL),
-        "https://": httpx.HTTPTransport(proxy=SSRF_PROXY_HTTPS_URL),
+        "http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL),
+        "https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL),
     }
-    if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL
+    if dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL
     else None
 )

@@ -38,17 +32,17 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):

     if "timeout" not in kwargs:
         kwargs["timeout"] = httpx.Timeout(
-            SSRF_DEFAULT_TIME_OUT,
-            connect=SSRF_DEFAULT_CONNECT_TIME_OUT,
-            read=SSRF_DEFAULT_READ_TIME_OUT,
-            write=SSRF_DEFAULT_WRITE_TIME_OUT,
+            timeout=dify_config.SSRF_DEFAULT_TIME_OUT,
+            connect=dify_config.SSRF_DEFAULT_CONNECT_TIME_OUT,
+            read=dify_config.SSRF_DEFAULT_READ_TIME_OUT,
+            write=dify_config.SSRF_DEFAULT_WRITE_TIME_OUT,
        )

     retries = 0
     while retries <= max_retries:
         try:
-            if SSRF_PROXY_ALL_URL:
-                with httpx.Client(proxy=SSRF_PROXY_ALL_URL) as client:
+            if dify_config.SSRF_PROXY_ALL_URL:
+                with httpx.Client(proxy=dify_config.SSRF_PROXY_ALL_URL) as client:
                     response = client.request(method=method, url=url, **kwargs)
             elif proxy_mounts:
                 with httpx.Client(mounts=proxy_mounts) as client:

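The module now builds its timeout object from dify_config instead of raw os.getenv calls. A runnable sketch of the equivalent httpx.Timeout with the 5-second defaults this commit introduces:

    import httpx

    # Matches the new defaults: overall, connect, read, and write timeouts
    # of 5 seconds each, as in the SSRF_DEFAULT_*_TIME_OUT settings above.
    timeout = httpx.Timeout(timeout=5, connect=5, read=5, write=5)
    client = httpx.Client(timeout=timeout)
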
@@ -81,15 +81,18 @@ class TokenBufferMemory:
                 db.session.query(WorkflowRun).filter(WorkflowRun.id == message.workflow_run_id).first()
             )

-            if workflow_run:
+            if workflow_run and workflow_run.workflow:
                 file_extra_config = FileUploadConfigManager.convert(
                     workflow_run.workflow.features_dict, is_vision=False
                 )

+        detail = ImagePromptMessageContent.DETAIL.LOW
         if file_extra_config and app_record:
             file_objs = file_factory.build_from_message_files(
                 message_files=files, tenant_id=app_record.tenant_id, config=file_extra_config
             )
+            if file_extra_config.image_config and file_extra_config.image_config.detail:
+                detail = file_extra_config.image_config.detail
         else:
             file_objs = []

@@ -98,12 +101,16 @@ class TokenBufferMemory:
             else:
                 prompt_message_contents: list[PromptMessageContent] = []
                 prompt_message_contents.append(TextPromptMessageContent(data=message.query))
-                for file_obj in file_objs:
-                    if file_obj.type in {FileType.IMAGE, FileType.AUDIO}:
-                        prompt_message = file_manager.to_prompt_message_content(file_obj)
+                for file in file_objs:
+                    if file.type in {FileType.IMAGE, FileType.AUDIO}:
+                        prompt_message = file_manager.to_prompt_message_content(
+                            file,
+                            image_detail_config=detail,
+                        )
                         prompt_message_contents.append(prompt_message)

                 prompt_messages.append(UserPromptMessage(content=prompt_message_contents))

             else:
                 prompt_messages.append(UserPromptMessage(content=message.query))

@ -1,8 +1,8 @@
import logging
import os
from collections.abc import Callable, Generator, Iterable, Sequence
from typing import IO, Any, Optional, Union, cast

from configs import dify_config
from core.entities.embedding_type import EmbeddingInputType
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import ModelLoadBalancingConfiguration

@ -473,7 +473,7 @@ class LBModelManager:
                continue

            if bool(os.environ.get("DEBUG", "False").lower() == "true"):
            if dify_config.DEBUG:
                logger.info(
                    f"Model LB\nid: {config.id}\nname:{config.name}\n"
                    f"tenant_id: {self._tenant_id}\nprovider: {self._provider}\n"
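Replacing the ad-hoc `os.environ` parsing with `dify_config.DEBUG` centralizes coercion of the env string into a boolean. A rough sketch of how a settings object can provide that, assuming the pydantic-settings package; the class here is illustrative, not Dify's actual config:

import os
from pydantic_settings import BaseSettings

class ExampleConfig(BaseSettings):
    DEBUG: bool = False  # pydantic coerces "true"/"1"/"yes" from the environment

os.environ["DEBUG"] = "true"
config = ExampleConfig()
assert config.DEBUG is True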
@ -12,11 +12,13 @@ from .message_entities import (
    TextPromptMessageContent,
    ToolPromptMessage,
    UserPromptMessage,
    VideoPromptMessageContent,
)
from .model_entities import ModelPropertyKey

__all__ = [
    "ImagePromptMessageContent",
    "VideoPromptMessageContent",
    "PromptMessage",
    "PromptMessageRole",
    "LLMUsage",
@ -56,6 +56,7 @@ class PromptMessageContentType(Enum):
    TEXT = "text"
    IMAGE = "image"
    AUDIO = "audio"
    VIDEO = "video"


class PromptMessageContent(BaseModel):

@ -75,6 +76,12 @@ class TextPromptMessageContent(PromptMessageContent):
    type: PromptMessageContentType = PromptMessageContentType.TEXT


class VideoPromptMessageContent(PromptMessageContent):
    type: PromptMessageContentType = PromptMessageContentType.VIDEO
    data: str = Field(..., description="Base64 encoded video data")
    format: str = Field(..., description="Video format")


class AudioPromptMessageContent(PromptMessageContent):
    type: PromptMessageContentType = PromptMessageContentType.AUDIO
    data: str = Field(..., description="Base64 encoded audio data")
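Based on the fields declared above, constructing one of the new video parts would look roughly like this (the base64 payload is a placeholder, and the import of the entity class is assumed):

from base64 import b64encode

# Placeholder bytes standing in for real video data.
video_b64 = b64encode(b"\x00\x00\x00\x18ftypmp42").decode("utf-8")

content = VideoPromptMessageContent(data=video_b64, format="mp4")
# `type` defaults to PromptMessageContentType.VIDEO per the model definition above.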
@ -47,9 +47,9 @@ class AzureRerankModel(RerankModel):
                result = response.read()
                return json.loads(result)
            except urllib.error.HTTPError as error:
                logger.error(f"The request failed with status code: {error.code}")
                logger.error(error.info())
                logger.error(error.read().decode("utf8", "ignore"))
                logger.exception(f"The request failed with status code: {error.code}")
                logger.exception(error.info())
                logger.exception(error.read().decode("utf8", "ignore"))
                raise

    def _invoke(
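This commit swaps `logger.error` for `logger.exception` across many `except` blocks; the difference is that `logger.exception` appends the active traceback automatically. A self-contained illustration of the standard-library behavior:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    1 / 0
except ZeroDivisionError:
    logger.error("failed")      # logs the message only
    logger.exception("failed")  # logs the message plus the full traceback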
@ -113,7 +113,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
        try:
            client = AzureOpenAI(**self._to_credential_kwargs(credentials))

            if model.startswith("o1"):
            if "o1" in model:
                client.chat.completions.create(
                    messages=[{"role": "user", "content": "ping"}],
                    model=model,

@ -311,7 +311,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
        prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)

        block_as_stream = False
        if model.startswith("o1"):
        if "o1" in model:
            if stream:
                block_as_stream = True
                stream = False

@ -404,7 +404,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
            ]
        )

        if model.startswith("o1"):
        if "o1" in model:
            system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)])
            if system_message_count > 0:
                new_prompt_messages = []

@ -653,7 +653,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
            tokens_per_message = 4
            # if there's a name, the role is omitted
            tokens_per_name = -1
        elif model.startswith("gpt-35-turbo") or model.startswith("gpt-4") or model.startswith("o1"):
        elif model.startswith("gpt-35-turbo") or model.startswith("gpt-4") or "o1" in model:
            tokens_per_message = 3
            tokens_per_name = 1
        else:
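The loosened check matches Azure deployment names that merely contain "o1" rather than start with it. Note that a substring test is broader than a prefix test and can also match unrelated names containing "o1", which appears to be the accepted trade-off here:

model = "my-o1-preview-deployment"
print(model.startswith("o1"))  # False: a prefix test misses custom deployment names
print("o1" in model)           # True: the substring test catches them, and any other name containing "o1"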
@ -16,9 +16,9 @@ parameter_rules:
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    default: 8192
    min: 1
    max: 4096
    max: 8192
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
@ -0,0 +1,95 @@
model: Qwen2.5-72B-Instruct
label:
  zh_Hans: Qwen2.5-72B-Instruct
  en_US: Qwen2.5-72B-Instruct
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 32768
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    label:
      en_US: "Max Tokens"
      zh_Hans: "最大Token数"
    type: int
    default: 512
    min: 1
    required: true
    help:
      en_US: "The maximum number of tokens that can be generated by the model varies depending on the model."
      zh_Hans: "模型可生成的最大 token 个数,不同模型上限不同。"

  - name: temperature
    use_template: temperature
    label:
      en_US: "Temperature"
      zh_Hans: "采样温度"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_p
    use_template: top_p
    label:
      en_US: "Top P"
      zh_Hans: "Top P"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_k
    use_template: top_k
    label:
      en_US: "Top K"
      zh_Hans: "Top K"
    type: int
    default: 50
    min: 0
    max: 100
    required: true
    help:
      en_US: "The value range is [0,100], which limits the model to only select from the top k words with the highest probability when choosing the next word at each step. The larger the value, the more diverse text generation will be."
      zh_Hans: "取值范围为 [0,100],限制模型在每一步选择下一个词时,只从概率最高的前 k 个词中选取。数值越大,文本生成越多样。"

  - name: frequency_penalty
    use_template: frequency_penalty
    label:
      en_US: "Frequency Penalty"
      zh_Hans: "频率惩罚"
    type: float
    default: 0
    min: -1.0
    max: 1.0
    precision: 1
    required: false
    help:
      en_US: "Used to adjust the frequency of repeated content in automatically generated text. Positive numbers reduce repetition, while negative numbers increase repetition. After setting this parameter, if a word has already appeared in the text, the model will decrease the probability of choosing that word for subsequent generation."
      zh_Hans: "用于调整自动生成文本中重复内容的频率。正数减少重复,负数增加重复。设置此参数后,如果一个词在文本中已经出现过,模型在后续生成中选择该词的概率会降低。"

  - name: user
    use_template: text
    label:
      en_US: "User"
      zh_Hans: "用户"
    type: string
    required: false
    help:
      en_US: "Used to track and differentiate conversation requests from different users."
      zh_Hans: "用于追踪和区分不同用户的对话请求。"
@ -1,3 +1,4 @@
- Qwen2.5-72B-Instruct
- Qwen2-7B-Instruct
- Qwen2-72B-Instruct
- Yi-1.5-34B-Chat
@ -6,6 +6,7 @@ from core.model_runtime.entities.message_entities import (
    PromptMessage,
    PromptMessageTool,
)
from core.model_runtime.entities.model_entities import ModelFeature
from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel


@ -28,14 +29,13 @@ class GiteeAILargeLanguageModel(OAIAPICompatLargeLanguageModel):
        user: Optional[str] = None,
    ) -> Union[LLMResult, Generator]:
        self._add_custom_parameters(credentials, model, model_parameters)
        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream)
        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)

    def validate_credentials(self, model: str, credentials: dict) -> None:
        self._add_custom_parameters(credentials, model, None)
        super().validate_credentials(model, credentials)

    @staticmethod
    def _add_custom_parameters(credentials: dict, model: str, model_parameters: dict) -> None:
    def _add_custom_parameters(self, credentials: dict, model: str, model_parameters: dict) -> None:
        if model is None:
            model = "bge-large-zh-v1.5"

@ -45,3 +45,7 @@ class GiteeAILargeLanguageModel(OAIAPICompatLargeLanguageModel):
            credentials["mode"] = LLMMode.COMPLETION.value
        else:
            credentials["mode"] = LLMMode.CHAT.value

        schema = self.get_model_schema(model, credentials)
        if ModelFeature.TOOL_CALL in schema.features or ModelFeature.MULTI_TOOL_CALL in schema.features:
            credentials["function_calling_type"] = "tool_call"
@ -55,6 +55,7 @@ class JinaRerankModel(RerankModel):
                base_url + "/rerank",
                json={"model": model, "query": query, "documents": docs, "top_n": top_n},
                headers={"Authorization": f"Bearer {credentials.get('api_key')}"},
                timeout=20,
            )
            response.raise_for_status()
            results = response.json()
@ -617,6 +617,10 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
        # o1 compatibility
        block_as_stream = False
        if model.startswith("o1"):
            if "max_tokens" in model_parameters:
                model_parameters["max_completion_tokens"] = model_parameters["max_tokens"]
                del model_parameters["max_tokens"]

            if stream:
                block_as_stream = True
                stream = False
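The remap above mirrors OpenAI's requirement that o1-series models take `max_completion_tokens` instead of `max_tokens`. Reduced to its essentials:

model_parameters = {"max_tokens": 1024, "temperature": 1.0}

# o1-style models reject max_tokens, so rename the key in place.
if "max_tokens" in model_parameters:
    model_parameters["max_completion_tokens"] = model_parameters.pop("max_tokens")

assert model_parameters == {"temperature": 1.0, "max_completion_tokens": 1024}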
@ -29,6 +29,7 @@ from core.model_runtime.entities.message_entities import (
    TextPromptMessageContent,
    ToolPromptMessage,
    UserPromptMessage,
    VideoPromptMessageContent,
)
from core.model_runtime.entities.model_entities import (
    AIModelEntity,

@ -431,6 +432,14 @@ class TongyiLargeLanguageModel(LargeLanguageModel):

                    sub_message_dict = {"image": image_url}
                    sub_messages.append(sub_message_dict)
                elif message_content.type == PromptMessageContentType.VIDEO:
                    message_content = cast(VideoPromptMessageContent, message_content)
                    video_url = message_content.data
                    if message_content.data.startswith("data:"):
                        raise InvokeError("not support base64, please set MULTIMODAL_SEND_VIDEO_FORMAT to url")

                    sub_message_dict = {"video": video_url}
                    sub_messages.append(sub_message_dict)

                # resort sub_messages to ensure text is always at last
                sub_messages = sorted(sub_messages, key=lambda x: "text" in x)
@ -13,9 +13,9 @@ parameter_rules:
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    default: 8192
    min: 1
    max: 4096
    max: 8192
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
@ -1,6 +1,6 @@
provider: vessl_ai
label:
  en_US: vessl_ai
  en_US: VESSL AI
icon_small:
  en_US: icon_s_en.svg
icon_large:

@ -20,28 +20,28 @@ model_credential_schema:
    label:
      en_US: Model Name
    placeholder:
      en_US: Enter your model name
      en_US: Enter model name
  credential_form_schemas:
    - variable: endpoint_url
      label:
        en_US: endpoint url
        en_US: Endpoint Url
      type: text-input
      required: true
      placeholder:
        en_US: Enter the url of your endpoint url
        en_US: Enter VESSL AI service endpoint url
    - variable: api_key
      required: true
      label:
        en_US: API Key
      type: secret-input
      placeholder:
        en_US: Enter your VESSL AI secret key
        en_US: Enter VESSL AI secret key
    - variable: mode
      show_on:
        - variable: __model_type
          value: llm
      label:
        en_US: Completion mode
        en_US: Completion Mode
      type: select
      required: false
      default: chat
@ -313,21 +313,35 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        return params

    def _construct_glm_4v_messages(self, prompt_message: Union[str, list[PromptMessageContent]]) -> list[dict]:
        if isinstance(prompt_message, str):
        if isinstance(prompt_message, list):
            sub_messages = []
            for item in prompt_message:
                if item.type == PromptMessageContentType.IMAGE:
                    sub_messages.append(
                        {
                            "type": "image_url",
                            "image_url": {"url": self._remove_base64_header(item.data)},
                        }
                    )
                elif item.type == PromptMessageContentType.VIDEO:
                    sub_messages.append(
                        {
                            "type": "video_url",
                            "video_url": {"url": self._remove_base64_header(item.data)},
                        }
                    )
                else:
                    sub_messages.append({"type": "text", "text": item.data})
            return sub_messages
        else:
            return [{"type": "text", "text": prompt_message}]

        return [
            {"type": "image_url", "image_url": {"url": self._remove_image_header(item.data)}}
            if item.type == PromptMessageContentType.IMAGE
            else {"type": "text", "text": item.data}
            for item in prompt_message
        ]

    def _remove_base64_header(self, file_content: str) -> str:
        if file_content.startswith("data:"):
            data_split = file_content.split(";base64,")
            return data_split[1]

    def _remove_image_header(self, image: str) -> str:
        if image.startswith("data:image"):
            return image.split(",")[1]

        return image
        return file_content

    def _handle_generate_response(
        self,
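`_remove_base64_header` generalizes the old image-only `_remove_image_header` to any `data:` URI by splitting on the `;base64,` marker. A standalone replay of the same logic:

def remove_base64_header(file_content: str) -> str:
    # Split "data:video/mp4;base64,AAAA..." into header and payload.
    if file_content.startswith("data:"):
        return file_content.split(";base64,")[1]
    return file_content

assert remove_base64_header("data:video/mp4;base64,AAAA") == "AAAA"
assert remove_base64_header("https://example.com/v.mp4") == "https://example.com/v.mp4"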
@ -126,6 +126,6 @@ class OutputModeration(BaseModel):
            result: ModerationOutputsResult = moderation_factory.moderation_for_outputs(moderation_buffer)
            return result
        except Exception as e:
            logger.error("Moderation Output error: %s", e)
            logger.exception("Moderation Output error: %s", e)

        return None
@ -54,3 +54,7 @@ class LangSmithConfig(BaseTracingConfig):
            raise ValueError("endpoint must start with https://")

        return v


OPS_FILE_PATH = "ops_trace/"
OPS_TRACE_FAILED_KEY = "FAILED_OPS_TRACE"
@ -23,6 +23,11 @@ class BaseTraceInfo(BaseModel):
            return v
        return ""

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat(),
        }


class WorkflowTraceInfo(BaseTraceInfo):
    workflow_data: Any

@ -100,6 +105,12 @@ class GenerateNameTraceInfo(BaseTraceInfo):
    tenant_id: str


class TaskData(BaseModel):
    app_id: str
    trace_info_type: str
    trace_info: Any


trace_info_info_map = {
    "WorkflowTraceInfo": WorkflowTraceInfo,
    "MessageTraceInfo": MessageTraceInfo,
@ -6,12 +6,13 @@ import threading
import time
from datetime import timedelta
from typing import Any, Optional, Union
from uuid import UUID
from uuid import UUID, uuid4

from flask import current_app

from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token
from core.ops.entities.config_entity import (
    OPS_FILE_PATH,
    LangfuseConfig,
    LangSmithConfig,
    TracingProviderEnum,

@ -22,6 +23,7 @@ from core.ops.entities.trace_entity import (
    MessageTraceInfo,
    ModerationTraceInfo,
    SuggestedQuestionTraceInfo,
    TaskData,
    ToolTraceInfo,
    TraceTaskName,
    WorkflowTraceInfo,

@ -30,6 +32,7 @@ from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace
from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace
from core.ops.utils import get_message_data
from extensions.ext_database import db
from extensions.ext_storage import storage
from models.model import App, AppModelConfig, Conversation, Message, MessageAgentThought, MessageFile, TraceAppConfig
from models.workflow import WorkflowAppLog, WorkflowRun
from tasks.ops_trace_task import process_trace_tasks
@ -708,7 +711,7 @@ class TraceQueueManager:
            trace_task.app_id = self.app_id
            trace_manager_queue.put(trace_task)
        except Exception as e:
            logging.error(f"Error adding trace task: {e}")
            logging.exception(f"Error adding trace task: {e}")
        finally:
            self.start_timer()

@ -727,7 +730,7 @@ class TraceQueueManager:
            if tasks:
                self.send_to_celery(tasks)
        except Exception as e:
            logging.error(f"Error processing trace tasks: {e}")
            logging.exception(f"Error processing trace tasks: {e}")

    def start_timer(self):
        global trace_manager_timer

@ -740,10 +743,17 @@ class TraceQueueManager:
    def send_to_celery(self, tasks: list[TraceTask]):
        with self.flask_app.app_context():
            for task in tasks:
                file_id = uuid4().hex
                trace_info = task.execute()
                task_data = {
                task_data = TaskData(
                    app_id=task.app_id,
                    trace_info_type=type(trace_info).__name__,
                    trace_info=trace_info.model_dump() if trace_info else None,
                )
                file_path = f"{OPS_FILE_PATH}{task.app_id}/{file_id}.json"
                storage.save(file_path, task_data.model_dump_json().encode("utf-8"))
                file_info = {
                    "file_id": file_id,
                    "app_id": task.app_id,
                    "trace_info_type": type(trace_info).__name__,
                    "trace_info": trace_info.model_dump() if trace_info else {},
                }
                process_trace_tasks.delay(task_data)
                process_trace_tasks.delay(file_info)
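The net effect of this hunk: instead of pushing the full trace payload through the Celery broker, the manager now validates it as a `TaskData` model, writes the JSON to shared storage under `ops_trace/<app_id>/<file_id>.json`, and enqueues only a small pointer dict. A schematic of the flow, with a local filesystem standing in for Dify's storage backend:

import json
import uuid
from pathlib import Path

def enqueue_trace(app_id: str, trace_info: dict, storage_root: Path) -> dict:
    file_id = uuid.uuid4().hex
    file_path = storage_root / "ops_trace" / app_id / f"{file_id}.json"
    file_path.parent.mkdir(parents=True, exist_ok=True)
    file_path.write_text(json.dumps(trace_info))  # the heavy payload goes to storage
    # Only this small pointer travels through the task queue.
    return {"file_id": file_id, "app_id": app_id}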
@ -15,6 +15,7 @@ from core.model_runtime.entities import (
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
from core.prompt.prompt_transform import PromptTransform
from core.prompt.utils.prompt_template_parser import PromptTemplateParser

@ -26,8 +27,13 @@ class AdvancedPromptTransform(PromptTransform):
    Advanced Prompt Transform for Workflow LLM Node.
    """

    def __init__(self, with_variable_tmpl: bool = False) -> None:
    def __init__(
        self,
        with_variable_tmpl: bool = False,
        image_detail_config: ImagePromptMessageContent.DETAIL = ImagePromptMessageContent.DETAIL.LOW,
    ) -> None:
        self.with_variable_tmpl = with_variable_tmpl
        self.image_detail_config = image_detail_config

    def get_prompt(
        self,
@ -242,7 +242,7 @@ class CouchbaseVector(BaseVector):
        try:
            self._cluster.query(query, named_parameters={"doc_ids": ids}).execute()
        except Exception as e:
            logger.error(e)
            logger.exception(e)

    def delete_by_document_id(self, document_id: str):
        query = f"""
@ -178,6 +178,7 @@ class ElasticSearchVector(BaseVector):
                Field.VECTOR.value: {  # Make sure the dimension is correct here
                    "type": "dense_vector",
                    "dims": dim,
                    "index": True,
                    "similarity": "cosine",
                },
                Field.METADATA_KEY.value: {
@ -79,7 +79,7 @@ class LindormVectorStore(BaseVector):
                existing_docs = self._client.mget(index=self._collection_name, body={"ids": batch_ids}, _source=False)
                return {doc["_id"] for doc in existing_docs["docs"] if doc["found"]}
            except Exception as e:
                logger.error(f"Error fetching batch {batch_ids}: {e}")
                logger.exception(f"Error fetching batch {batch_ids}: {e}")
                return set()

        @retry(stop=stop_after_attempt(3), wait=wait_fixed(60))

@ -96,7 +96,7 @@ class LindormVectorStore(BaseVector):
                )
                return {doc["_id"] for doc in existing_docs["docs"] if doc["found"]}
            except Exception as e:
                logger.error(f"Error fetching batch {batch_ids}: {e}")
                logger.exception(f"Error fetching batch {batch_ids}: {e}")
                return set()

        if ids is None:

@ -177,7 +177,7 @@ class LindormVectorStore(BaseVector):
            else:
                logger.warning(f"Index '{self._collection_name}' does not exist. No deletion performed.")
        except Exception as e:
            logger.error(f"Error occurred while deleting the index: {e}")
            logger.exception(f"Error occurred while deleting the index: {e}")
            raise e

    def text_exists(self, id: str) -> bool:

@ -201,7 +201,7 @@ class LindormVectorStore(BaseVector):
        try:
            response = self._client.search(index=self._collection_name, body=query)
        except Exception as e:
            logger.error(f"Error executing search: {e}")
            logger.exception(f"Error executing search: {e}")
            raise

        docs_and_scores = []
@ -86,7 +86,7 @@ class MilvusVector(BaseVector):
                ids = self._client.insert(collection_name=self._collection_name, data=batch_insert_list)
                pks.extend(ids)
            except MilvusException as e:
                logger.error("Failed to insert batch starting at entity: %s/%s", i, total_count)
                logger.exception("Failed to insert batch starting at entity: %s/%s", i, total_count)
                raise e
        return pks
@ -142,7 +142,7 @@ class MyScaleVector(BaseVector):
                for r in self._client.query(sql).named_results()
            ]
        except Exception as e:
            logging.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            logging.exception(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            return []

    def delete(self) -> None:
@ -129,7 +129,7 @@ class OpenSearchVector(BaseVector):
            if status == 404:
                logger.warning(f"Document not found for deletion: {doc_id}")
            else:
                logger.error(f"Error deleting document: {error}")
                logger.exception(f"Error deleting document: {error}")

    def delete(self) -> None:
        self._client.indices.delete(index=self._collection_name.lower())

@ -158,7 +158,7 @@ class OpenSearchVector(BaseVector):
        try:
            response = self._client.search(index=self._collection_name.lower(), body=query)
        except Exception as e:
            logger.error(f"Error executing search: {e}")
            logger.exception(f"Error executing search: {e}")
            raise

        docs = []
@ -89,7 +89,7 @@ class CacheEmbedding(Embeddings):
                db.session.rollback()
            except Exception as ex:
                db.session.rollback()
                logger.error("Failed to embed documents: %s", ex)
                logger.exception("Failed to embed documents: %s", ex)
                raise ex

        return text_embeddings
@ -28,7 +28,6 @@ logger = logging.getLogger(__name__)
class WordExtractor(BaseExtractor):
    """Load docx files.

    Args:
        file_path: Path to the file to load.
    """

@ -230,7 +229,7 @@ class WordExtractor(BaseExtractor):
                    for i in url_pattern.findall(x.text):
                        hyperlinks_url = str(i)
                except Exception as e:
                    logger.error(e)
                    logger.exception(e)

    def parse_paragraph(paragraph):
        paragraph_content = []
@ -1,6 +1,6 @@
from typing import Literal, Optional

from pydantic import BaseModel
from pydantic import BaseModel, Field, field_validator

from core.model_runtime.utils.encoders import jsonable_encoder
from core.tools.entities.common_entities import I18nObject

@ -32,9 +32,14 @@ class UserToolProvider(BaseModel):
    original_credentials: Optional[dict] = None
    is_team_authorization: bool = False
    allow_delete: bool = True
    tools: list[UserTool] | None = None
    tools: list[UserTool] = Field(default_factory=list)
    labels: list[str] | None = None

    @field_validator("tools", mode="before")
    @classmethod
    def convert_none_to_empty_list(cls, v):
        return v if v is not None else []

    def to_dict(self) -> dict:
        # -------------
        # overwrite tool parameter types for temp fix
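The `field_validator(..., mode="before")` hook runs prior to type validation, so a `None` coming from older callers is coerced into `[]` before pydantic checks the `list[UserTool]` annotation. A stripped-down demonstration of the same pattern:

from pydantic import BaseModel, Field, field_validator

class Provider(BaseModel):
    tools: list[str] = Field(default_factory=list)

    @field_validator("tools", mode="before")
    @classmethod
    def convert_none_to_empty_list(cls, v):
        return v if v is not None else []

assert Provider(tools=None).tools == []  # would raise ValidationError without the validator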
@ -48,6 +48,13 @@
- feishu_task
- feishu_calendar
- feishu_spreadsheet
- lark_base
- lark_document
- lark_message_and_group
- lark_wiki
- lark_task
- lark_calendar
- lark_spreadsheet
- slack
- twilio
- wecom
24
api/core/tools/provider/builtin/cogview/tools/cogvideo.py
Normal file
@ -0,0 +1,24 @@
from typing import Any, Union

from zhipuai import ZhipuAI

from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool


class CogVideoTool(BuiltinTool):
    def _invoke(
        self, user_id: str, tool_parameters: dict[str, Any]
    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        client = ZhipuAI(
            base_url=self.runtime.credentials["zhipuai_base_url"],
            api_key=self.runtime.credentials["zhipuai_api_key"],
        )
        if not tool_parameters.get("prompt") and not tool_parameters.get("image_url"):
            return self.create_text_message("require at least one of prompt and image_url")

        response = client.videos.generations(
            model="cogvideox", prompt=tool_parameters.get("prompt"), image_url=tool_parameters.get("image_url")
        )

        return self.create_json_message(response.dict())
32
api/core/tools/provider/builtin/cogview/tools/cogvideo.yaml
Normal file
@ -0,0 +1,32 @@
identity:
  name: cogvideo
  author: hjlarry
  label:
    en_US: CogVideo
    zh_Hans: CogVideo 视频生成
description:
  human:
    en_US: Use the CogVideox model provided by ZhipuAI to generate videos based on user prompts and images.
    zh_Hans: 使用智谱cogvideox模型,根据用户输入的提示词和图片,生成视频。
  llm: A tool for generating videos. The input is user's prompt or image url or both of them, the output is a task id. You can use another tool with this task id to check the status and get the video.
parameters:
  - name: prompt
    type: string
    label:
      en_US: prompt
      zh_Hans: 提示词
    human_description:
      en_US: The prompt text used to generate video.
      zh_Hans: 用于生成视频的提示词。
    llm_description: The prompt text used to generate video. Optional.
    form: llm
  - name: image_url
    type: string
    label:
      en_US: image url
      zh_Hans: 图片链接
    human_description:
      en_US: The image url used to generate video.
      zh_Hans: 输入一个图片链接,生成的视频将基于该图片和提示词。
    llm_description: The image url used to generate video. Optional.
    form: llm
@ -0,0 +1,30 @@
from typing import Any, Union

import httpx
from zhipuai import ZhipuAI

from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool


class CogVideoJobTool(BuiltinTool):
    def _invoke(
        self, user_id: str, tool_parameters: dict[str, Any]
    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        client = ZhipuAI(
            api_key=self.runtime.credentials["zhipuai_api_key"],
            base_url=self.runtime.credentials["zhipuai_base_url"],
        )

        response = client.videos.retrieve_videos_result(id=tool_parameters.get("id"))
        result = [self.create_json_message(response.dict())]
        if response.task_status == "SUCCESS":
            for item in response.video_result:
                video_cover_image = self.create_image_message(item.cover_image_url)
                result.append(video_cover_image)
                video = self.create_blob_message(
                    blob=httpx.get(item.url).content, meta={"mime_type": "video/mp4"}, save_as=self.VariableKey.VIDEO
                )
                result.append(video)

        return result
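Together the two tools form an async pair: cogvideo submits a generation job and returns a task id, and cogvideo_job exchanges that id for the finished video. Outside Dify, the same round trip against the ZhipuAI SDK would look roughly like this; the api key is a placeholder, the `job.id` attribute and the polling interval are assumptions, and only the calls visible in the tools above are relied on:

import time
from zhipuai import ZhipuAI

client = ZhipuAI(api_key="your-api-key")  # placeholder credential

job = client.videos.generations(model="cogvideox", prompt="a cat surfing a wave")
while True:
    result = client.videos.retrieve_videos_result(id=job.id)  # assumed attribute name
    if result.task_status == "SUCCESS":
        print([item.url for item in result.video_result])
        break
    time.sleep(5)  # the job is asynchronous; poll until it completes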
@ -0,0 +1,21 @@
identity:
  name: cogvideo_job
  author: hjlarry
  label:
    en_US: CogVideo Result
    zh_Hans: CogVideo 结果获取
description:
  human:
    en_US: Get the result of CogVideo tool generation.
    zh_Hans: 根据 CogVideo 工具返回的 id 获取视频生成结果。
  llm: Get the result of CogVideo tool generation. The input is the id which is returned by the CogVideo tool. The output is the url of video and video cover image.
parameters:
  - name: id
    type: string
    label:
      en_US: id
    human_description:
      en_US: The id returned by the CogVideo.
      zh_Hans: CogVideo 工具返回的 id。
    llm_description: The id returned by the cogvideo.
    form: llm
@ -48,7 +48,6 @@ class ComfyUiClient:
        prompt = origin_prompt.copy()
        id_to_class_type = {id: details["class_type"] for id, details in prompt.items()}
        k_sampler = [key for key, value in id_to_class_type.items() if value == "KSampler"][0]
        prompt.get(k_sampler)["inputs"]["seed"] = random.randint(10**14, 10**15 - 1)
        positive_input_id = prompt.get(k_sampler)["inputs"]["positive"][0]
        prompt.get(positive_input_id)["inputs"]["text"] = positive_prompt

@ -72,6 +71,18 @@ class ComfyUiClient:
        prompt.get(load_image)["inputs"]["image"] = image_name
        return prompt

    def set_prompt_seed_by_id(self, origin_prompt: dict, seed_id: str) -> dict:
        prompt = origin_prompt.copy()
        if seed_id not in prompt:
            raise Exception("Not a valid seed node")
        if "seed" in prompt[seed_id]["inputs"]:
            prompt[seed_id]["inputs"]["seed"] = random.randint(10**14, 10**15 - 1)
        elif "noise_seed" in prompt[seed_id]["inputs"]:
            prompt[seed_id]["inputs"]["noise_seed"] = random.randint(10**14, 10**15 - 1)
        else:
            raise Exception("Not a valid seed node")
        return prompt

    def track_progress(self, prompt: dict, ws: WebSocket, prompt_id: str):
        node_ids = list(prompt.keys())
        finished_nodes = []
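To see what the new helper does to an exported workflow, here is the operation replayed on a minimal stand-in graph; node id "3" and the inputs are placeholders:

import random

# Minimal stand-in mirroring set_prompt_seed_by_id from the diff above.
workflow = {"3": {"class_type": "KSampler", "inputs": {"seed": 42, "steps": 20}}}

seed_id = "3"
if "seed" in workflow[seed_id]["inputs"]:
    workflow[seed_id]["inputs"]["seed"] = random.randint(10**14, 10**15 - 1)

print(workflow["3"]["inputs"]["seed"])  # a fresh 15-digit seed on every run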
@ -70,6 +70,9 @@ class ComfyUIWorkflowTool(BuiltinTool):
        else:
            prompt = comfyui.set_prompt_images_by_default(prompt, image_names)

        if seed_id := tool_parameters.get("seed_id"):
            prompt = comfyui.set_prompt_seed_by_id(prompt, seed_id)

        images = comfyui.generate_image_by_prompt(prompt)
        result = []
        for img in images:
@ -52,3 +52,12 @@ parameters:
      en_US: When the workflow has multiple image nodes, enter the ID list of these nodes, and the images will be passed to ComfyUI in the order of the list.
      zh_Hans: 当工作流有多个图片节点时,输入这些节点的ID列表,图片将按列表顺序传给ComfyUI
    form: form
  - name: seed_id
    type: string
    label:
      en_US: Seed Node Id
      zh_Hans: 种子节点ID
    human_description:
      en_US: If you need to generate different images each time, you need to enter the ID of the seed node.
      zh_Hans: 如果需要每次生成时使用不同的种子,需要输入包含种子的节点的ID
    form: form
1
api/core/tools/provider/builtin/email/_assets/icon.svg
Normal file
File diff suppressed because one or more lines are too long
7
api/core/tools/provider/builtin/email/email.py
Normal file
@ -0,0 +1,7 @@
from core.tools.provider.builtin.email.tools.send_mail import SendMailTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController


class SmtpProvider(BuiltinToolProviderController):
    def _validate_credentials(self, credentials: dict) -> None:
        SendMailTool()
83
api/core/tools/provider/builtin/email/email.yaml
Normal file
@ -0,0 +1,83 @@
identity:
  author: wakaka6
  name: email
  label:
    en_US: email
    zh_Hans: 电子邮件
  description:
    en_US: send email through smtp protocol
    zh_Hans: 通过smtp协议发送电子邮件
  icon: icon.svg
  tags:
    - utilities
credentials_for_provider:
  email_account:
    type: text-input
    required: true
    label:
      en_US: email account
      zh_Hans: 邮件账号
    placeholder:
      en_US: input your email account
      zh_Hans: 输入你的邮箱账号
    help:
      en_US: email account
      zh_Hans: 邮件账号
  email_password:
    type: secret-input
    required: true
    label:
      en_US: email password
      zh_Hans: 邮件密码
    placeholder:
      en_US: email password
      zh_Hans: 邮件密码
    help:
      en_US: email password
      zh_Hans: 邮件密码
  smtp_server:
    type: text-input
    required: true
    label:
      en_US: smtp server
      zh_Hans: 发信smtp服务器地址
    placeholder:
      en_US: smtp server
      zh_Hans: 发信smtp服务器地址
    help:
      en_US: smtp server
      zh_Hans: 发信smtp服务器地址
  smtp_port:
    type: text-input
    required: true
    label:
      en_US: smtp server port
      zh_Hans: 发信smtp服务器端口
    placeholder:
      en_US: smtp server port
      zh_Hans: 发信smtp服务器端口
    help:
      en_US: smtp server port
      zh_Hans: 发信smtp服务器端口
  encrypt_method:
    type: select
    required: true
    options:
      - value: NONE
        label:
          en_US: NONE
          zh_Hans: 无加密
      - value: SSL
        label:
          en_US: SSL
          zh_Hans: SSL加密
      - value: TLS
        label:
          en_US: START TLS
          zh_Hans: START TLS加密
    label:
      en_US: encrypt method
      zh_Hans: 加密方式
    help:
      en_US: smtp server encrypt method
      zh_Hans: 发信smtp服务器加密方式
53
api/core/tools/provider/builtin/email/tools/send.py
Normal file
@ -0,0 +1,53 @@
import logging
import smtplib
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from pydantic import BaseModel


class SendEmailToolParameters(BaseModel):
    smtp_server: str
    smtp_port: int

    email_account: str
    email_password: str

    sender_to: str
    subject: str
    email_content: str
    encrypt_method: str


def send_mail(parmas: SendEmailToolParameters):
    timeout = 60
    msg = MIMEMultipart("alternative")
    msg["From"] = parmas.email_account
    msg["To"] = parmas.sender_to
    msg["Subject"] = parmas.subject
    msg.attach(MIMEText(parmas.email_content, "plain"))
    msg.attach(MIMEText(parmas.email_content, "html"))

    ctx = ssl.create_default_context()

    if parmas.encrypt_method.upper() == "SSL":
        try:
            with smtplib.SMTP_SSL(parmas.smtp_server, parmas.smtp_port, context=ctx, timeout=timeout) as server:
                server.login(parmas.email_account, parmas.email_password)
                server.sendmail(parmas.email_account, parmas.sender_to, msg.as_string())
                return True
        except Exception as e:
            logging.exception("send email failed: %s", e)
            return False
    else:  # NONE or TLS
        try:
            with smtplib.SMTP(parmas.smtp_server, parmas.smtp_port, timeout=timeout) as server:
                if parmas.encrypt_method.upper() == "TLS":
                    server.starttls(context=ctx)
                server.login(parmas.email_account, parmas.email_password)
                server.sendmail(parmas.email_account, parmas.sender_to, msg.as_string())
                return True
        except Exception as e:
            logging.exception("send email failed: %s", e)
            return False
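A direct invocation of the helper, outside the tool wrapper, would look like this; it assumes the imports from the new send module above, and the host, port, and credentials are placeholders:

params = SendEmailToolParameters(
    smtp_server="smtp.example.com",
    smtp_port=465,
    email_account="sender@example.com",
    email_password="app-password",
    sender_to="recipient@example.com",
    subject="Hello",
    email_content="<p>Hi there</p>",
    encrypt_method="SSL",
)
ok = send_mail(params)  # True on success, False (with a logged traceback) on failure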
66
api/core/tools/provider/builtin/email/tools/send_mail.py
Normal file
@ -0,0 +1,66 @@
import re
from typing import Any, Union

from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.email.tools.send import (
    SendEmailToolParameters,
    send_mail,
)
from core.tools.tool.builtin_tool import BuiltinTool


class SendMailTool(BuiltinTool):
    def _invoke(
        self, user_id: str, tool_parameters: dict[str, Any]
    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        invoke tools
        """
        sender = self.runtime.credentials.get("email_account", "")
        email_rgx = re.compile(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$")
        password = self.runtime.credentials.get("email_password", "")
        smtp_server = self.runtime.credentials.get("smtp_server", "")
        if not smtp_server:
            return self.create_text_message("please input smtp server")
        smtp_port = self.runtime.credentials.get("smtp_port", "")
        try:
            smtp_port = int(smtp_port)
        except ValueError:
            return self.create_text_message("Invalid parameter smtp_port(should be int)")

        if not sender:
            return self.create_text_message("please input sender")
        if not email_rgx.match(sender):
            return self.create_text_message("Invalid parameter userid, the sender is not a mailbox")

        receiver_email = tool_parameters["send_to"]
        if not receiver_email:
            return self.create_text_message("please input receiver email")
        if not email_rgx.match(receiver_email):
            return self.create_text_message("Invalid parameter receiver email, the receiver email is not a mailbox")
        email_content = tool_parameters.get("email_content", "")

        if not email_content:
            return self.create_text_message("please input email content")

        subject = tool_parameters.get("subject", "")
        if not subject:
            return self.create_text_message("please input email subject")

        encrypt_method = self.runtime.credentials.get("encrypt_method", "")
        if not encrypt_method:
            return self.create_text_message("please input encrypt method")

        send_email_params = SendEmailToolParameters(
            smtp_server=smtp_server,
            smtp_port=smtp_port,
            email_account=sender,
            email_password=password,
            sender_to=receiver_email,
            subject=subject,
            email_content=email_content,
            encrypt_method=encrypt_method,
        )
        if send_mail(send_email_params):
            return self.create_text_message("send email success")
        return self.create_text_message("send email failed")
46
api/core/tools/provider/builtin/email/tools/send_mail.yaml
Normal file
@ -0,0 +1,46 @@
identity:
  name: send_mail
  author: wakaka6
  label:
    en_US: send email
    zh_Hans: 发送邮件
  icon: icon.svg
description:
  human:
    en_US: A tool for sending email
    zh_Hans: 用于发送邮件
  llm: A tool for sending email
parameters:
  - name: send_to
    type: string
    required: true
    label:
      en_US: Recipient email account
      zh_Hans: 收件人邮箱账号
    human_description:
      en_US: Recipient email account
      zh_Hans: 收件人邮箱账号
    llm_description: Recipient email account
    form: llm
  - name: subject
    type: string
    required: true
    label:
      en_US: email subject
      zh_Hans: 邮件主题
    human_description:
      en_US: email subject
      zh_Hans: 邮件主题
    llm_description: email subject
    form: llm
  - name: email_content
    type: string
    required: true
    label:
      en_US: email content
      zh_Hans: 邮件内容
    human_description:
      en_US: email content
      zh_Hans: 邮件内容
    llm_description: email content
    form: llm
@ -0,0 +1,75 @@
import json
import re
from typing import Any, Union

from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.email.tools.send import (
    SendEmailToolParameters,
    send_mail,
)
from core.tools.tool.builtin_tool import BuiltinTool


class SendMailTool(BuiltinTool):
    def _invoke(
        self, user_id: str, tool_parameters: dict[str, Any]
    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        invoke tools
        """
        sender = self.runtime.credentials.get("email_account", "")
        email_rgx = re.compile(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$")
        password = self.runtime.credentials.get("email_password", "")
        smtp_server = self.runtime.credentials.get("smtp_server", "")
        if not smtp_server:
            return self.create_text_message("please input smtp server")
        smtp_port = self.runtime.credentials.get("smtp_port", "")
        try:
            smtp_port = int(smtp_port)
        except ValueError:
            return self.create_text_message("Invalid parameter smtp_port(should be int)")

        if not sender:
            return self.create_text_message("please input sender")
        if not email_rgx.match(sender):
            return self.create_text_message("Invalid parameter userid, the sender is not a mailbox")

        receivers_email = tool_parameters["send_to"]
        if not receivers_email:
            return self.create_text_message("please input receiver email")
        receivers_email = json.loads(receivers_email)
        for receiver in receivers_email:
            if not email_rgx.match(receiver):
                return self.create_text_message(
                    f"Invalid parameter receiver email, the receiver email({receiver}) is not a mailbox"
                )
        email_content = tool_parameters.get("email_content", "")

        if not email_content:
            return self.create_text_message("please input email content")

        subject = tool_parameters.get("subject", "")
        if not subject:
            return self.create_text_message("please input email subject")

        encrypt_method = self.runtime.credentials.get("encrypt_method", "")
        if not encrypt_method:
            return self.create_text_message("please input encrypt method")

        msg = {}
        for receiver in receivers_email:
            send_email_params = SendEmailToolParameters(
                smtp_server=smtp_server,
                smtp_port=smtp_port,
                email_account=sender,
                email_password=password,
                sender_to=receiver,
                subject=subject,
                email_content=email_content,
                encrypt_method=encrypt_method,
            )
            if send_mail(send_email_params):
                msg[receiver] = "send email success"
            else:
                msg[receiver] = "send email failed"
        return self.create_text_message(json.dumps(msg))
@ -0,0 +1,46 @@
identity:
  name: send_mail_batch
  author: wakaka6
  label:
    en_US: send email to multiple recipients
    zh_Hans: 发送邮件给多个收件人
  icon: icon.svg
description:
  human:
    en_US: A tool for sending email to multiple recipients
    zh_Hans: 用于发送邮件给多个收件人的工具
  llm: A tool for sending email to multiple recipients
parameters:
  - name: send_to
    type: string
    required: true
    label:
      en_US: Recipient email account(json list)
      zh_Hans: 收件人邮箱账号(json list)
    human_description:
      en_US: Recipient email account
      zh_Hans: 收件人邮箱账号
    llm_description: A list of recipient email account(json format)
    form: llm
  - name: subject
    type: string
    required: true
    label:
      en_US: email subject
      zh_Hans: 邮件主题
    human_description:
      en_US: email subject
      zh_Hans: 邮件主题
    llm_description: email subject
    form: llm
  - name: email_content
    type: string
    required: true
    label:
      en_US: email content
      zh_Hans: 邮件内容
    human_description:
      en_US: email content
      zh_Hans: 邮件内容
    llm_description: email content
    form: llm
@ -34,7 +34,7 @@ parameters:
        Page size, default value: 20, maximum value: 100.
      zh_Hans: 分页大小,默认值:20,最大值:100。
    llm_description: 分页大小,默认值:20,最大值:100。
    form: llm
    form: form

  - name: page_token
    type: string

@ -147,7 +147,7 @@ parameters:
        Page size, default value: 20, maximum value: 500.
      zh_Hans: 分页大小,默认值:20,最大值:500。
    llm_description: 分页大小,默认值:20,最大值:500。
    form: llm
    form: form

  - name: page_token
    type: string

@ -47,7 +47,7 @@ parameters:
      en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 50, and the value range is [50,1000].
      zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 50,取值范围为 [50,1000]。
    llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 50,取值范围为 [50,1000]。
    form: llm
    form: form

  - name: page_token
    type: string

@ -85,7 +85,7 @@ parameters:
      en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [10,100].
      zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [10,100]。
    llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [10,100]。
    form: llm
    form: form

  - name: page_token
    type: string

@ -59,7 +59,7 @@ parameters:
      en_US: Paging size, the default and maximum value is 500.
      zh_Hans: 分页大小, 默认值和最大值为 500。
    llm_description: 分页大小, 表示一次请求最多返回多少条数据,默认值和最大值为 500。
    form: llm
    form: form

  - name: page_token
    type: string

@ -81,7 +81,7 @@ parameters:
      en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50].
      zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。
    llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。
    form: llm
    form: form

  - name: page_token
    type: string

@ -57,7 +57,7 @@ parameters:
      en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50].
      zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。
    llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。
    form: llm
    form: form

  - name: page_token
    type: string

@ -56,7 +56,7 @@ parameters:
      en_US: Number of columns to add, range (0-5000].
      zh_Hans: 要增加的列数,范围(0-5000]。
    llm_description: 要增加的列数,范围(0-5000]。
    form: llm
    form: form

  - name: values
    type: string

@ -56,7 +56,7 @@ parameters:
      en_US: Number of rows to add, range (0-5000].
      zh_Hans: 要增加行数,范围(0-5000]。
    llm_description: 要增加行数,范围(0-5000]。
    form: llm
    form: form

  - name: values
    type: string

@ -82,7 +82,7 @@ parameters:
      en_US: Starting column number, starting from 1.
      zh_Hans: 起始列号,从 1 开始。
    llm_description: 起始列号,从 1 开始。
    form: llm
    form: form

  - name: num_cols
    type: number

@ -94,4 +94,4 @@ parameters:
      en_US: Number of columns to read.
      zh_Hans: 读取列数
    llm_description: 读取列数
    form: llm
    form: form

@ -82,7 +82,7 @@ parameters:
      en_US: Starting row number, starting from 1.
      zh_Hans: 起始行号,从 1 开始。
    llm_description: 起始行号,从 1 开始。
    form: llm
    form: form

  - name: num_rows
    type: number

@ -94,4 +94,4 @@ parameters:
      en_US: Number of rows to read.
      zh_Hans: 读取行数
    llm_description: 读取行数
    form: llm
    form: form

@ -82,7 +82,7 @@ parameters:
      en_US: Starting row number, starting from 1.
      zh_Hans: 起始行号,从 1 开始。
    llm_description: 起始行号,从 1 开始。
    form: llm
    form: form

  - name: num_rows
    type: number

@ -94,7 +94,7 @@ parameters:
      en_US: Number of rows to read.
      zh_Hans: 读取行数
    llm_description: 读取行数
    form: llm
    form: form

  - name: range
    type: string
Some files were not shown because too many files have changed in this diff