diff --git a/.github/actions/setup-poetry/action.yml b/.github/actions/setup-poetry/action.yml new file mode 100644 index 0000000000..5feab33d1d --- /dev/null +++ b/.github/actions/setup-poetry/action.yml @@ -0,0 +1,36 @@ +name: Setup Poetry and Python + +inputs: + python-version: + description: Python version to use and the Poetry installed with + required: true + default: '3.10' + poetry-version: + description: Poetry version to set up + required: true + default: '1.8.4' + poetry-lockfile: + description: Path to the Poetry lockfile to restore cache from + required: true + default: '' + +runs: + using: composite + steps: + - name: Set up Python ${{ inputs.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python-version }} + cache: pip + + - name: Install Poetry + shell: bash + run: pip install poetry==${{ inputs.poetry-version }} + + - name: Restore Poetry cache + if: ${{ inputs.poetry-lockfile != '' }} + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python-version }} + cache: poetry + cache-dependency-path: ${{ inputs.poetry-lockfile }} diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index eb09abe77c..76e844aaad 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -28,15 +28,11 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Install Poetry - uses: abatilo/actions-poetry@v3 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 + - name: Setup Poetry and Python ${{ matrix.python-version }} + uses: ./.github/actions/setup-poetry with: python-version: ${{ matrix.python-version }} - cache: poetry - cache-dependency-path: api/poetry.lock + poetry-lockfile: api/poetry.lock - name: Check Poetry lockfile run: | diff --git a/.github/workflows/db-migration-test.yml b/.github/workflows/db-migration-test.yml index b8246aacb3..f4eb0f8e33 100644 --- a/.github/workflows/db-migration-test.yml +++ 
b/.github/workflows/db-migration-test.yml @@ -6,6 +6,7 @@ on: - main paths: - api/migrations/** + - .github/workflows/db-migration-test.yml concurrency: group: db-migration-test-${{ github.ref }} @@ -14,25 +15,15 @@ concurrency: jobs: db-migration-test: runs-on: ubuntu-latest - strategy: - matrix: - python-version: - - "3.10" steps: - name: Checkout code uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 + - name: Setup Poetry and Python + uses: ./.github/actions/setup-poetry with: - python-version: ${{ matrix.python-version }} - cache-dependency-path: | - api/pyproject.toml - api/poetry.lock - - - name: Install Poetry - uses: abatilo/actions-poetry@v3 + poetry-lockfile: api/poetry.lock - name: Install dependencies run: poetry install -C api diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 01f9757b3c..282afefe74 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -22,34 +22,28 @@ jobs: id: changed-files uses: tj-actions/changed-files@v45 with: - files: api/** + files: | + api/** + .github/workflows/style.yml - - name: Install Poetry + - name: Setup Poetry and Python if: steps.changed-files.outputs.any_changed == 'true' - uses: abatilo/actions-poetry@v3 + uses: ./.github/actions/setup-poetry - - name: Set up Python - uses: actions/setup-python@v5 - if: steps.changed-files.outputs.any_changed == 'true' - with: - python-version: '3.10' - - - name: Python dependencies + - name: Install dependencies if: steps.changed-files.outputs.any_changed == 'true' run: poetry install -C api --only lint - name: Ruff check if: steps.changed-files.outputs.any_changed == 'true' - run: poetry run -C api ruff check ./api + run: | + poetry run -C api ruff check ./api + poetry run -C api ruff format --check ./api - name: Dotenv check if: steps.changed-files.outputs.any_changed == 'true' run: poetry run -C api dotenv-linter ./api/.env.example ./web/.env.example - - name: Ruff 
formatter check - if: steps.changed-files.outputs.any_changed == 'true' - run: poetry run -C api ruff format --check ./api - - name: Lint hints if: failure() run: echo "Please run 'dev/reformat' to fix the fixable linting errors." diff --git a/.github/workflows/vdb-tests.yml b/.github/workflows/vdb-tests.yml index 8ea38fde76..caddd23bab 100644 --- a/.github/workflows/vdb-tests.yml +++ b/.github/workflows/vdb-tests.yml @@ -28,15 +28,11 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Install Poetry - uses: abatilo/actions-poetry@v3 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 + - name: Setup Poetry and Python ${{ matrix.python-version }} + uses: ./.github/actions/setup-poetry with: python-version: ${{ matrix.python-version }} - cache: poetry - cache-dependency-path: api/poetry.lock + poetry-lockfile: api/poetry.lock - name: Check Poetry lockfile run: | diff --git a/README.md b/README.md index 4779048001..d42b3b13a6 100644 --- a/README.md +++ b/README.md @@ -177,3 +177,4 @@ To protect your privacy, please avoid posting security issues on GitHub. Instead ## License This repository is available under the [Dify Open Source License](LICENSE), which is essentially Apache 2.0 with a few additional restrictions. 
+ diff --git a/api/.env.example b/api/.env.example index a92490608f..5751605b48 100644 --- a/api/.env.example +++ b/api/.env.example @@ -285,8 +285,9 @@ UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 -# Model Configuration +# Model configuration MULTIMODAL_SEND_IMAGE_FORMAT=base64 +MULTIMODAL_SEND_VIDEO_FORMAT=base64 PROMPT_GENERATION_MAX_TOKENS=512 CODE_GENERATION_MAX_TOKENS=1024 @@ -324,10 +325,10 @@ UNSTRUCTURED_API_KEY= SSRF_PROXY_HTTP_URL= SSRF_PROXY_HTTPS_URL= SSRF_DEFAULT_MAX_RETRIES=3 -SSRF_DEFAULT_TIME_OUT= -SSRF_DEFAULT_CONNECT_TIME_OUT= -SSRF_DEFAULT_READ_TIME_OUT= -SSRF_DEFAULT_WRITE_TIME_OUT= +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 BATCH_UPLOAD_LIMIT=10 KEYWORD_DATA_SOURCE_TYPE=database @@ -366,6 +367,10 @@ LOG_FILE= LOG_FILE_MAX_SIZE=20 # Log file max backup count LOG_FILE_BACKUP_COUNT=5 +# Log dateformat +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +# Log Timezone +LOG_TZ=UTC # Indexing configuration INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=1000 @@ -395,3 +400,5 @@ POSITION_PROVIDER_EXCLUDES= # Reset password token expiry minutes RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 + +CREATE_TIDB_SERVICE_JOB_ENABLED=false \ No newline at end of file diff --git a/api/Dockerfile b/api/Dockerfile index eb37303182..ed981e46d6 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -4,7 +4,7 @@ FROM python:3.10-slim-bookworm AS base WORKDIR /app/api # Install Poetry -ENV POETRY_VERSION=1.8.3 +ENV POETRY_VERSION=1.8.4 # if you located in China, you can use aliyun mirror to speed up # RUN pip install --no-cache-dir poetry==${POETRY_VERSION} -i https://mirrors.aliyun.com/pypi/simple/ @@ -55,7 +55,7 @@ RUN apt-get update \ && echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \ && apt-get update \ # For Security - && apt-get install -y --no-install-recommends expat=2.6.3-2 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-6 
libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \ + && apt-get install -y --no-install-recommends expat=2.6.3-2 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-7 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \ # install a chinese font to support the use of tools like matplotlib && apt-get install -y fonts-noto-cjk \ && apt-get autoremove -y \ diff --git a/api/app.py b/api/app.py index ed214bde97..a667a84fd6 100644 --- a/api/app.py +++ b/api/app.py @@ -1,8 +1,9 @@ import os +import sys from configs import dify_config -if os.environ.get("DEBUG", "false").lower() != "true": +if not dify_config.DEBUG: from gevent import monkey monkey.patch_all() @@ -29,6 +30,9 @@ from models import account, dataset, model, source, task, tool, tools, web # no # DO NOT REMOVE ABOVE +if sys.version_info[:2] == (3, 10): + print("Warning: Python 3.10 will not be supported in the next version.") + warnings.simplefilter("ignore", ResourceWarning) @@ -49,7 +53,6 @@ if dify_config.TESTING: @app.after_request def after_request(response): """Add Version headers to the response.""" - response.set_cookie("remember_token", "", expires=0) response.headers.add("X-Version", dify_config.CURRENT_VERSION) response.headers.add("X-Env", dify_config.DEPLOY_ENV) return response diff --git a/api/app_factory.py b/api/app_factory.py index aba78ccab8..60a584798b 100644 --- a/api/app_factory.py +++ b/api/app_factory.py @@ -1,6 +1,8 @@ import os -if os.environ.get("DEBUG", "false").lower() != "true": +from configs import dify_config + +if not dify_config.DEBUG: from gevent import monkey monkey.patch_all() diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index 3ac2c28c1f..f368a19469 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -276,6 +276,16 @@ class HttpConfig(BaseSettings): default=1 * 1024 * 1024, ) + SSRF_DEFAULT_MAX_RETRIES: PositiveInt = Field( + description="Maximum number of retries for network requests (SSRF)", + 
default=3, + ) + + SSRF_PROXY_ALL_URL: Optional[str] = Field( + description="Proxy URL for HTTP or HTTPS requests to prevent Server-Side Request Forgery (SSRF)", + default=None, + ) + SSRF_PROXY_HTTP_URL: Optional[str] = Field( description="Proxy URL for HTTP requests to prevent Server-Side Request Forgery (SSRF)", default=None, @@ -366,7 +376,7 @@ class LoggingConfig(BaseSettings): LOG_TZ: Optional[str] = Field( description="Timezone for log timestamps (e.g., 'America/New_York')", - default=None, + default="UTC", ) @@ -601,6 +611,11 @@ class DataSetConfig(BaseSettings): default=500, ) + CREATE_TIDB_SERVICE_JOB_ENABLED: bool = Field( + description="Enable or disable create tidb service job", + default=False, + ) + class WorkspaceConfig(BaseSettings): """ @@ -624,12 +639,17 @@ class IndexingConfig(BaseSettings): ) -class ImageFormatConfig(BaseSettings): +class VisionFormatConfig(BaseSettings): MULTIMODAL_SEND_IMAGE_FORMAT: Literal["base64", "url"] = Field( description="Format for sending images in multimodal contexts ('base64' or 'url'), default is base64", default="base64", ) + MULTIMODAL_SEND_VIDEO_FORMAT: Literal["base64", "url"] = Field( + description="Format for sending videos in multimodal contexts ('base64' or 'url'), default is base64", + default="base64", + ) + class CeleryBeatConfig(BaseSettings): CELERY_BEAT_SCHEDULER_TIME: int = Field( @@ -732,7 +752,7 @@ class FeatureConfig( FileAccessConfig, FileUploadConfig, HttpConfig, - ImageFormatConfig, + VisionFormatConfig, InnerAPIConfig, IndexingConfig, LoggingConfig, diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py index b5cb1f06d9..65065efbc0 100644 --- a/api/configs/packaging/__init__.py +++ b/api/configs/packaging/__init__.py @@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings): CURRENT_VERSION: str = Field( description="Dify version", - default="0.11.0", + default="0.11.1", ) COMMIT_SHA: str = Field( diff --git a/api/controllers/console/datasets/datasets_document.py 
b/api/controllers/console/datasets/datasets_document.py index 8e784dc70b..60848039c5 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -317,8 +317,11 @@ class DatasetInitApi(Resource): raise ValueError("embedding model and embedding model provider are required for high quality indexing.") try: model_manager = ModelManager() - model_manager.get_default_model_instance( - tenant_id=current_user.current_tenant_id, model_type=ModelType.TEXT_EMBEDDING + model_manager.get_model_instance( + tenant_id=current_user.current_tenant_id, + provider=args["embedding_model_provider"], + model_type=ModelType.TEXT_EMBEDDING, + model=args["embedding_model"], ) except InvokeAuthorizationError: raise ProviderNotInitializeError( @@ -945,7 +948,7 @@ class DocumentRetryApi(DocumentResource): raise DocumentAlreadyFinishedError() retry_documents.append(document) except Exception as e: - logging.error(f"Document {document_id} retry failed: {str(e)}") + logging.exception(f"Document {document_id} retry failed: {str(e)}") continue # retry document DocumentService.retry_document(dataset_id, retry_documents) diff --git a/api/controllers/service_api/app/conversation.py b/api/controllers/service_api/app/conversation.py index 527ef4ecd3..c62fd77d36 100644 --- a/api/controllers/service_api/app/conversation.py +++ b/api/controllers/service_api/app/conversation.py @@ -7,7 +7,11 @@ from controllers.service_api import api from controllers.service_api.app.error import NotChatAppError from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token from core.app.entities.app_invoke_entities import InvokeFrom -from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields +from fields.conversation_fields import ( + conversation_delete_fields, + conversation_infinite_scroll_pagination_fields, + simple_conversation_fields, +) from libs.helper import 
uuid_value from models.model import App, AppMode, EndUser from services.conversation_service import ConversationService @@ -49,7 +53,7 @@ class ConversationApi(Resource): class ConversationDetailApi(Resource): @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON)) - @marshal_with(simple_conversation_fields) + @marshal_with(conversation_delete_fields) def delete(self, app_model: App, end_user: EndUser, c_id): app_mode = AppMode.value_of(app_model.mode) if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}: diff --git a/api/controllers/service_api/app/message.py b/api/controllers/service_api/app/message.py index ecff804adc..ada40ec9cb 100644 --- a/api/controllers/service_api/app/message.py +++ b/api/controllers/service_api/app/message.py @@ -10,6 +10,7 @@ from controllers.service_api.app.error import NotChatAppError from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token from core.app.entities.app_invoke_entities import InvokeFrom from fields.conversation_fields import message_file_fields +from fields.raws import FilesContainedField from libs.helper import TimestampField, uuid_value from models.model import App, AppMode, EndUser from services.errors.message import SuggestedQuestionsAfterAnswerDisabledError @@ -55,7 +56,7 @@ class MessageListApi(Resource): "id": fields.String, "conversation_id": fields.String, "parent_message_id": fields.String, - "inputs": fields.Raw, + "inputs": FilesContainedField, "query": fields.String, "answer": fields.String(attribute="re_sign_file_url_answer"), "message_files": fields.List(fields.Nested(message_file_fields)), diff --git a/api/core/agent/base_agent_runner.py b/api/core/agent/base_agent_runner.py index 507455c176..860ec5de0c 100644 --- a/api/core/agent/base_agent_runner.py +++ b/api/core/agent/base_agent_runner.py @@ -30,6 +30,7 @@ from core.model_runtime.entities import ( ToolPromptMessage, UserPromptMessage, ) +from 
core.model_runtime.entities.message_entities import ImagePromptMessageContent from core.model_runtime.entities.model_entities import ModelFeature from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.utils.encoders import jsonable_encoder @@ -65,7 +66,7 @@ class BaseAgentRunner(AppRunner): prompt_messages: Optional[list[PromptMessage]] = None, variables_pool: Optional[ToolRuntimeVariablePool] = None, db_variables: Optional[ToolConversationVariables] = None, - model_instance: ModelInstance = None, + model_instance: ModelInstance | None = None, ) -> None: self.tenant_id = tenant_id self.application_generate_entity = application_generate_entity @@ -508,24 +509,27 @@ class BaseAgentRunner(AppRunner): def organize_agent_user_prompt(self, message: Message) -> UserPromptMessage: files = db.session.query(MessageFile).filter(MessageFile.message_id == message.id).all() - if files: - file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict()) - - if file_extra_config: - file_objs = file_factory.build_from_message_files( - message_files=files, tenant_id=self.tenant_id, config=file_extra_config - ) - else: - file_objs = [] - - if not file_objs: - return UserPromptMessage(content=message.query) - else: - prompt_message_contents: list[PromptMessageContent] = [] - prompt_message_contents.append(TextPromptMessageContent(data=message.query)) - for file_obj in file_objs: - prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj)) - - return UserPromptMessage(content=prompt_message_contents) - else: + if not files: return UserPromptMessage(content=message.query) + file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict()) + if not file_extra_config: + return UserPromptMessage(content=message.query) + + image_detail_config = file_extra_config.image_config.detail if file_extra_config.image_config else None + image_detail_config = 
image_detail_config or ImagePromptMessageContent.DETAIL.LOW + + file_objs = file_factory.build_from_message_files( + message_files=files, tenant_id=self.tenant_id, config=file_extra_config + ) + if not file_objs: + return UserPromptMessage(content=message.query) + prompt_message_contents: list[PromptMessageContent] = [] + prompt_message_contents.append(TextPromptMessageContent(data=message.query)) + for file in file_objs: + prompt_message_contents.append( + file_manager.to_prompt_message_content( + file, + image_detail_config=image_detail_config, + ) + ) + return UserPromptMessage(content=prompt_message_contents) diff --git a/api/core/agent/cot_chat_agent_runner.py b/api/core/agent/cot_chat_agent_runner.py index 6261a9b12c..d8d047fe91 100644 --- a/api/core/agent/cot_chat_agent_runner.py +++ b/api/core/agent/cot_chat_agent_runner.py @@ -10,6 +10,7 @@ from core.model_runtime.entities import ( TextPromptMessageContent, UserPromptMessage, ) +from core.model_runtime.entities.message_entities import ImagePromptMessageContent from core.model_runtime.utils.encoders import jsonable_encoder @@ -36,8 +37,24 @@ class CotChatAgentRunner(CotAgentRunner): if self.files: prompt_message_contents: list[PromptMessageContent] = [] prompt_message_contents.append(TextPromptMessageContent(data=query)) - for file_obj in self.files: - prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj)) + + # get image detail config + image_detail_config = ( + self.application_generate_entity.file_upload_config.image_config.detail + if ( + self.application_generate_entity.file_upload_config + and self.application_generate_entity.file_upload_config.image_config + ) + else None + ) + image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW + for file in self.files: + prompt_message_contents.append( + file_manager.to_prompt_message_content( + file, + image_detail_config=image_detail_config, + ) + ) 
prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) else: diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index 9083b4e85f..cd546dee12 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -22,6 +22,7 @@ from core.model_runtime.entities import ( ToolPromptMessage, UserPromptMessage, ) +from core.model_runtime.entities.message_entities import ImagePromptMessageContent from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform from core.tools.entities.tool_entities import ToolInvokeMeta from core.tools.tool_engine import ToolEngine @@ -397,8 +398,24 @@ class FunctionCallAgentRunner(BaseAgentRunner): if self.files: prompt_message_contents: list[PromptMessageContent] = [] prompt_message_contents.append(TextPromptMessageContent(data=query)) - for file_obj in self.files: - prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj)) + + # get image detail config + image_detail_config = ( + self.application_generate_entity.file_upload_config.image_config.detail + if ( + self.application_generate_entity.file_upload_config + and self.application_generate_entity.file_upload_config.image_config + ) + else None + ) + image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW + for file in self.files: + prompt_message_contents.append( + file_manager.to_prompt_message_content( + file, + image_detail_config=image_detail_config, + ) + ) prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) else: diff --git a/api/core/app/app_config/entities.py b/api/core/app/app_config/entities.py index 6c6e342a07..9b72452d7a 100644 --- a/api/core/app/app_config/entities.py +++ b/api/core/app/app_config/entities.py @@ -4,7 +4,7 @@ from typing import Any, Optional from pydantic import BaseModel, Field, field_validator -from core.file import FileExtraConfig, FileTransferMethod, FileType +from core.file import 
FileTransferMethod, FileType, FileUploadConfig from core.model_runtime.entities.message_entities import PromptMessageRole from models.model import AppMode @@ -211,7 +211,7 @@ class TracingConfigEntity(BaseModel): class AppAdditionalFeatures(BaseModel): - file_upload: Optional[FileExtraConfig] = None + file_upload: Optional[FileUploadConfig] = None opening_statement: Optional[str] = None suggested_questions: list[str] = [] suggested_questions_after_answer: bool = False diff --git a/api/core/app/app_config/features/file_upload/manager.py b/api/core/app/app_config/features/file_upload/manager.py index d0f75d0b75..2043ea0e41 100644 --- a/api/core/app/app_config/features/file_upload/manager.py +++ b/api/core/app/app_config/features/file_upload/manager.py @@ -1,7 +1,7 @@ from collections.abc import Mapping from typing import Any -from core.file import FileExtraConfig +from core.file import FileUploadConfig class FileUploadConfigManager: @@ -29,19 +29,18 @@ class FileUploadConfigManager: if is_vision: data["image_config"]["detail"] = file_upload_dict.get("image", {}).get("detail", "low") - return FileExtraConfig.model_validate(data) + return FileUploadConfig.model_validate(data) @classmethod - def validate_and_set_defaults(cls, config: dict, is_vision: bool = True) -> tuple[dict, list[str]]: + def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]: """ Validate and set defaults for file upload feature :param config: app model config args - :param is_vision: if True, the feature is vision feature """ if not config.get("file_upload"): config["file_upload"] = {} else: - FileExtraConfig.model_validate(config["file_upload"]) + FileUploadConfig.model_validate(config["file_upload"]) return config, ["file_upload"] diff --git a/api/core/app/apps/advanced_chat/app_config_manager.py b/api/core/app/apps/advanced_chat/app_config_manager.py index b52f235849..cb606953cd 100644 --- a/api/core/app/apps/advanced_chat/app_config_manager.py +++ 
b/api/core/app/apps/advanced_chat/app_config_manager.py @@ -52,9 +52,7 @@ class AdvancedChatAppConfigManager(BaseAppConfigManager): related_config_keys = [] # file upload validation - config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults( - config=config, is_vision=False - ) + config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(config=config) related_config_keys.extend(current_related_config_keys) # opening_statement diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 39ab87c914..0b88345061 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -1,6 +1,5 @@ import contextvars import logging -import os import threading import uuid from collections.abc import Generator @@ -10,6 +9,7 @@ from flask import Flask, current_app from pydantic import ValidationError import contexts +from configs import dify_config from constants import UUID_NIL from core.app.app_config.features.file_upload.manager import FileUploadConfigManager from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfigManager @@ -26,7 +26,6 @@ from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from factories import file_factory from models.account import Account -from models.enums import CreatedByRole from models.model import App, Conversation, EndUser, Message from models.workflow import Workflow @@ -98,13 +97,10 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): # parse files files = args["files"] if args.get("files") else [] file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False) - role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER if file_extra_config: file_objs = file_factory.build_from_mappings( mappings=files, tenant_id=app_model.tenant_id, - 
user_id=user.id, - role=role, config=file_extra_config, ) else: @@ -127,10 +123,11 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): application_generate_entity = AdvancedChatAppGenerateEntity( task_id=str(uuid.uuid4()), app_config=app_config, + file_upload_config=file_extra_config, conversation_id=conversation.id if conversation else None, inputs=conversation.inputs if conversation - else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role), + else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), query=query, files=file_objs, parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL, @@ -317,7 +314,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): logger.exception("Validation Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except (ValueError, InvokeError) as e: - if os.environ.get("DEBUG", "false").lower() == "true": + if dify_config.DEBUG: logger.exception("Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except Exception as e: diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 1fc7ffe2c7..1d4c0ea0fa 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -242,7 +242,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc start_listener_time = time.time() yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id) except Exception as e: - logger.error(e) + logger.exception(e) break if tts_publisher: yield MessageAudioEndStreamResponse(audio="", task_id=task_id) diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index de12f5a441..d1564a260e 100644 --- 
a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -1,5 +1,4 @@ import logging -import os import threading import uuid from collections.abc import Generator @@ -8,6 +7,7 @@ from typing import Any, Literal, Union, overload from flask import Flask, current_app from pydantic import ValidationError +from configs import dify_config from constants import UUID_NIL from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter from core.app.app_config.features.file_upload.manager import FileUploadConfigManager @@ -23,7 +23,6 @@ from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from factories import file_factory from models import Account, App, EndUser -from models.enums import CreatedByRole logger = logging.getLogger(__name__) @@ -103,8 +102,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): # always enable retriever resource in debugger mode override_model_config_dict["retriever_resource"] = {"enabled": True} - role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER - # parse files files = args.get("files") or [] file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict()) @@ -112,8 +109,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): file_objs = file_factory.build_from_mappings( mappings=files, tenant_id=app_model.tenant_id, - user_id=user.id, - role=role, config=file_extra_config, ) else: @@ -135,10 +130,11 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): task_id=str(uuid.uuid4()), app_config=app_config, model_conf=ModelConfigConverter.convert(app_config), + file_upload_config=file_extra_config, conversation_id=conversation.id if conversation else None, inputs=conversation.inputs if conversation - else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role), + else 
self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), query=query, files=file_objs, parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL, @@ -230,7 +226,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): logger.exception("Validation Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except (ValueError, InvokeError) as e: - if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true": + if dify_config.DEBUG: logger.exception("Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except Exception as e: diff --git a/api/core/app/apps/base_app_generator.py b/api/core/app/apps/base_app_generator.py index d8e38476c7..6e6da95401 100644 --- a/api/core/app/apps/base_app_generator.py +++ b/api/core/app/apps/base_app_generator.py @@ -2,12 +2,11 @@ from collections.abc import Mapping from typing import TYPE_CHECKING, Any, Optional from core.app.app_config.entities import VariableEntityType -from core.file import File, FileExtraConfig +from core.file import File, FileUploadConfig from factories import file_factory if TYPE_CHECKING: from core.app.app_config.entities import AppConfig, VariableEntity - from models.enums import CreatedByRole class BaseAppGenerator: @@ -16,8 +15,6 @@ class BaseAppGenerator: *, user_inputs: Optional[Mapping[str, Any]], app_config: "AppConfig", - user_id: str, - role: "CreatedByRole", ) -> Mapping[str, Any]: user_inputs = user_inputs or {} # Filter input variables from form configuration, handle required fields, default values, and option values @@ -34,9 +31,7 @@ class BaseAppGenerator: k: file_factory.build_from_mapping( mapping=v, tenant_id=app_config.tenant_id, - user_id=user_id, - role=role, - config=FileExtraConfig( + config=FileUploadConfig( allowed_file_types=entity_dictionary[k].allowed_file_types, allowed_extensions=entity_dictionary[k].allowed_file_extensions, 
allowed_upload_methods=entity_dictionary[k].allowed_file_upload_methods, @@ -50,9 +45,7 @@ class BaseAppGenerator: k: file_factory.build_from_mappings( mappings=v, tenant_id=app_config.tenant_id, - user_id=user_id, - role=role, - config=FileExtraConfig( + config=FileUploadConfig( allowed_file_types=entity_dictionary[k].allowed_file_types, allowed_extensions=entity_dictionary[k].allowed_file_extensions, allowed_upload_methods=entity_dictionary[k].allowed_file_upload_methods, diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 5c074f5306..e683dfef3f 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -1,5 +1,4 @@ import logging -import os import threading import uuid from collections.abc import Generator @@ -8,6 +7,7 @@ from typing import Any, Literal, Union, overload from flask import Flask, current_app from pydantic import ValidationError +from configs import dify_config from constants import UUID_NIL from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter from core.app.app_config.features.file_upload.manager import FileUploadConfigManager @@ -23,7 +23,6 @@ from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from factories import file_factory from models.account import Account -from models.enums import CreatedByRole from models.model import App, EndUser logger = logging.getLogger(__name__) @@ -101,8 +100,6 @@ class ChatAppGenerator(MessageBasedAppGenerator): # always enable retriever resource in debugger mode override_model_config_dict["retriever_resource"] = {"enabled": True} - role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER - # parse files files = args["files"] if args.get("files") else [] file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict()) @@ -110,8 +107,6 @@ class 
ChatAppGenerator(MessageBasedAppGenerator): file_objs = file_factory.build_from_mappings( mappings=files, tenant_id=app_model.tenant_id, - user_id=user.id, - role=role, config=file_extra_config, ) else: @@ -133,10 +128,11 @@ class ChatAppGenerator(MessageBasedAppGenerator): task_id=str(uuid.uuid4()), app_config=app_config, model_conf=ModelConfigConverter.convert(app_config), + file_upload_config=file_extra_config, conversation_id=conversation.id if conversation else None, inputs=conversation.inputs if conversation - else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role), + else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), query=query, files=file_objs, parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL, @@ -227,7 +223,7 @@ class ChatAppGenerator(MessageBasedAppGenerator): logger.exception("Validation Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except (ValueError, InvokeError) as e: - if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true": + if dify_config.DEBUG: logger.exception("Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except Exception as e: diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 46450d39c0..22ee8b0967 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -1,5 +1,4 @@ import logging -import os import threading import uuid from collections.abc import Generator @@ -8,6 +7,7 @@ from typing import Any, Literal, Union, overload from flask import Flask, current_app from pydantic import ValidationError +from configs import dify_config from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter from core.app.app_config.features.file_upload.manager import FileUploadConfigManager 
from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskStoppedError, PublishFrom @@ -22,7 +22,6 @@ from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from factories import file_factory from models import Account, App, EndUser, Message -from models.enums import CreatedByRole from services.errors.app import MoreLikeThisDisabledError from services.errors.message import MessageNotExistsError @@ -88,8 +87,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator): tenant_id=app_model.tenant_id, config=args.get("model_config") ) - role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER - # parse files files = args["files"] if args.get("files") else [] file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict()) @@ -97,8 +94,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator): file_objs = file_factory.build_from_mappings( mappings=files, tenant_id=app_model.tenant_id, - user_id=user.id, - role=role, config=file_extra_config, ) else: @@ -110,7 +105,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator): ) # get tracing instance - user_id = user.id if isinstance(user, Account) else user.session_id trace_manager = TraceQueueManager(app_model.id) # init application generate entity @@ -118,7 +112,8 @@ class CompletionAppGenerator(MessageBasedAppGenerator): task_id=str(uuid.uuid4()), app_config=app_config, model_conf=ModelConfigConverter.convert(app_config), - inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role), + file_upload_config=file_extra_config, + inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), query=query, files=file_objs, user_id=user.id, @@ -203,7 +198,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator): logger.exception("Validation Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) 
except (ValueError, InvokeError) as e: - if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true": + if dify_config.DEBUG: logger.exception("Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except Exception as e: @@ -259,14 +254,11 @@ class CompletionAppGenerator(MessageBasedAppGenerator): override_model_config_dict["model"] = model_dict # parse files - role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER file_extra_config = FileUploadConfigManager.convert(override_model_config_dict) if file_extra_config: file_objs = file_factory.build_from_mappings( mappings=message.message_files, tenant_id=app_model.tenant_id, - user_id=user.id, - role=role, config=file_extra_config, ) else: diff --git a/api/core/app/apps/workflow/app_config_manager.py b/api/core/app/apps/workflow/app_config_manager.py index 8b98e74b85..b0aa21c731 100644 --- a/api/core/app/apps/workflow/app_config_manager.py +++ b/api/core/app/apps/workflow/app_config_manager.py @@ -46,9 +46,7 @@ class WorkflowAppConfigManager(BaseAppConfigManager): related_config_keys = [] # file upload validation - config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults( - config=config, is_vision=False - ) + config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(config=config) related_config_keys.extend(current_related_config_keys) # text_to_speech diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index a865c8a68b..a0080ece20 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -1,6 +1,5 @@ import contextvars import logging -import os import threading import uuid from collections.abc import Generator, Mapping, Sequence @@ -10,6 +9,7 @@ from flask import Flask, current_app from pydantic import ValidationError import contexts +from configs import dify_config from 
core.app.app_config.features.file_upload.manager import FileUploadConfigManager from core.app.apps.base_app_generator import BaseAppGenerator from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskStoppedError, PublishFrom @@ -25,7 +25,6 @@ from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from factories import file_factory from models import Account, App, EndUser, Workflow -from models.enums import CreatedByRole logger = logging.getLogger(__name__) @@ -70,15 +69,11 @@ class WorkflowAppGenerator(BaseAppGenerator): ): files: Sequence[Mapping[str, Any]] = args.get("files") or [] - role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER - # parse files file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False) system_files = file_factory.build_from_mappings( mappings=files, tenant_id=app_model.tenant_id, - user_id=user.id, - role=role, config=file_extra_config, ) @@ -100,7 +95,8 @@ class WorkflowAppGenerator(BaseAppGenerator): application_generate_entity = WorkflowAppGenerateEntity( task_id=str(uuid.uuid4()), app_config=app_config, - inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role), + file_upload_config=file_extra_config, + inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), files=system_files, user_id=user.id, stream=stream, @@ -261,7 +257,7 @@ class WorkflowAppGenerator(BaseAppGenerator): logger.exception("Validation Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except (ValueError, InvokeError) as e: - if os.environ.get("DEBUG") and os.environ.get("DEBUG", "false").lower() == "true": + if dify_config.DEBUG: logger.exception("Error when generating") queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER) except Exception as e: diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py 
b/api/core/app/apps/workflow/generate_task_pipeline.py index d119d94a61..aaa4824fe8 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -216,7 +216,7 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa else: yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id) except Exception as e: - logger.error(e) + logger.exception(e) break if tts_publisher: yield MessageAudioEndStreamResponse(audio="", task_id=task_id) diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 9a01e8a253..2872390d46 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -361,6 +361,7 @@ class WorkflowBasedAppRunner(AppRunner): node_run_index=workflow_entry.graph_engine.graph_runtime_state.node_run_steps, output=event.pre_iteration_output, parallel_mode_run_id=event.parallel_mode_run_id, + duration=event.duration, ) ) elif isinstance(event, (IterationRunSucceededEvent | IterationRunFailedEvent)): diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py index f2eba29323..31c3a996e1 100644 --- a/api/core/app/entities/app_invoke_entities.py +++ b/api/core/app/entities/app_invoke_entities.py @@ -7,7 +7,7 @@ from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validat from constants import UUID_NIL from core.app.app_config.entities import AppConfig, EasyUIBasedAppConfig, WorkflowUIBasedAppConfig from core.entities.provider_configuration import ProviderModelBundle -from core.file.models import File +from core.file import File, FileUploadConfig from core.model_runtime.entities.model_entities import AIModelEntity from core.ops.ops_trace_manager import TraceQueueManager @@ -80,6 +80,7 @@ class AppGenerateEntity(BaseModel): # app config app_config: AppConfig + file_upload_config: Optional[FileUploadConfig] = None 
inputs: Mapping[str, Any] files: Sequence[File] diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py index f1542ec5d8..69bc0d7f9e 100644 --- a/api/core/app/entities/queue_entities.py +++ b/api/core/app/entities/queue_entities.py @@ -111,6 +111,7 @@ class QueueIterationNextEvent(AppQueueEvent): """iteratoin run in parallel mode run id""" node_run_index: int output: Optional[Any] = None # output for the current iteration + duration: Optional[float] = None @field_validator("output", mode="before") @classmethod @@ -307,6 +308,8 @@ class QueueNodeSucceededEvent(AppQueueEvent): execution_metadata: Optional[dict[NodeRunMetadataKey, Any]] = None error: Optional[str] = None + """single iteration duration map""" + iteration_duration_map: Optional[dict[str, float]] = None class QueueNodeInIterationFailedEvent(AppQueueEvent): diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index 7e9aad54be..03cc6941a8 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -434,6 +434,7 @@ class IterationNodeNextStreamResponse(StreamResponse): parallel_id: Optional[str] = None parallel_start_node_id: Optional[str] = None parallel_mode_run_id: Optional[str] = None + duration: Optional[float] = None event: StreamEvent = StreamEvent.ITERATION_NEXT workflow_run_id: str diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index b89edf9079..042339969f 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -624,6 +624,7 @@ class WorkflowCycleManage: parallel_id=event.parallel_id, parallel_start_node_id=event.parallel_start_node_id, parallel_mode_run_id=event.parallel_mode_run_id, + duration=event.duration, ), ) diff --git a/api/core/file/__init__.py b/api/core/file/__init__.py index bdaf8793fa..fe9e52258a 100644 --- 
a/api/core/file/__init__.py +++ b/api/core/file/__init__.py @@ -2,13 +2,13 @@ from .constants import FILE_MODEL_IDENTITY from .enums import ArrayFileAttribute, FileAttribute, FileBelongsTo, FileTransferMethod, FileType from .models import ( File, - FileExtraConfig, + FileUploadConfig, ImageConfig, ) __all__ = [ "FileType", - "FileExtraConfig", + "FileUploadConfig", "FileTransferMethod", "FileBelongsTo", "File", diff --git a/api/core/file/file_manager.py b/api/core/file/file_manager.py index b69d7a74c0..eb260a8f84 100644 --- a/api/core/file/file_manager.py +++ b/api/core/file/file_manager.py @@ -3,7 +3,7 @@ import base64 from configs import dify_config from core.file import file_repository from core.helper import ssrf_proxy -from core.model_runtime.entities import AudioPromptMessageContent, ImagePromptMessageContent +from core.model_runtime.entities import AudioPromptMessageContent, ImagePromptMessageContent, VideoPromptMessageContent from extensions.ext_database import db from extensions.ext_storage import storage @@ -33,25 +33,28 @@ def get_attr(*, file: File, attr: FileAttribute): raise ValueError(f"Invalid file attribute: {attr}") -def to_prompt_message_content(f: File, /): +def to_prompt_message_content( + f: File, + /, + *, + image_detail_config: ImagePromptMessageContent.DETAIL = ImagePromptMessageContent.DETAIL.LOW, +): """ - Convert a File object to an ImagePromptMessageContent object. + Convert a File object to an ImagePromptMessageContent or AudioPromptMessageContent object. - This function takes a File object and converts it to an ImagePromptMessageContent - object, which can be used as a prompt for image-based AI models. + This function takes a File object and converts it to an appropriate PromptMessageContent + object, which can be used as a prompt for image or audio-based AI models. Args: - file (File): The File object to convert. Must be of type FileType.IMAGE. + f (File): The File object to convert. 
+ image_detail_config (ImagePromptMessageContent.DETAIL): The detail level for image prompts. + If not provided, defaults to ImagePromptMessageContent.DETAIL.LOW. Returns: - ImagePromptMessageContent: An object containing the image data and detail level. + Union[ImagePromptMessageContent, AudioPromptMessageContent]: An object containing the file data and detail level Raises: - ValueError: If the file is not an image or if the file data is missing. - - Note: - The detail level of the image prompt is determined by the file's extra_config. - If not specified, it defaults to ImagePromptMessageContent.DETAIL.LOW. + ValueError: If the file type is not supported or if required data is missing. """ match f.type: case FileType.IMAGE: @@ -60,19 +63,20 @@ def to_prompt_message_content(f: File, /): else: data = _to_base64_data_string(f) - if f._extra_config and f._extra_config.image_config and f._extra_config.image_config.detail: - detail = f._extra_config.image_config.detail - else: - detail = ImagePromptMessageContent.DETAIL.LOW - - return ImagePromptMessageContent(data=data, detail=detail) + return ImagePromptMessageContent(data=data, detail=image_detail_config) case FileType.AUDIO: encoded_string = _file_to_encoded_string(f) if f.extension is None: raise ValueError("Missing file extension") return AudioPromptMessageContent(data=encoded_string, format=f.extension.lstrip(".")) + case FileType.VIDEO: + if dify_config.MULTIMODAL_SEND_VIDEO_FORMAT == "url": + data = _to_url(f) + else: + data = _to_base64_data_string(f) + return VideoPromptMessageContent(data=data, format=f.extension.lstrip(".")) case _: - raise ValueError(f"file type {f.type} is not supported") + raise ValueError(f"file type {f.type} is not supported") def download(f: File, /): @@ -112,7 +116,7 @@ def _download_file_content(path: str, /): def _get_encoded_string(f: File, /): match f.transfer_method: case FileTransferMethod.REMOTE_URL: - response = ssrf_proxy.get(f.remote_url,
follow_redirects=True) response.raise_for_status() content = response.content encoded_string = base64.b64encode(content).decode("utf-8") @@ -140,6 +144,8 @@ def _file_to_encoded_string(f: File, /): match f.type: case FileType.IMAGE: return _to_base64_data_string(f) + case FileType.VIDEO: + return _to_base64_data_string(f) case FileType.AUDIO: return _get_encoded_string(f) case _: diff --git a/api/core/file/models.py b/api/core/file/models.py index 866ff3155b..0142893787 100644 --- a/api/core/file/models.py +++ b/api/core/file/models.py @@ -21,7 +21,7 @@ class ImageConfig(BaseModel): detail: ImagePromptMessageContent.DETAIL | None = None -class FileExtraConfig(BaseModel): +class FileUploadConfig(BaseModel): """ File Upload Entity. """ @@ -46,7 +46,6 @@ class File(BaseModel): extension: Optional[str] = Field(default=None, description="File extension, should contains dot") mime_type: Optional[str] = None size: int = -1 - _extra_config: FileExtraConfig | None = None def to_dict(self) -> Mapping[str, str | int | None]: data = self.model_dump(mode="json") @@ -107,34 +106,4 @@ class File(BaseModel): case FileTransferMethod.TOOL_FILE: if not self.related_id: raise ValueError("Missing file related_id") - - # Validate the extra config. 
- if not self._extra_config: - return self - - if self._extra_config.allowed_file_types: - if self.type not in self._extra_config.allowed_file_types and self.type != FileType.CUSTOM: - raise ValueError(f"Invalid file type: {self.type}") - - if self._extra_config.allowed_extensions and self.extension not in self._extra_config.allowed_extensions: - raise ValueError(f"Invalid file extension: {self.extension}") - - if ( - self._extra_config.allowed_upload_methods - and self.transfer_method not in self._extra_config.allowed_upload_methods - ): - raise ValueError(f"Invalid transfer method: {self.transfer_method}") - - match self.type: - case FileType.IMAGE: - # NOTE: This part of validation is deprecated, but still used in app features "Image Upload". - if not self._extra_config.image_config: - return self - # TODO: skip check if transfer_methods is empty, because many test cases are not setting this field - if ( - self._extra_config.image_config.transfer_methods - and self.transfer_method not in self._extra_config.image_config.transfer_methods - ): - raise ValueError(f"Invalid transfer method: {self.transfer_method}") - return self diff --git a/api/core/helper/code_executor/__init__.py b/api/core/helper/code_executor/__init__.py index e69de29bb2..ec885c2218 100644 --- a/api/core/helper/code_executor/__init__.py +++ b/api/core/helper/code_executor/__init__.py @@ -0,0 +1,3 @@ +from .code_executor import CodeExecutor, CodeLanguage + +__all__ = ["CodeExecutor", "CodeLanguage"] diff --git a/api/core/helper/code_executor/code_executor.py b/api/core/helper/code_executor/code_executor.py index 4932284540..03c4b8d49d 100644 --- a/api/core/helper/code_executor/code_executor.py +++ b/api/core/helper/code_executor/code_executor.py @@ -1,7 +1,8 @@ import logging +from collections.abc import Mapping from enum import Enum from threading import Lock -from typing import Optional +from typing import Any, Optional from httpx import Timeout, post from pydantic import BaseModel @@ -117,7 
+118,7 @@ class CodeExecutor: return response.data.stdout or "" @classmethod - def execute_workflow_code_template(cls, language: CodeLanguage, code: str, inputs: dict) -> dict: + def execute_workflow_code_template(cls, language: CodeLanguage, code: str, inputs: Mapping[str, Any]) -> dict: """ Execute code :param language: code language diff --git a/api/core/helper/code_executor/template_transformer.py b/api/core/helper/code_executor/template_transformer.py index 6f016f27bc..b7a07b21e1 100644 --- a/api/core/helper/code_executor/template_transformer.py +++ b/api/core/helper/code_executor/template_transformer.py @@ -2,6 +2,8 @@ import json import re from abc import ABC, abstractmethod from base64 import b64encode +from collections.abc import Mapping +from typing import Any class TemplateTransformer(ABC): @@ -10,7 +12,7 @@ class TemplateTransformer(ABC): _result_tag: str = "<>" @classmethod - def transform_caller(cls, code: str, inputs: dict) -> tuple[str, str]: + def transform_caller(cls, code: str, inputs: Mapping[str, Any]) -> tuple[str, str]: """ Transform code to python runner :param code: code @@ -48,13 +50,13 @@ class TemplateTransformer(ABC): pass @classmethod - def serialize_inputs(cls, inputs: dict) -> str: + def serialize_inputs(cls, inputs: Mapping[str, Any]) -> str: inputs_json_str = json.dumps(inputs, ensure_ascii=False).encode() input_base64_encoded = b64encode(inputs_json_str).decode("utf-8") return input_base64_encoded @classmethod - def assemble_runner_script(cls, code: str, inputs: dict) -> str: + def assemble_runner_script(cls, code: str, inputs: Mapping[str, Any]) -> str: # assemble runner script script = cls.get_runner_script() script = script.replace(cls._code_placeholder, code) diff --git a/api/core/helper/ssrf_proxy.py b/api/core/helper/ssrf_proxy.py index df812ca83f..374bd9d57b 100644 --- a/api/core/helper/ssrf_proxy.py +++ b/api/core/helper/ssrf_proxy.py @@ -3,26 +3,20 @@ Proxy requests to avoid SSRF """ import logging -import os import time 
import httpx -SSRF_PROXY_ALL_URL = os.getenv("SSRF_PROXY_ALL_URL", "") -SSRF_PROXY_HTTP_URL = os.getenv("SSRF_PROXY_HTTP_URL", "") -SSRF_PROXY_HTTPS_URL = os.getenv("SSRF_PROXY_HTTPS_URL", "") -SSRF_DEFAULT_MAX_RETRIES = int(os.getenv("SSRF_DEFAULT_MAX_RETRIES", "3")) -SSRF_DEFAULT_TIME_OUT = float(os.getenv("SSRF_DEFAULT_TIME_OUT", "5")) -SSRF_DEFAULT_CONNECT_TIME_OUT = float(os.getenv("SSRF_DEFAULT_CONNECT_TIME_OUT", "5")) -SSRF_DEFAULT_READ_TIME_OUT = float(os.getenv("SSRF_DEFAULT_READ_TIME_OUT", "5")) -SSRF_DEFAULT_WRITE_TIME_OUT = float(os.getenv("SSRF_DEFAULT_WRITE_TIME_OUT", "5")) +from configs import dify_config + +SSRF_DEFAULT_MAX_RETRIES = dify_config.SSRF_DEFAULT_MAX_RETRIES proxy_mounts = ( { - "http://": httpx.HTTPTransport(proxy=SSRF_PROXY_HTTP_URL), - "https://": httpx.HTTPTransport(proxy=SSRF_PROXY_HTTPS_URL), + "http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL), + "https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL), } - if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL + if dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL else None ) @@ -38,17 +32,17 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs): if "timeout" not in kwargs: kwargs["timeout"] = httpx.Timeout( - SSRF_DEFAULT_TIME_OUT, - connect=SSRF_DEFAULT_CONNECT_TIME_OUT, - read=SSRF_DEFAULT_READ_TIME_OUT, - write=SSRF_DEFAULT_WRITE_TIME_OUT, + timeout=dify_config.SSRF_DEFAULT_TIME_OUT, + connect=dify_config.SSRF_DEFAULT_CONNECT_TIME_OUT, + read=dify_config.SSRF_DEFAULT_READ_TIME_OUT, + write=dify_config.SSRF_DEFAULT_WRITE_TIME_OUT, ) retries = 0 while retries <= max_retries: try: - if SSRF_PROXY_ALL_URL: - with httpx.Client(proxy=SSRF_PROXY_ALL_URL) as client: + if dify_config.SSRF_PROXY_ALL_URL: + with httpx.Client(proxy=dify_config.SSRF_PROXY_ALL_URL) as client: response = client.request(method=method, url=url, **kwargs) elif proxy_mounts: with httpx.Client(mounts=proxy_mounts) as client: diff --git 
a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py index d92c36a2df..688fb4776a 100644 --- a/api/core/memory/token_buffer_memory.py +++ b/api/core/memory/token_buffer_memory.py @@ -81,15 +81,18 @@ class TokenBufferMemory: db.session.query(WorkflowRun).filter(WorkflowRun.id == message.workflow_run_id).first() ) - if workflow_run: + if workflow_run and workflow_run.workflow: file_extra_config = FileUploadConfigManager.convert( workflow_run.workflow.features_dict, is_vision=False ) + detail = ImagePromptMessageContent.DETAIL.LOW if file_extra_config and app_record: file_objs = file_factory.build_from_message_files( message_files=files, tenant_id=app_record.tenant_id, config=file_extra_config ) + if file_extra_config.image_config and file_extra_config.image_config.detail: + detail = file_extra_config.image_config.detail else: file_objs = [] @@ -98,12 +101,16 @@ class TokenBufferMemory: else: prompt_message_contents: list[PromptMessageContent] = [] prompt_message_contents.append(TextPromptMessageContent(data=message.query)) - for file_obj in file_objs: - if file_obj.type in {FileType.IMAGE, FileType.AUDIO}: - prompt_message = file_manager.to_prompt_message_content(file_obj) + for file in file_objs: + if file.type in {FileType.IMAGE, FileType.AUDIO}: + prompt_message = file_manager.to_prompt_message_content( + file, + image_detail_config=detail, + ) prompt_message_contents.append(prompt_message) prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) + else: prompt_messages.append(UserPromptMessage(content=message.query)) diff --git a/api/core/model_manager.py b/api/core/model_manager.py index e21449ec24..059ba6c3d1 100644 --- a/api/core/model_manager.py +++ b/api/core/model_manager.py @@ -1,8 +1,8 @@ import logging -import os from collections.abc import Callable, Generator, Iterable, Sequence from typing import IO, Any, Optional, Union, cast +from configs import dify_config from core.entities.embedding_type import 
EmbeddingInputType from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle from core.entities.provider_entities import ModelLoadBalancingConfiguration @@ -473,7 +473,7 @@ class LBModelManager: continue - if bool(os.environ.get("DEBUG", "False").lower() == "true"): + if dify_config.DEBUG: logger.info( f"Model LB\nid: {config.id}\nname:{config.name}\n" f"tenant_id: {self._tenant_id}\nprovider: {self._provider}\n" diff --git a/api/core/model_runtime/entities/__init__.py b/api/core/model_runtime/entities/__init__.py index b3eb4d4dfe..f5d4427e3e 100644 --- a/api/core/model_runtime/entities/__init__.py +++ b/api/core/model_runtime/entities/__init__.py @@ -12,11 +12,13 @@ from .message_entities import ( TextPromptMessageContent, ToolPromptMessage, UserPromptMessage, + VideoPromptMessageContent, ) from .model_entities import ModelPropertyKey __all__ = [ "ImagePromptMessageContent", + "VideoPromptMessageContent", "PromptMessage", "PromptMessageRole", "LLMUsage", diff --git a/api/core/model_runtime/entities/message_entities.py b/api/core/model_runtime/entities/message_entities.py index cda1639661..3c244d368e 100644 --- a/api/core/model_runtime/entities/message_entities.py +++ b/api/core/model_runtime/entities/message_entities.py @@ -56,6 +56,7 @@ class PromptMessageContentType(Enum): TEXT = "text" IMAGE = "image" AUDIO = "audio" + VIDEO = "video" class PromptMessageContent(BaseModel): @@ -75,6 +76,12 @@ class TextPromptMessageContent(PromptMessageContent): type: PromptMessageContentType = PromptMessageContentType.TEXT +class VideoPromptMessageContent(PromptMessageContent): + type: PromptMessageContentType = PromptMessageContentType.VIDEO + data: str = Field(..., description="Base64 encoded video data") + format: str = Field(..., description="Video format") + + class AudioPromptMessageContent(PromptMessageContent): type: PromptMessageContentType = PromptMessageContentType.AUDIO data: str = Field(..., description="Base64 encoded audio data") 
diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py b/api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py index b6dfb20b66..84672520e0 100644 --- a/api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py +++ b/api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py @@ -47,9 +47,9 @@ class AzureRerankModel(RerankModel): result = response.read() return json.loads(result) except urllib.error.HTTPError as error: - logger.error(f"The request failed with status code: {error.code}") - logger.error(error.info()) - logger.error(error.read().decode("utf8", "ignore")) + logger.exception(f"The request failed with status code: {error.code}") + logger.exception(error.info()) + logger.exception(error.read().decode("utf8", "ignore")) raise def _invoke( diff --git a/api/core/model_runtime/model_providers/azure_openai/llm/llm.py b/api/core/model_runtime/model_providers/azure_openai/llm/llm.py index 1cd4823e13..95c8f36271 100644 --- a/api/core/model_runtime/model_providers/azure_openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/azure_openai/llm/llm.py @@ -113,7 +113,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel): try: client = AzureOpenAI(**self._to_credential_kwargs(credentials)) - if model.startswith("o1"): + if "o1" in model: client.chat.completions.create( messages=[{"role": "user", "content": "ping"}], model=model, @@ -311,7 +311,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel): prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages) block_as_stream = False - if model.startswith("o1"): + if "o1" in model: if stream: block_as_stream = True stream = False @@ -404,7 +404,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel): ] ) - if model.startswith("o1"): + if "o1" in model: system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)]) if 
system_message_count > 0: new_prompt_messages = [] @@ -653,7 +653,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel): tokens_per_message = 4 # if there's a name, the role is omitted tokens_per_name = -1 - elif model.startswith("gpt-35-turbo") or model.startswith("gpt-4") or model.startswith("o1"): + elif model.startswith("gpt-35-turbo") or model.startswith("gpt-4") or "o1" in model: tokens_per_message = 3 tokens_per_name = 1 else: diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v2.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v2.yaml index b1e5698375..d30dd5d6cf 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v2.yaml +++ b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v2.yaml @@ -16,9 +16,9 @@ parameter_rules: use_template: max_tokens required: true type: int - default: 4096 + default: 8192 min: 1 - max: 4096 + max: 8192 help: zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. 
diff --git a/api/core/model_runtime/model_providers/gitee_ai/llm/Qwen2.5-72B-Instruct.yaml b/api/core/model_runtime/model_providers/gitee_ai/llm/Qwen2.5-72B-Instruct.yaml new file mode 100644 index 0000000000..cb300e5ddf --- /dev/null +++ b/api/core/model_runtime/model_providers/gitee_ai/llm/Qwen2.5-72B-Instruct.yaml @@ -0,0 +1,95 @@ +model: Qwen2.5-72B-Instruct +label: + zh_Hans: Qwen2.5-72B-Instruct + en_US: Qwen2.5-72B-Instruct +model_type: llm +features: + - agent-thought + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 32768 +parameter_rules: + - name: max_tokens + use_template: max_tokens + label: + en_US: "Max Tokens" + zh_Hans: "最大Token数" + type: int + default: 512 + min: 1 + required: true + help: + en_US: "The maximum number of tokens that can be generated by the model varies depending on the model." + zh_Hans: "模型可生成的最大 token 个数,不同模型上限不同。" + + - name: temperature + use_template: temperature + label: + en_US: "Temperature" + zh_Hans: "采样温度" + type: float + default: 0.7 + min: 0.0 + max: 1.0 + precision: 1 + required: true + help: + en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time." + zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。" + + - name: top_p + use_template: top_p + label: + en_US: "Top P" + zh_Hans: "Top P" + type: float + default: 0.7 + min: 0.0 + max: 1.0 + precision: 1 + required: true + help: + en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. 
It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time." + zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。" + + - name: top_k + use_template: top_k + label: + en_US: "Top K" + zh_Hans: "Top K" + type: int + default: 50 + min: 0 + max: 100 + required: true + help: + en_US: "The value range is [0,100], which limits the model to only select from the top k words with the highest probability when choosing the next word at each step. The larger the value, the more diverse text generation will be." + zh_Hans: "取值范围为 [0,100],限制模型在每一步选择下一个词时,只从概率最高的前 k 个词中选取。数值越大,文本生成越多样。" + + - name: frequency_penalty + use_template: frequency_penalty + label: + en_US: "Frequency Penalty" + zh_Hans: "频率惩罚" + type: float + default: 0 + min: -1.0 + max: 1.0 + precision: 1 + required: false + help: + en_US: "Used to adjust the frequency of repeated content in automatically generated text. Positive numbers reduce repetition, while negative numbers increase repetition. After setting this parameter, if a word has already appeared in the text, the model will decrease the probability of choosing that word for subsequent generation." + zh_Hans: "用于调整自动生成文本中重复内容的频率。正数减少重复,负数增加重复。设置此参数后,如果一个词在文本中已经出现过,模型在后续生成中选择该词的概率会降低。" + + - name: user + use_template: text + label: + en_US: "User" + zh_Hans: "用户" + type: string + required: false + help: + en_US: "Used to track and differentiate conversation requests from different users." 
+ zh_Hans: "用于追踪和区分不同用户的对话请求。" diff --git a/api/core/model_runtime/model_providers/gitee_ai/llm/_position.yaml b/api/core/model_runtime/model_providers/gitee_ai/llm/_position.yaml index 21f6120742..13c31ad02b 100644 --- a/api/core/model_runtime/model_providers/gitee_ai/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/gitee_ai/llm/_position.yaml @@ -1,3 +1,4 @@ +- Qwen2.5-72B-Instruct - Qwen2-7B-Instruct - Qwen2-72B-Instruct - Yi-1.5-34B-Chat diff --git a/api/core/model_runtime/model_providers/gitee_ai/llm/llm.py b/api/core/model_runtime/model_providers/gitee_ai/llm/llm.py index b65db6f665..1b85b7fced 100644 --- a/api/core/model_runtime/model_providers/gitee_ai/llm/llm.py +++ b/api/core/model_runtime/model_providers/gitee_ai/llm/llm.py @@ -6,6 +6,7 @@ from core.model_runtime.entities.message_entities import ( PromptMessage, PromptMessageTool, ) +from core.model_runtime.entities.model_entities import ModelFeature from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel @@ -28,14 +29,13 @@ class GiteeAILargeLanguageModel(OAIAPICompatLargeLanguageModel): user: Optional[str] = None, ) -> Union[LLMResult, Generator]: self._add_custom_parameters(credentials, model, model_parameters) - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream) + return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) def validate_credentials(self, model: str, credentials: dict) -> None: self._add_custom_parameters(credentials, model, None) super().validate_credentials(model, credentials) - @staticmethod - def _add_custom_parameters(credentials: dict, model: str, model_parameters: dict) -> None: + def _add_custom_parameters(self, credentials: dict, model: str, model_parameters: dict) -> None: if model is None: model = "bge-large-zh-v1.5" @@ -45,3 +45,7 @@ class GiteeAILargeLanguageModel(OAIAPICompatLargeLanguageModel): credentials["mode"] 
= LLMMode.COMPLETION.value else: credentials["mode"] = LLMMode.CHAT.value + + schema = self.get_model_schema(model, credentials) + if ModelFeature.TOOL_CALL in schema.features or ModelFeature.MULTI_TOOL_CALL in schema.features: + credentials["function_calling_type"] = "tool_call" diff --git a/api/core/model_runtime/model_providers/jina/rerank/rerank.py b/api/core/model_runtime/model_providers/jina/rerank/rerank.py index 0350207651..aacc8e75d3 100644 --- a/api/core/model_runtime/model_providers/jina/rerank/rerank.py +++ b/api/core/model_runtime/model_providers/jina/rerank/rerank.py @@ -55,6 +55,7 @@ class JinaRerankModel(RerankModel): base_url + "/rerank", json={"model": model, "query": query, "documents": docs, "top_n": top_n}, headers={"Authorization": f"Bearer {credentials.get('api_key')}"}, + timeout=20, ) response.raise_for_status() results = response.json() diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py index 922e5e1314..68317d7179 100644 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai/llm/llm.py @@ -617,6 +617,10 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): # o1 compatibility block_as_stream = False if model.startswith("o1"): + if "max_tokens" in model_parameters: + model_parameters["max_completion_tokens"] = model_parameters["max_tokens"] + del model_parameters["max_tokens"] + if stream: block_as_stream = True stream = False diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py index 3a1bb75a59..cde5d214d0 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py +++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py @@ -29,6 +29,7 @@ from core.model_runtime.entities.message_entities import ( TextPromptMessageContent, ToolPromptMessage, UserPromptMessage, + VideoPromptMessageContent, ) from 
core.model_runtime.entities.model_entities import ( AIModelEntity, @@ -431,6 +432,14 @@ class TongyiLargeLanguageModel(LargeLanguageModel): sub_message_dict = {"image": image_url} sub_messages.append(sub_message_dict) + elif message_content.type == PromptMessageContentType.VIDEO: + message_content = cast(VideoPromptMessageContent, message_content) + video_url = message_content.data + if message_content.data.startswith("data:"): + raise InvokeError("not support base64, please set MULTIMODAL_SEND_VIDEO_FORMAT to url") + + sub_message_dict = {"video": video_url} + sub_messages.append(sub_message_dict) # resort sub_messages to ensure text is always at last sub_messages = sorted(sub_messages, key=lambda x: "text" in x) diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3.5-sonnet-v2.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3.5-sonnet-v2.yaml index 0be3e26e7a..37b9f30cc3 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3.5-sonnet-v2.yaml +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3.5-sonnet-v2.yaml @@ -13,9 +13,9 @@ parameter_rules: use_template: max_tokens required: true type: int - default: 4096 + default: 8192 min: 1 - max: 4096 + max: 8192 help: zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. 
diff --git a/api/core/model_runtime/model_providers/vessl_ai/vessl_ai.yaml b/api/core/model_runtime/model_providers/vessl_ai/vessl_ai.yaml index 6052756cae..2138b281b9 100644 --- a/api/core/model_runtime/model_providers/vessl_ai/vessl_ai.yaml +++ b/api/core/model_runtime/model_providers/vessl_ai/vessl_ai.yaml @@ -1,6 +1,6 @@ provider: vessl_ai label: - en_US: vessl_ai + en_US: VESSL AI icon_small: en_US: icon_s_en.svg icon_large: @@ -20,28 +20,28 @@ model_credential_schema: label: en_US: Model Name placeholder: - en_US: Enter your model name + en_US: Enter model name credential_form_schemas: - variable: endpoint_url label: - en_US: endpoint url + en_US: Endpoint Url type: text-input required: true placeholder: - en_US: Enter the url of your endpoint url + en_US: Enter VESSL AI service endpoint url - variable: api_key required: true label: en_US: API Key type: secret-input placeholder: - en_US: Enter your VESSL AI secret key + en_US: Enter VESSL AI secret key - variable: mode show_on: - variable: __model_type value: llm label: - en_US: Completion mode + en_US: Completion Mode type: select required: false default: chat diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py index 43bffad2a0..eddb94aba3 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py +++ b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py @@ -313,21 +313,35 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel): return params def _construct_glm_4v_messages(self, prompt_message: Union[str, list[PromptMessageContent]]) -> list[dict]: - if isinstance(prompt_message, str): + if isinstance(prompt_message, list): + sub_messages = [] + for item in prompt_message: + if item.type == PromptMessageContentType.IMAGE: + sub_messages.append( + { + "type": "image_url", + "image_url": {"url": self._remove_base64_header(item.data)}, + } + ) + elif item.type == PromptMessageContentType.VIDEO: + 
sub_messages.append( + { + "type": "video_url", + "video_url": {"url": self._remove_base64_header(item.data)}, + } + ) + else: + sub_messages.append({"type": "text", "text": item.data}) + return sub_messages + else: return [{"type": "text", "text": prompt_message}] - return [ - {"type": "image_url", "image_url": {"url": self._remove_image_header(item.data)}} - if item.type == PromptMessageContentType.IMAGE - else {"type": "text", "text": item.data} - for item in prompt_message - ] + def _remove_base64_header(self, file_content: str) -> str: + if file_content.startswith("data:"): + data_split = file_content.split(";base64,") + return data_split[1] - def _remove_image_header(self, image: str) -> str: - if image.startswith("data:image"): - return image.split(",")[1] - - return image + return file_content def _handle_generate_response( self, diff --git a/api/core/moderation/output_moderation.py b/api/core/moderation/output_moderation.py index d8d794be18..83f4d2d57d 100644 --- a/api/core/moderation/output_moderation.py +++ b/api/core/moderation/output_moderation.py @@ -126,6 +126,6 @@ class OutputModeration(BaseModel): result: ModerationOutputsResult = moderation_factory.moderation_for_outputs(moderation_buffer) return result except Exception as e: - logger.error("Moderation Output error: %s", e) + logger.exception("Moderation Output error: %s", e) return None diff --git a/api/core/ops/entities/config_entity.py b/api/core/ops/entities/config_entity.py index 5c79867571..ef0f9c708f 100644 --- a/api/core/ops/entities/config_entity.py +++ b/api/core/ops/entities/config_entity.py @@ -54,3 +54,7 @@ class LangSmithConfig(BaseTracingConfig): raise ValueError("endpoint must start with https://") return v + + +OPS_FILE_PATH = "ops_trace/" +OPS_TRACE_FAILED_KEY = "FAILED_OPS_TRACE" diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index db6ce9d9c3..256595286f 100644 --- a/api/core/ops/entities/trace_entity.py +++ 
b/api/core/ops/entities/trace_entity.py @@ -23,6 +23,11 @@ class BaseTraceInfo(BaseModel): return v return "" + class Config: + json_encoders = { + datetime: lambda v: v.isoformat(), + } + class WorkflowTraceInfo(BaseTraceInfo): workflow_data: Any @@ -100,6 +105,12 @@ class GenerateNameTraceInfo(BaseTraceInfo): tenant_id: str +class TaskData(BaseModel): + app_id: str + trace_info_type: str + trace_info: Any + + trace_info_info_map = { "WorkflowTraceInfo": WorkflowTraceInfo, "MessageTraceInfo": MessageTraceInfo, diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 764944f799..79704c115f 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -6,12 +6,13 @@ import threading import time from datetime import timedelta from typing import Any, Optional, Union -from uuid import UUID +from uuid import UUID, uuid4 from flask import current_app from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.entities.config_entity import ( + OPS_FILE_PATH, LangfuseConfig, LangSmithConfig, TracingProviderEnum, @@ -22,6 +23,7 @@ from core.ops.entities.trace_entity import ( MessageTraceInfo, ModerationTraceInfo, SuggestedQuestionTraceInfo, + TaskData, ToolTraceInfo, TraceTaskName, WorkflowTraceInfo, @@ -30,6 +32,7 @@ from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace from core.ops.utils import get_message_data from extensions.ext_database import db +from extensions.ext_storage import storage from models.model import App, AppModelConfig, Conversation, Message, MessageAgentThought, MessageFile, TraceAppConfig from models.workflow import WorkflowAppLog, WorkflowRun from tasks.ops_trace_task import process_trace_tasks @@ -708,7 +711,7 @@ class TraceQueueManager: trace_task.app_id = self.app_id trace_manager_queue.put(trace_task) except Exception as e: - logging.error(f"Error adding trace task: 
{e}") + logging.exception(f"Error adding trace task: {e}") finally: self.start_timer() @@ -727,7 +730,7 @@ class TraceQueueManager: if tasks: self.send_to_celery(tasks) except Exception as e: - logging.error(f"Error processing trace tasks: {e}") + logging.exception(f"Error processing trace tasks: {e}") def start_timer(self): global trace_manager_timer @@ -740,10 +743,17 @@ class TraceQueueManager: def send_to_celery(self, tasks: list[TraceTask]): with self.flask_app.app_context(): for task in tasks: + file_id = uuid4().hex trace_info = task.execute() - task_data = { + task_data = TaskData( + app_id=task.app_id, + trace_info_type=type(trace_info).__name__, + trace_info=trace_info.model_dump() if trace_info else None, + ) + file_path = f"{OPS_FILE_PATH}{task.app_id}/{file_id}.json" + storage.save(file_path, task_data.model_dump_json().encode("utf-8")) + file_info = { + "file_id": file_id, "app_id": task.app_id, - "trace_info_type": type(trace_info).__name__, - "trace_info": trace_info.model_dump() if trace_info else {}, } - process_trace_tasks.delay(task_data) + process_trace_tasks.delay(file_info) diff --git a/api/core/prompt/advanced_prompt_transform.py b/api/core/prompt/advanced_prompt_transform.py index bbd9531b19..0f3f824966 100644 --- a/api/core/prompt/advanced_prompt_transform.py +++ b/api/core/prompt/advanced_prompt_transform.py @@ -15,6 +15,7 @@ from core.model_runtime.entities import ( TextPromptMessageContent, UserPromptMessage, ) +from core.model_runtime.entities.message_entities import ImagePromptMessageContent from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig from core.prompt.prompt_transform import PromptTransform from core.prompt.utils.prompt_template_parser import PromptTemplateParser @@ -26,8 +27,13 @@ class AdvancedPromptTransform(PromptTransform): Advanced Prompt Transform for Workflow LLM Node. 
""" - def __init__(self, with_variable_tmpl: bool = False) -> None: + def __init__( + self, + with_variable_tmpl: bool = False, + image_detail_config: ImagePromptMessageContent.DETAIL = ImagePromptMessageContent.DETAIL.LOW, + ) -> None: self.with_variable_tmpl = with_variable_tmpl + self.image_detail_config = image_detail_config def get_prompt( self, diff --git a/api/core/rag/datasource/vdb/couchbase/couchbase_vector.py b/api/core/rag/datasource/vdb/couchbase/couchbase_vector.py index 3f88d2ca2b..98da5e3d5e 100644 --- a/api/core/rag/datasource/vdb/couchbase/couchbase_vector.py +++ b/api/core/rag/datasource/vdb/couchbase/couchbase_vector.py @@ -242,7 +242,7 @@ class CouchbaseVector(BaseVector): try: self._cluster.query(query, named_parameters={"doc_ids": ids}).execute() except Exception as e: - logger.error(e) + logger.exception(e) def delete_by_document_id(self, document_id: str): query = f""" diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py index c62042af80..b08811a021 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py @@ -178,6 +178,7 @@ class ElasticSearchVector(BaseVector): Field.VECTOR.value: { # Make sure the dimension is correct here "type": "dense_vector", "dims": dim, + "index": True, "similarity": "cosine", }, Field.METADATA_KEY.value: { diff --git a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py index abd8261a69..30d7f09ec2 100644 --- a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py +++ b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py @@ -79,7 +79,7 @@ class LindormVectorStore(BaseVector): existing_docs = self._client.mget(index=self._collection_name, body={"ids": batch_ids}, _source=False) return {doc["_id"] for doc in existing_docs["docs"] if doc["found"]} except Exception as e: - 
logger.error(f"Error fetching batch {batch_ids}: {e}") + logger.exception(f"Error fetching batch {batch_ids}: {e}") return set() @retry(stop=stop_after_attempt(3), wait=wait_fixed(60)) @@ -96,7 +96,7 @@ class LindormVectorStore(BaseVector): ) return {doc["_id"] for doc in existing_docs["docs"] if doc["found"]} except Exception as e: - logger.error(f"Error fetching batch {batch_ids}: {e}") + logger.exception(f"Error fetching batch {batch_ids}: {e}") return set() if ids is None: @@ -177,7 +177,7 @@ class LindormVectorStore(BaseVector): else: logger.warning(f"Index '{self._collection_name}' does not exist. No deletion performed.") except Exception as e: - logger.error(f"Error occurred while deleting the index: {e}") + logger.exception(f"Error occurred while deleting the index: {e}") raise e def text_exists(self, id: str) -> bool: @@ -201,7 +201,7 @@ class LindormVectorStore(BaseVector): try: response = self._client.search(index=self._collection_name, body=query) except Exception as e: - logger.error(f"Error executing search: {e}") + logger.exception(f"Error executing search: {e}") raise docs_and_scores = [] diff --git a/api/core/rag/datasource/vdb/milvus/milvus_vector.py b/api/core/rag/datasource/vdb/milvus/milvus_vector.py index 080a1ef567..5a263d6e78 100644 --- a/api/core/rag/datasource/vdb/milvus/milvus_vector.py +++ b/api/core/rag/datasource/vdb/milvus/milvus_vector.py @@ -86,7 +86,7 @@ class MilvusVector(BaseVector): ids = self._client.insert(collection_name=self._collection_name, data=batch_insert_list) pks.extend(ids) except MilvusException as e: - logger.error("Failed to insert batch starting at entity: %s/%s", i, total_count) + logger.exception("Failed to insert batch starting at entity: %s/%s", i, total_count) raise e return pks diff --git a/api/core/rag/datasource/vdb/myscale/myscale_vector.py b/api/core/rag/datasource/vdb/myscale/myscale_vector.py index 1fca926a2d..2610b60a77 100644 --- a/api/core/rag/datasource/vdb/myscale/myscale_vector.py +++ 
b/api/core/rag/datasource/vdb/myscale/myscale_vector.py @@ -142,7 +142,7 @@ class MyScaleVector(BaseVector): for r in self._client.query(sql).named_results() ] except Exception as e: - logging.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") + logging.exception(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] def delete(self) -> None: diff --git a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py index 0e0f107268..49eb00f140 100644 --- a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py +++ b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py @@ -129,7 +129,7 @@ class OpenSearchVector(BaseVector): if status == 404: logger.warning(f"Document not found for deletion: {doc_id}") else: - logger.error(f"Error deleting document: {error}") + logger.exception(f"Error deleting document: {error}") def delete(self) -> None: self._client.indices.delete(index=self._collection_name.lower()) @@ -158,7 +158,7 @@ class OpenSearchVector(BaseVector): try: response = self._client.search(index=self._collection_name.lower(), body=query) except Exception as e: - logger.error(f"Error executing search: {e}") + logger.exception(f"Error executing search: {e}") raise docs = [] diff --git a/api/core/rag/embedding/cached_embedding.py b/api/core/rag/embedding/cached_embedding.py index b3e93ce760..3ac65b88bb 100644 --- a/api/core/rag/embedding/cached_embedding.py +++ b/api/core/rag/embedding/cached_embedding.py @@ -89,7 +89,7 @@ class CacheEmbedding(Embeddings): db.session.rollback() except Exception as ex: db.session.rollback() - logger.error("Failed to embed documents: %s", ex) + logger.exception("Failed to embed documents: %s", ex) raise ex return text_embeddings diff --git a/api/core/rag/extractor/word_extractor.py b/api/core/rag/extractor/word_extractor.py index d4434ea28f..8e084ab4ff 100644 --- a/api/core/rag/extractor/word_extractor.py +++ 
b/api/core/rag/extractor/word_extractor.py @@ -28,7 +28,6 @@ logger = logging.getLogger(__name__) class WordExtractor(BaseExtractor): """Load docx files. - Args: file_path: Path to the file to load. """ @@ -230,7 +229,7 @@ class WordExtractor(BaseExtractor): for i in url_pattern.findall(x.text): hyperlinks_url = str(i) except Exception as e: - logger.error(e) + logger.exception(e) def parse_paragraph(paragraph): paragraph_content = [] diff --git a/api/core/tools/entities/api_entities.py b/api/core/tools/entities/api_entities.py index b1db559441..ddb1481276 100644 --- a/api/core/tools/entities/api_entities.py +++ b/api/core/tools/entities/api_entities.py @@ -1,6 +1,6 @@ from typing import Literal, Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field, field_validator from core.model_runtime.utils.encoders import jsonable_encoder from core.tools.entities.common_entities import I18nObject @@ -32,9 +32,14 @@ class UserToolProvider(BaseModel): original_credentials: Optional[dict] = None is_team_authorization: bool = False allow_delete: bool = True - tools: list[UserTool] | None = None + tools: list[UserTool] = Field(default_factory=list) labels: list[str] | None = None + @field_validator("tools", mode="before") + @classmethod + def convert_none_to_empty_list(cls, v): + return v if v is not None else [] + def to_dict(self) -> dict: # ------------- # overwrite tool parameter types for temp fix diff --git a/api/core/tools/provider/_position.yaml b/api/core/tools/provider/_position.yaml index 336588e3e2..d80974486d 100644 --- a/api/core/tools/provider/_position.yaml +++ b/api/core/tools/provider/_position.yaml @@ -48,6 +48,13 @@ - feishu_task - feishu_calendar - feishu_spreadsheet +- lark_base +- lark_document +- lark_message_and_group +- lark_wiki +- lark_task +- lark_calendar +- lark_spreadsheet - slack - twilio - wecom diff --git a/api/core/tools/provider/builtin/cogview/tools/cogvideo.py 
b/api/core/tools/provider/builtin/cogview/tools/cogvideo.py new file mode 100644 index 0000000000..7f69e833cb --- /dev/null +++ b/api/core/tools/provider/builtin/cogview/tools/cogvideo.py @@ -0,0 +1,24 @@ +from typing import Any, Union + +from zhipuai import ZhipuAI + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool + + +class CogVideoTool(BuiltinTool): + def _invoke( + self, user_id: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + client = ZhipuAI( + base_url=self.runtime.credentials["zhipuai_base_url"], + api_key=self.runtime.credentials["zhipuai_api_key"], + ) + if not tool_parameters.get("prompt") and not tool_parameters.get("image_url"): + return self.create_text_message("require at least one of prompt and image_url") + + response = client.videos.generations( + model="cogvideox", prompt=tool_parameters.get("prompt"), image_url=tool_parameters.get("image_url") + ) + + return self.create_json_message(response.dict()) diff --git a/api/core/tools/provider/builtin/cogview/tools/cogvideo.yaml b/api/core/tools/provider/builtin/cogview/tools/cogvideo.yaml new file mode 100644 index 0000000000..3df0cfcea9 --- /dev/null +++ b/api/core/tools/provider/builtin/cogview/tools/cogvideo.yaml @@ -0,0 +1,32 @@ +identity: + name: cogvideo + author: hjlarry + label: + en_US: CogVideo + zh_Hans: CogVideo 视频生成 +description: + human: + en_US: Use the CogVideox model provided by ZhipuAI to generate videos based on user prompts and images. + zh_Hans: 使用智谱cogvideox模型,根据用户输入的提示词和图片,生成视频。 + llm: A tool for generating videos. The input is user's prompt or image url or both of them, the output is a task id. You can use another tool with this task id to check the status and get the video. +parameters: + - name: prompt + type: string + label: + en_US: prompt + zh_Hans: 提示词 + human_description: + en_US: The prompt text used to generate video. 
+ zh_Hans: 用于生成视频的提示词。 + llm_description: The prompt text used to generate video. Optional. + form: llm + - name: image_url + type: string + label: + en_US: image url + zh_Hans: 图片链接 + human_description: + en_US: The image url used to generate video. + zh_Hans: 输入一个图片链接,生成的视频将基于该图片和提示词。 + llm_description: The image url used to generate video. Optional. + form: llm diff --git a/api/core/tools/provider/builtin/cogview/tools/cogvideo_job.py b/api/core/tools/provider/builtin/cogview/tools/cogvideo_job.py new file mode 100644 index 0000000000..a521f1c28a --- /dev/null +++ b/api/core/tools/provider/builtin/cogview/tools/cogvideo_job.py @@ -0,0 +1,30 @@ +from typing import Any, Union + +import httpx +from zhipuai import ZhipuAI + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool + + +class CogVideoJobTool(BuiltinTool): + def _invoke( + self, user_id: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + client = ZhipuAI( + api_key=self.runtime.credentials["zhipuai_api_key"], + base_url=self.runtime.credentials["zhipuai_base_url"], + ) + + response = client.videos.retrieve_videos_result(id=tool_parameters.get("id")) + result = [self.create_json_message(response.dict())] + if response.task_status == "SUCCESS": + for item in response.video_result: + video_cover_image = self.create_image_message(item.cover_image_url) + result.append(video_cover_image) + video = self.create_blob_message( + blob=httpx.get(item.url).content, meta={"mime_type": "video/mp4"}, save_as=self.VariableKey.VIDEO + ) + result.append(video) + + return result diff --git a/api/core/tools/provider/builtin/cogview/tools/cogvideo_job.yaml b/api/core/tools/provider/builtin/cogview/tools/cogvideo_job.yaml new file mode 100644 index 0000000000..fb2eb3ab13 --- /dev/null +++ b/api/core/tools/provider/builtin/cogview/tools/cogvideo_job.yaml @@ -0,0 +1,21 @@ +identity: + name: cogvideo_job + author: hjlarry 
+ label: + en_US: CogVideo Result + zh_Hans: CogVideo 结果获取 +description: + human: + en_US: Get the result of CogVideo tool generation. + zh_Hans: 根据 CogVideo 工具返回的 id 获取视频生成结果。 + llm: Get the result of CogVideo tool generation. The input is the id which is returned by the CogVideo tool. The output is the url of video and video cover image. +parameters: + - name: id + type: string + label: + en_US: id + human_description: + en_US: The id returned by the CogVideo. + zh_Hans: CogVideo 工具返回的 id。 + llm_description: The id returned by the cogvideo. + form: llm diff --git a/api/core/tools/provider/builtin/comfyui/tools/comfyui_client.py b/api/core/tools/provider/builtin/comfyui/tools/comfyui_client.py index 1aae7b2442..bed9cd1882 100644 --- a/api/core/tools/provider/builtin/comfyui/tools/comfyui_client.py +++ b/api/core/tools/provider/builtin/comfyui/tools/comfyui_client.py @@ -48,7 +48,6 @@ class ComfyUiClient: prompt = origin_prompt.copy() id_to_class_type = {id: details["class_type"] for id, details in prompt.items()} k_sampler = [key for key, value in id_to_class_type.items() if value == "KSampler"][0] - prompt.get(k_sampler)["inputs"]["seed"] = random.randint(10**14, 10**15 - 1) positive_input_id = prompt.get(k_sampler)["inputs"]["positive"][0] prompt.get(positive_input_id)["inputs"]["text"] = positive_prompt @@ -72,6 +71,18 @@ class ComfyUiClient: prompt.get(load_image)["inputs"]["image"] = image_name return prompt + def set_prompt_seed_by_id(self, origin_prompt: dict, seed_id: str) -> dict: + prompt = origin_prompt.copy() + if seed_id not in prompt: + raise Exception("Not a valid seed node") + if "seed" in prompt[seed_id]["inputs"]: + prompt[seed_id]["inputs"]["seed"] = random.randint(10**14, 10**15 - 1) + elif "noise_seed" in prompt[seed_id]["inputs"]: + prompt[seed_id]["inputs"]["noise_seed"] = random.randint(10**14, 10**15 - 1) + else: + raise Exception("Not a valid seed node") + return prompt + def track_progress(self, prompt: dict, ws: WebSocket, prompt_id: 
str): node_ids = list(prompt.keys()) finished_nodes = [] diff --git a/api/core/tools/provider/builtin/comfyui/tools/comfyui_workflow.py b/api/core/tools/provider/builtin/comfyui/tools/comfyui_workflow.py index d62772cda7..8783736277 100644 --- a/api/core/tools/provider/builtin/comfyui/tools/comfyui_workflow.py +++ b/api/core/tools/provider/builtin/comfyui/tools/comfyui_workflow.py @@ -70,6 +70,9 @@ class ComfyUIWorkflowTool(BuiltinTool): else: prompt = comfyui.set_prompt_images_by_default(prompt, image_names) + if seed_id := tool_parameters.get("seed_id"): + prompt = comfyui.set_prompt_seed_by_id(prompt, seed_id) + images = comfyui.generate_image_by_prompt(prompt) result = [] for img in images: diff --git a/api/core/tools/provider/builtin/comfyui/tools/comfyui_workflow.yaml b/api/core/tools/provider/builtin/comfyui/tools/comfyui_workflow.yaml index dc4e0d77b2..9428acbe94 100644 --- a/api/core/tools/provider/builtin/comfyui/tools/comfyui_workflow.yaml +++ b/api/core/tools/provider/builtin/comfyui/tools/comfyui_workflow.yaml @@ -52,3 +52,12 @@ parameters: en_US: When the workflow has multiple image nodes, enter the ID list of these nodes, and the images will be passed to ComfyUI in the order of the list. zh_Hans: 当工作流有多个图片节点时,输入这些节点的ID列表,图片将按列表顺序传给ComfyUI form: form + - name: seed_id + type: string + label: + en_US: Seed Node Id + zh_Hans: 种子节点ID + human_description: + en_US: If you need to generate different images each time, you need to enter the ID of the seed node. 
+ zh_Hans: 如果需要每次生成时使用不同的种子,需要输入包含种子的节点的ID + form: form diff --git a/api/core/tools/provider/builtin/email/_assets/icon.svg b/api/core/tools/provider/builtin/email/_assets/icon.svg new file mode 100644 index 0000000000..b34f333890 --- /dev/null +++ b/api/core/tools/provider/builtin/email/_assets/icon.svg @@ -0,0 +1 @@ + diff --git a/api/core/tools/provider/builtin/email/email.py b/api/core/tools/provider/builtin/email/email.py new file mode 100644 index 0000000000..182d8dac28 --- /dev/null +++ b/api/core/tools/provider/builtin/email/email.py @@ -0,0 +1,7 @@ +from core.tools.provider.builtin.email.tools.send_mail import SendMailTool +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController + + +class SmtpProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + SendMailTool() diff --git a/api/core/tools/provider/builtin/email/email.yaml b/api/core/tools/provider/builtin/email/email.yaml new file mode 100644 index 0000000000..bb1bb7f6f3 --- /dev/null +++ b/api/core/tools/provider/builtin/email/email.yaml @@ -0,0 +1,83 @@ +identity: + author: wakaka6 + name: email + label: + en_US: email + zh_Hans: 电子邮件 + description: + en_US: send email through smtp protocol + zh_Hans: 通过smtp协议发送电子邮件 + icon: icon.svg + tags: + - utilities +credentials_for_provider: + email_account: + type: text-input + required: true + label: + en_US: email account + zh_Hans: 邮件账号 + placeholder: + en_US: input you email account + zh_Hans: 输入你的邮箱账号 + help: + en_US: email account + zh_Hans: 邮件账号 + email_password: + type: secret-input + required: true + label: + en_US: email password + zh_Hans: 邮件密码 + placeholder: + en_US: email password + zh_Hans: 邮件密码 + help: + en_US: email password + zh_Hans: 邮件密码 + smtp_server: + type: text-input + required: true + label: + en_US: smtp server + zh_Hans: 发信smtp服务器地址 + placeholder: + en_US: smtp server + zh_Hans: 发信smtp服务器地址 + help: + en_US: smtp server + zh_Hans: 发信smtp服务器地址 + smtp_port: + 
type: text-input + required: true + label: + en_US: smtp server port + zh_Hans: 发信smtp服务器端口 + placeholder: + en_US: smtp server port + zh_Hans: 发信smtp服务器端口 + help: + en_US: smtp server port + zh_Hans: 发信smtp服务器端口 + encrypt_method: + type: select + required: true + options: + - value: NONE + label: + en_US: NONE + zh_Hans: 无加密 + - value: SSL + label: + en_US: SSL + zh_Hans: SSL加密 + - value: TLS + label: + en_US: START TLS + zh_Hans: START TLS加密 + label: + en_US: encrypt method + zh_Hans: 加密方式 + help: + en_US: smtp server encrypt method + zh_Hans: 发信smtp服务器加密方式 diff --git a/api/core/tools/provider/builtin/email/tools/send.py b/api/core/tools/provider/builtin/email/tools/send.py new file mode 100644 index 0000000000..35df574a41 --- /dev/null +++ b/api/core/tools/provider/builtin/email/tools/send.py @@ -0,0 +1,53 @@ +import logging +import smtplib +import ssl +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText + +from pydantic import BaseModel + + +class SendEmailToolParameters(BaseModel): + smtp_server: str + smtp_port: int + + email_account: str + email_password: str + + sender_to: str + subject: str + email_content: str + encrypt_method: str + + +def send_mail(parmas: SendEmailToolParameters): + timeout = 60 + msg = MIMEMultipart("alternative") + msg["From"] = parmas.email_account + msg["To"] = parmas.sender_to + msg["Subject"] = parmas.subject + msg.attach(MIMEText(parmas.email_content, "plain")) + msg.attach(MIMEText(parmas.email_content, "html")) + + ctx = ssl.create_default_context() + + if parmas.encrypt_method.upper() == "SSL": + try: + with smtplib.SMTP_SSL(parmas.smtp_server, parmas.smtp_port, context=ctx, timeout=timeout) as server: + server.login(parmas.email_account, parmas.email_password) + server.sendmail(parmas.email_account, parmas.sender_to, msg.as_string()) + return True + except Exception as e: + logging.exception("send email failed: %s", e) + return False + else: # NONE or TLS + try: + with 
smtplib.SMTP(parmas.smtp_server, parmas.smtp_port, timeout=timeout) as server: + if parmas.encrypt_method.upper() == "TLS": + server.starttls(context=ctx) + server.login(parmas.email_account, parmas.email_password) + server.sendmail(parmas.email_account, parmas.sender_to, msg.as_string()) + return True + except Exception as e: + logging.exception("send email failed: %s", e) + return False diff --git a/api/core/tools/provider/builtin/email/tools/send_mail.py b/api/core/tools/provider/builtin/email/tools/send_mail.py new file mode 100644 index 0000000000..d51d5439b7 --- /dev/null +++ b/api/core/tools/provider/builtin/email/tools/send_mail.py @@ -0,0 +1,66 @@ +import re +from typing import Any, Union + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.provider.builtin.email.tools.send import ( + SendEmailToolParameters, + send_mail, +) +from core.tools.tool.builtin_tool import BuiltinTool + + +class SendMailTool(BuiltinTool): + def _invoke( + self, user_id: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + """ + invoke tools + """ + sender = self.runtime.credentials.get("email_account", "") + email_rgx = re.compile(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$") + password = self.runtime.credentials.get("email_password", "") + smtp_server = self.runtime.credentials.get("smtp_server", "") + if not smtp_server: + return self.create_text_message("please input smtp server") + smtp_port = self.runtime.credentials.get("smtp_port", "") + try: + smtp_port = int(smtp_port) + except ValueError: + return self.create_text_message("Invalid parameter smtp_port(should be int)") + + if not sender: + return self.create_text_message("please input sender") + if not email_rgx.match(sender): + return self.create_text_message("Invalid parameter userid, the sender is not a mailbox") + + receiver_email = tool_parameters["send_to"] + if not receiver_email: + return self.create_text_message("please input 
receiver email") + if not email_rgx.match(receiver_email): + return self.create_text_message("Invalid parameter receiver email, the receiver email is not a mailbox") + email_content = tool_parameters.get("email_content", "") + + if not email_content: + return self.create_text_message("please input email content") + + subject = tool_parameters.get("subject", "") + if not subject: + return self.create_text_message("please input email subject") + + encrypt_method = self.runtime.credentials.get("encrypt_method", "") + if not encrypt_method: + return self.create_text_message("please input encrypt method") + + send_email_params = SendEmailToolParameters( + smtp_server=smtp_server, + smtp_port=smtp_port, + email_account=sender, + email_password=password, + sender_to=receiver_email, + subject=subject, + email_content=email_content, + encrypt_method=encrypt_method, + ) + if send_mail(send_email_params): + return self.create_text_message("send email success") + return self.create_text_message("send email failed") diff --git a/api/core/tools/provider/builtin/email/tools/send_mail.yaml b/api/core/tools/provider/builtin/email/tools/send_mail.yaml new file mode 100644 index 0000000000..f54880bf3e --- /dev/null +++ b/api/core/tools/provider/builtin/email/tools/send_mail.yaml @@ -0,0 +1,46 @@ +identity: + name: send_mail + author: wakaka6 + label: + en_US: send email + zh_Hans: 发送邮件 + icon: icon.svg +description: + human: + en_US: A tool for sending email + zh_Hans: 用于发送邮件 + llm: A tool for sending email +parameters: + - name: send_to + type: string + required: true + label: + en_US: Recipient email account + zh_Hans: 收件人邮箱账号 + human_description: + en_US: Recipient email account + zh_Hans: 收件人邮箱账号 + llm_description: Recipient email account + form: llm + - name: subject + type: string + required: true + label: + en_US: email subject + zh_Hans: 邮件主题 + human_description: + en_US: email subject + zh_Hans: 邮件主题 + llm_description: email subject + form: llm + - name: email_content + 
type: string + required: true + label: + en_US: email content + zh_Hans: 邮件内容 + human_description: + en_US: email content + zh_Hans: 邮件内容 + llm_description: email content + form: llm diff --git a/api/core/tools/provider/builtin/email/tools/send_mail_batch.py b/api/core/tools/provider/builtin/email/tools/send_mail_batch.py new file mode 100644 index 0000000000..ff7e176990 --- /dev/null +++ b/api/core/tools/provider/builtin/email/tools/send_mail_batch.py @@ -0,0 +1,75 @@ +import json +import re +from typing import Any, Union + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.provider.builtin.email.tools.send import ( + SendEmailToolParameters, + send_mail, +) +from core.tools.tool.builtin_tool import BuiltinTool + + +class SendMailTool(BuiltinTool): + def _invoke( + self, user_id: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + """ + invoke tools + """ + sender = self.runtime.credentials.get("email_account", "") + email_rgx = re.compile(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$") + password = self.runtime.credentials.get("email_password", "") + smtp_server = self.runtime.credentials.get("smtp_server", "") + if not smtp_server: + return self.create_text_message("please input smtp server") + smtp_port = self.runtime.credentials.get("smtp_port", "") + try: + smtp_port = int(smtp_port) + except ValueError: + return self.create_text_message("Invalid parameter smtp_port(should be int)") + + if not sender: + return self.create_text_message("please input sender") + if not email_rgx.match(sender): + return self.create_text_message("Invalid parameter userid, the sender is not a mailbox") + + receivers_email = tool_parameters["send_to"] + if not receivers_email: + return self.create_text_message("please input receiver email") + receivers_email = json.loads(receivers_email) + for receiver in receivers_email: + if not email_rgx.match(receiver): + return self.create_text_message( + 
f"Invalid parameter receiver email, the receiver email({receiver}) is not a mailbox" + ) + email_content = tool_parameters.get("email_content", "") + + if not email_content: + return self.create_text_message("please input email content") + + subject = tool_parameters.get("subject", "") + if not subject: + return self.create_text_message("please input email subject") + + encrypt_method = self.runtime.credentials.get("encrypt_method", "") + if not encrypt_method: + return self.create_text_message("please input encrypt method") + + msg = {} + for receiver in receivers_email: + send_email_params = SendEmailToolParameters( + smtp_server=smtp_server, + smtp_port=smtp_port, + email_account=sender, + email_password=password, + sender_to=receiver, + subject=subject, + email_content=email_content, + encrypt_method=encrypt_method, + ) + if send_mail(send_email_params): + msg[receiver] = "send email success" + else: + msg[receiver] = "send email failed" + return self.create_text_message(json.dumps(msg)) diff --git a/api/core/tools/provider/builtin/email/tools/send_mail_batch.yaml b/api/core/tools/provider/builtin/email/tools/send_mail_batch.yaml new file mode 100644 index 0000000000..6e4aa785cb --- /dev/null +++ b/api/core/tools/provider/builtin/email/tools/send_mail_batch.yaml @@ -0,0 +1,46 @@ +identity: + name: send_mail_batch + author: wakaka6 + label: + en_US: send email to multiple recipients + zh_Hans: 发送邮件给多个收件人 + icon: icon.svg +description: + human: + en_US: A tool for sending email to multiple recipients + zh_Hans: 用于发送邮件给多个收件人的工具 + llm: A tool for sending email to multiple recipients +parameters: + - name: send_to + type: string + required: true + label: + en_US: Recipient email account(json list) + zh_Hans: 收件人邮箱账号(json list) + human_description: + en_US: Recipient email account + zh_Hans: 收件人邮箱账号 + llm_description: A list of recipient email account(json format) + form: llm + - name: subject + type: string + required: true + label: + en_US: email subject + zh_Hans: 
邮件主题 + human_description: + en_US: email subject + zh_Hans: 邮件主题 + llm_description: email subject + form: llm + - name: email_content + type: string + required: true + label: + en_US: email content + zh_Hans: 邮件内容 + human_description: + en_US: email content + zh_Hans: 邮件内容 + llm_description: email content + form: llm diff --git a/api/core/tools/provider/builtin/feishu_base/tools/list_tables.yaml b/api/core/tools/provider/builtin/feishu_base/tools/list_tables.yaml index 5a3891bd45..7571519039 100644 --- a/api/core/tools/provider/builtin/feishu_base/tools/list_tables.yaml +++ b/api/core/tools/provider/builtin/feishu_base/tools/list_tables.yaml @@ -34,7 +34,7 @@ parameters: Page size, default value: 20, maximum value: 100. zh_Hans: 分页大小,默认值:20,最大值:100。 llm_description: 分页大小,默认值:20,最大值:100。 - form: llm + form: form - name: page_token type: string diff --git a/api/core/tools/provider/builtin/feishu_base/tools/search_records.yaml b/api/core/tools/provider/builtin/feishu_base/tools/search_records.yaml index 6cac4b0524..decf76d53e 100644 --- a/api/core/tools/provider/builtin/feishu_base/tools/search_records.yaml +++ b/api/core/tools/provider/builtin/feishu_base/tools/search_records.yaml @@ -147,7 +147,7 @@ parameters: Page size, default value: 20, maximum value: 500. zh_Hans: 分页大小,默认值:20,最大值:500。 llm_description: 分页大小,默认值:20,最大值:500。 - form: llm + form: form - name: page_token type: string diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.yaml index f4a5bfe6ba..5f0155a246 100644 --- a/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.yaml +++ b/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.yaml @@ -47,7 +47,7 @@ parameters: en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 50, and the value range is [50,1000]. 
zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 50,取值范围为 [50,1000]。 llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 50,取值范围为 [50,1000]。 - form: llm + form: form - name: page_token type: string diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.yaml index e92a282091..bd60a07b5b 100644 --- a/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.yaml +++ b/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.yaml @@ -85,7 +85,7 @@ parameters: en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [10,100]. zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [10,100]。 llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [10,100]。 - form: llm + form: form - name: page_token type: string diff --git a/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml b/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml index 5b8ef7d53c..d4fab96c1f 100644 --- a/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml +++ b/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml @@ -59,7 +59,7 @@ parameters: en_US: Paging size, the default and maximum value is 500. zh_Hans: 分页大小, 默认值和最大值为 500。 llm_description: 分页大小, 表示一次请求最多返回多少条数据,默认值和最大值为 500。 - form: llm + form: form - name: page_token type: string diff --git a/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.yaml b/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.yaml index 153c8c80e5..984c9120e8 100644 --- a/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.yaml +++ b/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.yaml @@ -81,7 +81,7 @@ parameters: en_US: The page size, i.e., the number of data entries returned in a single request. 
The default value is 20, and the value range is [1,50]. zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。 llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。 - form: llm + form: form - name: page_token type: string diff --git a/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.yaml b/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.yaml index 8d5fed9d0b..85a138292f 100644 --- a/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.yaml +++ b/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.yaml @@ -57,7 +57,7 @@ parameters: en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50]. zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。 llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。 - form: llm + form: form - name: page_token type: string diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.yaml index ef457f8e00..b73335f405 100644 --- a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.yaml +++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.yaml @@ -56,7 +56,7 @@ parameters: en_US: Number of columns to add, range (0-5000]. zh_Hans: 要增加的列数,范围(0-5000]。 llm_description: 要增加的列数,范围(0-5000]。 - form: llm + form: form - name: values type: string diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.yaml index 37653325ae..6bce305b98 100644 --- a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.yaml +++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.yaml @@ -56,7 +56,7 @@ parameters: en_US: Number of rows to add, range (0-5000]. 
zh_Hans: 要增加行数,范围(0-5000]。 llm_description: 要增加行数,范围(0-5000]。 - form: llm + form: form - name: values type: string diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.yaml index 3273857b70..34da74592d 100644 --- a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.yaml +++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.yaml @@ -82,7 +82,7 @@ parameters: en_US: Starting column number, starting from 1. zh_Hans: 起始列号,从 1 开始。 llm_description: 起始列号,从 1 开始。 - form: llm + form: form - name: num_cols type: number @@ -94,4 +94,4 @@ parameters: en_US: Number of columns to read. zh_Hans: 读取列数 llm_description: 读取列数 - form: llm + form: form diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.yaml index 3e9206e8ef..5dfa8d5835 100644 --- a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.yaml +++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.yaml @@ -82,7 +82,7 @@ parameters: en_US: Starting row number, starting from 1. zh_Hans: 起始行号,从 1 开始。 llm_description: 起始行号,从 1 开始。 - form: llm + form: form - name: num_rows type: number @@ -94,4 +94,4 @@ parameters: en_US: Number of rows to read. zh_Hans: 读取行数 llm_description: 读取行数 - form: llm + form: form diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.yaml index e3dc80e1eb..10534436d6 100644 --- a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.yaml +++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.yaml @@ -82,7 +82,7 @@ parameters: en_US: Starting row number, starting from 1. 
zh_Hans: 起始行号,从 1 开始。 llm_description: 起始行号,从 1 开始。 - form: llm + form: form - name: num_rows type: number @@ -94,7 +94,7 @@ parameters: en_US: Number of rows to read. zh_Hans: 读取行数 llm_description: 读取行数 - form: llm + form: form - name: range type: string diff --git a/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.yaml b/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.yaml index 7d6ac3c824..74d51e7bcb 100644 --- a/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.yaml +++ b/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.yaml @@ -36,7 +36,7 @@ parameters: en_US: The size of each page, with a maximum value of 50. zh_Hans: 分页大小,最大值 50。 llm_description: 分页大小,最大值 50。 - form: llm + form: form - name: page_token type: string diff --git a/api/core/tools/provider/builtin/gitee_ai/gitee_ai.yaml b/api/core/tools/provider/builtin/gitee_ai/gitee_ai.yaml index 2e18f8a7fc..d0475665dd 100644 --- a/api/core/tools/provider/builtin/gitee_ai/gitee_ai.yaml +++ b/api/core/tools/provider/builtin/gitee_ai/gitee_ai.yaml @@ -5,7 +5,7 @@ identity: en_US: Gitee AI zh_Hans: Gitee AI description: - en_US: 快速体验大模型,领先探索 AI 开源世界 + en_US: Quickly experience large models and explore the leading AI open source world zh_Hans: 快速体验大模型,领先探索 AI 开源世界 icon: icon.svg tags: diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.py b/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.py index 45ab15f437..716da7c8c1 100644 --- a/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.py +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.py @@ -13,15 +13,15 @@ class GitlabCommitsTool(BuiltinTool): def _invoke( self, user_id: str, tool_parameters: dict[str, Any] ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: - project = tool_parameters.get("project", "") + branch = tool_parameters.get("branch", "") repository = tool_parameters.get("repository", "") employee = tool_parameters.get("employee", 
"") start_time = tool_parameters.get("start_time", "") end_time = tool_parameters.get("end_time", "") change_type = tool_parameters.get("change_type", "all") - if not project and not repository: - return self.create_text_message("Either project or repository is required") + if not repository: + return self.create_text_message("Either repository is required") if not start_time: start_time = (datetime.utcnow() - timedelta(days=1)).isoformat() @@ -37,14 +37,9 @@ class GitlabCommitsTool(BuiltinTool): site_url = "https://gitlab.com" # Get commit content - if repository: - result = self.fetch_commits( - site_url, access_token, repository, employee, start_time, end_time, change_type, is_repository=True - ) - else: - result = self.fetch_commits( - site_url, access_token, project, employee, start_time, end_time, change_type, is_repository=False - ) + result = self.fetch_commits( + site_url, access_token, repository, branch, employee, start_time, end_time, change_type, is_repository=True + ) return [self.create_json_message(item) for item in result] @@ -52,7 +47,8 @@ class GitlabCommitsTool(BuiltinTool): self, site_url: str, access_token: str, - identifier: str, + repository: str, + branch: str, employee: str, start_time: str, end_time: str, @@ -64,27 +60,14 @@ class GitlabCommitsTool(BuiltinTool): results = [] try: - if is_repository: - # URL encode the repository path - encoded_identifier = urllib.parse.quote(identifier, safe="") - commits_url = f"{domain}/api/v4/projects/{encoded_identifier}/repository/commits" - else: - # Get all projects - url = f"{domain}/api/v4/projects" - response = requests.get(url, headers=headers) - response.raise_for_status() - projects = response.json() - - filtered_projects = [p for p in projects if identifier == "*" or p["name"] == identifier] - - for project in filtered_projects: - project_id = project["id"] - project_name = project["name"] - print(f"Project: {project_name}") - - commits_url = 
f"{domain}/api/v4/projects/{project_id}/repository/commits" + # URL encode the repository path + encoded_repository = urllib.parse.quote(repository, safe="") + commits_url = f"{domain}/api/v4/projects/{encoded_repository}/repository/commits" + # Fetch commits for the repository params = {"since": start_time, "until": end_time} + if branch: + params["ref_name"] = branch if employee: params["author"] = employee @@ -96,10 +79,7 @@ class GitlabCommitsTool(BuiltinTool): commit_sha = commit["id"] author_name = commit["author_name"] - if is_repository: - diff_url = f"{domain}/api/v4/projects/{encoded_identifier}/repository/commits/{commit_sha}/diff" - else: - diff_url = f"{domain}/api/v4/projects/{project_id}/repository/commits/{commit_sha}/diff" + diff_url = f"{domain}/api/v4/projects/{encoded_repository}/repository/commits/{commit_sha}/diff" diff_response = requests.get(diff_url, headers=headers) diff_response.raise_for_status() @@ -120,7 +100,14 @@ class GitlabCommitsTool(BuiltinTool): if line.startswith("+") and not line.startswith("+++") ] ) - results.append({"commit_sha": commit_sha, "author_name": author_name, "diff": final_code}) + results.append( + { + "diff_url": diff_url, + "commit_sha": commit_sha, + "author_name": author_name, + "diff": final_code, + } + ) else: if total_changes > 1: final_code = "".join( @@ -134,7 +121,12 @@ class GitlabCommitsTool(BuiltinTool): ) final_code_escaped = json.dumps(final_code)[1:-1] # Escape the final code results.append( - {"commit_sha": commit_sha, "author_name": author_name, "diff": final_code_escaped} + { + "diff_url": diff_url, + "commit_sha": commit_sha, + "author_name": author_name, + "diff": final_code_escaped, + } ) except requests.RequestException as e: print(f"Error fetching data from GitLab: {e}") diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.yaml b/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.yaml index 669378ac97..2ff5fb570e 100644 --- 
a/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.yaml +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.yaml @@ -23,7 +23,7 @@ parameters: form: llm - name: repository type: string - required: false + required: true label: en_US: repository zh_Hans: 仓库路径 @@ -32,16 +32,16 @@ parameters: zh_Hans: 仓库路径,以namespace/project_name的形式。 llm_description: Repository path for GitLab, like namespace/project_name. form: llm - - name: project + - name: branch type: string required: false label: - en_US: project - zh_Hans: 项目名 + en_US: branch + zh_Hans: 分支名 human_description: - en_US: project - zh_Hans: 项目名 - llm_description: project for GitLab + en_US: branch + zh_Hans: 分支名 + llm_description: branch for GitLab form: llm - name: start_time type: string diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_mergerequests.py b/api/core/tools/provider/builtin/gitlab/tools/gitlab_mergerequests.py new file mode 100644 index 0000000000..ef99fa82e9 --- /dev/null +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_mergerequests.py @@ -0,0 +1,78 @@ +import urllib.parse +from typing import Any, Union + +import requests + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool + + +class GitlabMergeRequestsTool(BuiltinTool): + def _invoke( + self, user_id: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + repository = tool_parameters.get("repository", "") + branch = tool_parameters.get("branch", "") + start_time = tool_parameters.get("start_time", "") + end_time = tool_parameters.get("end_time", "") + state = tool_parameters.get("state", "opened") # Default to "opened" + + if not repository: + return self.create_text_message("Repository is required") + + access_token = self.runtime.credentials.get("access_tokens") + site_url = self.runtime.credentials.get("site_url") + + if not access_token: + return self.create_text_message("Gitlab API 
Access Tokens is required.") + if not site_url: + site_url = "https://gitlab.com" + + # Get merge requests + result = self.get_merge_requests(site_url, access_token, repository, branch, start_time, end_time, state) + + return [self.create_json_message(item) for item in result] + + def get_merge_requests( + self, site_url: str, access_token: str, repository: str, branch: str, start_time: str, end_time: str, state: str + ) -> list[dict[str, Any]]: + domain = site_url + headers = {"PRIVATE-TOKEN": access_token} + results = [] + + try: + # URL encode the repository path + encoded_repository = urllib.parse.quote(repository, safe="") + merge_requests_url = f"{domain}/api/v4/projects/{encoded_repository}/merge_requests" + params = {"state": state} + + # Add time filters if provided + if start_time: + params["created_after"] = start_time + if end_time: + params["created_before"] = end_time + + response = requests.get(merge_requests_url, headers=headers, params=params) + response.raise_for_status() + merge_requests = response.json() + + for mr in merge_requests: + # Filter by target branch + if branch and mr["target_branch"] != branch: + continue + + results.append( + { + "id": mr["id"], + "title": mr["title"], + "author": mr["author"]["name"], + "web_url": mr["web_url"], + "target_branch": mr["target_branch"], + "created_at": mr["created_at"], + "state": mr["state"], + } + ) + except requests.RequestException as e: + print(f"Error fetching merge requests from GitLab: {e}") + + return results diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_mergerequests.yaml b/api/core/tools/provider/builtin/gitlab/tools/gitlab_mergerequests.yaml new file mode 100644 index 0000000000..4c886b69c0 --- /dev/null +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_mergerequests.yaml @@ -0,0 +1,77 @@ +identity: + name: gitlab_mergerequests + author: Leo.Wang + label: + en_US: GitLab Merge Requests + zh_Hans: GitLab 合并请求查询 +description: + human: + en_US: A tool for query 
GitLab merge requests. Input should be an existing repository or branch. + zh_Hans: 一个用于查询 GitLab 代码合并请求的工具,输入的内容应该是一个已存在的仓库名或者分支。 + llm: A tool for querying GitLab merge requests. Input should be an existing repository or branch. +parameters: + - name: repository + type: string + required: false + label: + en_US: repository + zh_Hans: 仓库路径 + human_description: + en_US: repository + zh_Hans: 仓库路径,以namespace/project_name的形式。 + llm_description: Repository path for GitLab, like namespace/project_name. + form: llm + - name: branch + type: string + required: false + label: + en_US: branch + zh_Hans: 分支名 + human_description: + en_US: branch + zh_Hans: 分支名 + llm_description: branch for GitLab + form: llm + - name: start_time + type: string + required: false + label: + en_US: start_time + zh_Hans: 开始时间 + human_description: + en_US: start_time + zh_Hans: 开始时间 + llm_description: Start time for GitLab + form: llm + - name: end_time + type: string + required: false + label: + en_US: end_time + zh_Hans: 结束时间 + human_description: + en_US: end_time + zh_Hans: 结束时间 + llm_description: End time for GitLab + form: llm + - name: state + type: select + required: false + options: + - value: opened + label: + en_US: opened + zh_Hans: 打开 + - value: closed + label: + en_US: closed + zh_Hans: 关闭 + default: opened + label: + en_US: state + zh_Hans: 变更状态 + human_description: + en_US: state + zh_Hans: 变更状态 + llm_description: Merge request state type for GitLab + form: llm diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_projects.py b/api/core/tools/provider/builtin/gitlab/tools/gitlab_projects.py new file mode 100644 index 0000000000..ea0c028b4f --- /dev/null +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_projects.py @@ -0,0 +1,81 @@ +import urllib.parse +from typing import Any, Union + +import requests + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool + + +class GitlabProjectsTool(BuiltinTool): + def _invoke(
+ self, user_id: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + project_name = tool_parameters.get("project_name", "") + page = tool_parameters.get("page", 1) + page_size = tool_parameters.get("page_size", 20) + + access_token = self.runtime.credentials.get("access_tokens") + site_url = self.runtime.credentials.get("site_url") + + if not access_token: + return self.create_text_message("Gitlab API Access Tokens is required.") + if not site_url: + site_url = "https://gitlab.com" + + # Get project content + result = self.fetch_projects(site_url, access_token, project_name, page, page_size) + + return [self.create_json_message(item) for item in result] + + def fetch_projects( + self, + site_url: str, + access_token: str, + project_name: str, + page: str, + page_size: str, + ) -> list[dict[str, Any]]: + domain = site_url + headers = {"PRIVATE-TOKEN": access_token} + results = [] + + try: + if project_name: + # URL encode the project name for the search query + encoded_project_name = urllib.parse.quote(project_name, safe="") + projects_url = ( + f"{domain}/api/v4/projects?search={encoded_project_name}&page={page}&per_page={page_size}" + ) + else: + projects_url = f"{domain}/api/v4/projects?page={page}&per_page={page_size}" + + response = requests.get(projects_url, headers=headers) + response.raise_for_status() + projects = response.json() + + for project in projects: + # Filter projects by exact name match if necessary + if project_name and project["name"].lower() == project_name.lower(): + results.append( + { + "id": project["id"], + "name": project["name"], + "description": project.get("description", ""), + "web_url": project["web_url"], + } + ) + elif not project_name: + # If no specific project name is provided, add all projects + results.append( + { + "id": project["id"], + "name": project["name"], + "description": project.get("description", ""), + "web_url": project["web_url"], + } + ) + except 
requests.RequestException as e: + print(f"Error fetching data from GitLab: {e}") + + return results diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_projects.yaml b/api/core/tools/provider/builtin/gitlab/tools/gitlab_projects.yaml new file mode 100644 index 0000000000..5fe098e1f7 --- /dev/null +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_projects.yaml @@ -0,0 +1,45 @@ +identity: + name: gitlab_projects + author: Leo.Wang + label: + en_US: GitLab Projects + zh_Hans: GitLab 项目列表查询 +description: + human: + en_US: A tool for querying GitLab projects. Input should be a project name. + zh_Hans: 一个用于查询 GitLab 项目列表的工具,输入的内容应该是一个项目名称。 + llm: A tool for querying GitLab projects. Input should be a project name. +parameters: + - name: project_name + type: string + required: false + label: + en_US: project_name + zh_Hans: 项目名称 + human_description: + en_US: project_name + zh_Hans: 项目名称 + llm_description: Project name for GitLab + form: llm + - name: page + type: string + required: false + label: + en_US: page + zh_Hans: 页码 + human_description: + en_US: page + zh_Hans: 页码 + llm_description: Page index for GitLab + form: llm + - name: page_size + type: string + required: false + label: + en_US: page_size + zh_Hans: 每页数量 + human_description: + en_US: page_size + zh_Hans: 每页数量 + llm_description: Page size for GitLab + form: llm diff --git a/api/core/tools/provider/builtin/lark_base/_assets/icon.png b/api/core/tools/provider/builtin/lark_base/_assets/icon.png new file mode 100644 index 0000000000..036e586772 Binary files /dev/null and b/api/core/tools/provider/builtin/lark_base/_assets/icon.png differ diff --git a/api/core/tools/provider/builtin/lark_base/lark_base.py b/api/core/tools/provider/builtin/lark_base/lark_base.py new file mode 100644 index 0000000000..de9b368311 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/lark_base.py @@ -0,0 +1,7 @@ +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController +from
core.tools.utils.lark_api_utils import lark_auth + + +class LarkBaseProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + lark_auth(credentials) diff --git a/api/core/tools/provider/builtin/lark_base/lark_base.yaml b/api/core/tools/provider/builtin/lark_base/lark_base.yaml new file mode 100644 index 0000000000..200b2e22cf --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/lark_base.yaml @@ -0,0 +1,36 @@ +identity: + author: Doug Lea + name: lark_base + label: + en_US: Lark Base + zh_Hans: Lark 多维表格 + description: + en_US: | + Lark base, requires the following permissions: bitable:app. + zh_Hans: | + Lark 多维表格,需要开通以下权限: bitable:app。 + icon: icon.png + tags: + - social + - productivity +credentials_for_provider: + app_id: + type: text-input + required: true + label: + en_US: APP ID + placeholder: + en_US: Please input your Lark app id + zh_Hans: 请输入你的 Lark app id + help: + en_US: Get your app_id and app_secret from Lark + zh_Hans: 从 Lark 获取您的 app_id 和 app_secret + url: https://open.larksuite.com/app + app_secret: + type: secret-input + required: true + label: + en_US: APP Secret + placeholder: + en_US: Please input your app secret + zh_Hans: 请输入你的 Lark app secret diff --git a/api/core/tools/provider/builtin/lark_base/tools/add_records.py b/api/core/tools/provider/builtin/lark_base/tools/add_records.py new file mode 100644 index 0000000000..c46898062a --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/add_records.py @@ -0,0 +1,21 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class AddRecordsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = 
LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + table_id = tool_parameters.get("table_id") + table_name = tool_parameters.get("table_name") + records = tool_parameters.get("records") + user_id_type = tool_parameters.get("user_id_type", "open_id") + + res = client.add_records(app_token, table_id, table_name, records, user_id_type) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/add_records.yaml b/api/core/tools/provider/builtin/lark_base/tools/add_records.yaml new file mode 100644 index 0000000000..f2a93490dc --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/add_records.yaml @@ -0,0 +1,91 @@ +identity: + name: add_records + author: Doug Lea + label: + en_US: Add Records + zh_Hans: 新增多条记录 +description: + human: + en_US: Add Multiple Records to Multidimensional Table + zh_Hans: 在多维表格数据表中新增多条记录 + llm: A tool for adding multiple records to a multidimensional table. (在多维表格数据表中新增多条记录) +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. + zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm + + - name: table_id + type: string + required: false + label: + en_US: table_id + zh_Hans: table_id + human_description: + en_US: Unique identifier for the multidimensional table data, either table_id or table_name must be provided, cannot be empty simultaneously. + zh_Hans: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + form: llm + + - name: table_name + type: string + required: false + label: + en_US: table_name + zh_Hans: table_name + human_description: + en_US: Name of the multidimensional table data, either table_name or table_id must be provided, cannot be empty simultaneously. 
+ zh_Hans: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + form: llm + + - name: records + type: string + required: true + label: + en_US: records + zh_Hans: 记录列表 + human_description: + en_US: | + List of records to be added in this request. Example value: [{"multi-line-text":"text content","single_select":"option 1","date":1674206443000}] + For supported field types, refer to the integration guide (https://open.larkoffice.com/document/server-docs/docs/bitable-v1/notification). For data structures of different field types, refer to the data structure overview (https://open.larkoffice.com/document/server-docs/docs/bitable-v1/bitable-structure). + zh_Hans: | + 本次请求将要新增的记录列表,示例值:[{"多行文本":"文本内容","单选":"选项 1","日期":1674206443000}]。 + 当前接口支持的字段类型请参考接入指南(https://open.larkoffice.com/document/server-docs/docs/bitable-v1/notification),不同类型字段的数据结构请参考数据结构概述(https://open.larkoffice.com/document/server-docs/docs/bitable-v1/bitable-structure)。 + llm_description: | + 本次请求将要新增的记录列表,示例值:[{"多行文本":"文本内容","单选":"选项 1","日期":1674206443000}]。 + 当前接口支持的字段类型请参考接入指南(https://open.larkoffice.com/document/server-docs/docs/bitable-v1/notification),不同类型字段的数据结构请参考数据结构概述(https://open.larkoffice.com/document/server-docs/docs/bitable-v1/bitable-structure)。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. 
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form diff --git a/api/core/tools/provider/builtin/lark_base/tools/create_base.py b/api/core/tools/provider/builtin/lark_base/tools/create_base.py new file mode 100644 index 0000000000..a857c6ced6 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/create_base.py @@ -0,0 +1,18 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class CreateBaseTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + name = tool_parameters.get("name") + folder_token = tool_parameters.get("folder_token") + + res = client.create_base(name, folder_token) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/create_base.yaml b/api/core/tools/provider/builtin/lark_base/tools/create_base.yaml new file mode 100644 index 0000000000..e622edf336 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/create_base.yaml @@ -0,0 +1,42 @@ +identity: + name: create_base + author: Doug Lea + label: + en_US: Create Base + zh_Hans: 创建多维表格 +description: + human: + en_US: Create Multidimensional Table in Specified Directory + zh_Hans: 在指定目录下创建多维表格 + llm: A tool for creating a multidimensional table in a specified directory. (在指定目录下创建多维表格) +parameters: + - name: name + type: string + required: false + label: + en_US: name + zh_Hans: 多维表格 App 名字 + human_description: + en_US: | + Name of the multidimensional table App. Example value: "A new multidimensional table". 
+ zh_Hans: 多维表格 App 名字,示例值:"一篇新的多维表格"。 + llm_description: 多维表格 App 名字,示例值:"一篇新的多维表格"。 + form: llm + + - name: folder_token + type: string + required: false + label: + en_US: folder_token + zh_Hans: 多维表格 App 归属文件夹 + human_description: + en_US: | + Folder where the multidimensional table App belongs. Default is empty, meaning the table will be created in the root directory of the cloud space. Example values: Lf8uf6BoAlWkUfdGtpMjUV0PpZd or https://lark-japan.jp.larksuite.com/drive/folder/Lf8uf6BoAlWkUfdGtpMjUV0PpZd. + The folder_token must be an existing folder and supports inputting folder token or folder URL. + zh_Hans: | + 多维表格 App 归属文件夹。默认为空,表示多维表格将被创建在云空间根目录。示例值: Lf8uf6BoAlWkUfdGtpMjUV0PpZd 或者 https://lark-japan.jp.larksuite.com/drive/folder/Lf8uf6BoAlWkUfdGtpMjUV0PpZd。 + folder_token 必须是已存在的文件夹,支持输入文件夹 token 或者文件夹 URL。 + llm_description: | + 多维表格 App 归属文件夹。默认为空,表示多维表格将被创建在云空间根目录。示例值: Lf8uf6BoAlWkUfdGtpMjUV0PpZd 或者 https://lark-japan.jp.larksuite.com/drive/folder/Lf8uf6BoAlWkUfdGtpMjUV0PpZd。 + folder_token 必须是已存在的文件夹,支持输入文件夹 token 或者文件夹 URL。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_base/tools/create_table.py b/api/core/tools/provider/builtin/lark_base/tools/create_table.py new file mode 100644 index 0000000000..aff7e715b7 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/create_table.py @@ -0,0 +1,20 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class CreateTableTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + table_name = tool_parameters.get("table_name") + default_view_name = 
tool_parameters.get("default_view_name") + fields = tool_parameters.get("fields") + + res = client.create_table(app_token, table_name, default_view_name, fields) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/create_table.yaml b/api/core/tools/provider/builtin/lark_base/tools/create_table.yaml new file mode 100644 index 0000000000..8b1007b9a5 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/create_table.yaml @@ -0,0 +1,61 @@ +identity: + name: create_table + author: Doug Lea + label: + en_US: Create Table + zh_Hans: 新增数据表 +description: + human: + en_US: Add a Data Table to Multidimensional Table + zh_Hans: 在多维表格中新增一个数据表 + llm: A tool for adding a data table to a multidimensional table. (在多维表格中新增一个数据表) +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. + zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm + + - name: table_name + type: string + required: true + label: + en_US: Table Name + zh_Hans: 数据表名称 + human_description: + en_US: | + The name of the data table, length range: 1 character to 100 characters. + zh_Hans: 数据表名称,长度范围:1 字符 ~ 100 字符。 + llm_description: 数据表名称,长度范围:1 字符 ~ 100 字符。 + form: llm + + - name: default_view_name + type: string + required: false + label: + en_US: Default View Name + zh_Hans: 默认表格视图的名称 + human_description: + en_US: The name of the default table view, defaults to "Table" if not filled. 
+ zh_Hans: 默认表格视图的名称,不填则默认为"表格"。 + llm_description: 默认表格视图的名称,不填则默认为"表格"。 + form: llm + + - name: fields + type: string + required: true + label: + en_US: Initial Fields + zh_Hans: 初始字段 + human_description: + en_US: | + Initial fields of the data table, format: [ { "field_name": "Multi-line Text","type": 1 },{ "field_name": "Number","type": 2 },{ "field_name": "Single Select","type": 3 },{ "field_name": "Multiple Select","type": 4 },{ "field_name": "Date","type": 5 } ]. For field details, refer to: https://open.larkoffice.com/document/server-docs/docs/bitable-v1/app-table-field/guide + zh_Hans: 数据表的初始字段,格式为:[{"field_name":"多行文本","type":1},{"field_name":"数字","type":2},{"field_name":"单选","type":3},{"field_name":"多选","type":4},{"field_name":"日期","type":5}]。字段详情参考:https://open.larkoffice.com/document/server-docs/docs/bitable-v1/app-table-field/guide + llm_description: 数据表的初始字段,格式为:[{"field_name":"多行文本","type":1},{"field_name":"数字","type":2},{"field_name":"单选","type":3},{"field_name":"多选","type":4},{"field_name":"日期","type":5}]。字段详情参考:https://open.larkoffice.com/document/server-docs/docs/bitable-v1/app-table-field/guide + form: llm diff --git a/api/core/tools/provider/builtin/lark_base/tools/delete_records.py b/api/core/tools/provider/builtin/lark_base/tools/delete_records.py new file mode 100644 index 0000000000..1b0a747050 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/delete_records.py @@ -0,0 +1,20 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class DeleteRecordsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + table_id = 
tool_parameters.get("table_id") + table_name = tool_parameters.get("table_name") + record_ids = tool_parameters.get("record_ids") + + res = client.delete_records(app_token, table_id, table_name, record_ids) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/delete_records.yaml b/api/core/tools/provider/builtin/lark_base/tools/delete_records.yaml new file mode 100644 index 0000000000..c30ebd630c --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/delete_records.yaml @@ -0,0 +1,86 @@ +identity: + name: delete_records + author: Doug Lea + label: + en_US: Delete Records + zh_Hans: 删除多条记录 +description: + human: + en_US: Delete Multiple Records from Multidimensional Table + zh_Hans: 删除多维表格数据表中的多条记录 + llm: A tool for deleting multiple records from a multidimensional table. (删除多维表格数据表中的多条记录) +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. + zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm + + - name: table_id + type: string + required: false + label: + en_US: table_id + zh_Hans: table_id + human_description: + en_US: Unique identifier for the multidimensional table data, either table_id or table_name must be provided, cannot be empty simultaneously. + zh_Hans: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + form: llm + + - name: table_name + type: string + required: false + label: + en_US: table_name + zh_Hans: table_name + human_description: + en_US: Name of the multidimensional table data, either table_name or table_id must be provided, cannot be empty simultaneously. 
+ zh_Hans: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + form: llm + + - name: record_ids + type: string + required: true + label: + en_US: Record IDs + zh_Hans: 记录 ID 列表 + human_description: + en_US: | + List of IDs for the records to be deleted, example value: ["recwNXzPQv"]. + zh_Hans: 删除的多条记录 ID 列表,示例值:["recwNXzPQv"]。 + llm_description: 删除的多条记录 ID 列表,示例值:["recwNXzPQv"]。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. + zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form diff --git a/api/core/tools/provider/builtin/lark_base/tools/delete_tables.py b/api/core/tools/provider/builtin/lark_base/tools/delete_tables.py new file mode 100644 index 0000000000..e0ecae2f17 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/delete_tables.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class DeleteTablesTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + table_ids = tool_parameters.get("table_ids") + table_names = 
tool_parameters.get("table_names") + + res = client.delete_tables(app_token, table_ids, table_names) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/delete_tables.yaml b/api/core/tools/provider/builtin/lark_base/tools/delete_tables.yaml new file mode 100644 index 0000000000..498126eae5 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/delete_tables.yaml @@ -0,0 +1,49 @@ +identity: + name: delete_tables + author: Doug Lea + label: + en_US: Delete Tables + zh_Hans: 删除数据表 +description: + human: + en_US: Batch Delete Data Tables from Multidimensional Table + zh_Hans: 批量删除多维表格中的数据表 + llm: A tool for batch deleting data tables from a multidimensional table. (批量删除多维表格中的数据表) +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. + zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm + + - name: table_ids + type: string + required: false + label: + en_US: Table IDs + zh_Hans: 数据表 ID + human_description: + en_US: | + IDs of the tables to be deleted. Each operation supports deleting up to 50 tables. Example: ["tbl1TkhyTWDkSoZ3"]. Ensure that either table_ids or table_names is not empty. + zh_Hans: 待删除的数据表的 ID,每次操作最多支持删除 50 个数据表。示例值:["tbl1TkhyTWDkSoZ3"]。请确保 table_ids 和 table_names 至少有一个不为空。 + llm_description: 待删除的数据表的 ID,每次操作最多支持删除 50 个数据表。示例值:["tbl1TkhyTWDkSoZ3"]。请确保 table_ids 和 table_names 至少有一个不为空。 + form: llm + + - name: table_names + type: string + required: false + label: + en_US: Table Names + zh_Hans: 数据表名称 + human_description: + en_US: | + Names of the tables to be deleted. Each operation supports deleting up to 50 tables. Example: ["Table1", "Table2"]. Ensure that either table_names or table_ids is not empty. 
+ zh_Hans: 待删除的数据表的名称,每次操作最多支持删除 50 个数据表。示例值:["数据表1", "数据表2"]。请确保 table_names 和 table_ids 至少有一个不为空。 + llm_description: 待删除的数据表的名称,每次操作最多支持删除 50 个数据表。示例值:["数据表1", "数据表2"]。请确保 table_names 和 table_ids 至少有一个不为空。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_base/tools/get_base_info.py b/api/core/tools/provider/builtin/lark_base/tools/get_base_info.py new file mode 100644 index 0000000000..2c23248b88 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/get_base_info.py @@ -0,0 +1,17 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class GetBaseInfoTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + + res = client.get_base_info(app_token) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/get_base_info.yaml b/api/core/tools/provider/builtin/lark_base/tools/get_base_info.yaml new file mode 100644 index 0000000000..eb0e7a26c0 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/get_base_info.yaml @@ -0,0 +1,23 @@ +identity: + name: get_base_info + author: Doug Lea + label: + en_US: Get Base Info + zh_Hans: 获取多维表格元数据 +description: + human: + en_US: Get Metadata Information of Specified Multidimensional Table + zh_Hans: 获取指定多维表格的元数据信息 + llm: A tool for getting metadata information of a specified multidimensional table. (获取指定多维表格的元数据信息) +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. 
+ zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_base/tools/list_tables.py b/api/core/tools/provider/builtin/lark_base/tools/list_tables.py new file mode 100644 index 0000000000..55b706854b --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/list_tables.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class ListTablesTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + page_token = tool_parameters.get("page_token") + page_size = tool_parameters.get("page_size", 20) + + res = client.list_tables(app_token, page_token, page_size) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/list_tables.yaml b/api/core/tools/provider/builtin/lark_base/tools/list_tables.yaml new file mode 100644 index 0000000000..7571519039 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/list_tables.yaml @@ -0,0 +1,50 @@ +identity: + name: list_tables + author: Doug Lea + label: + en_US: List Tables + zh_Hans: 列出数据表 +description: + human: + en_US: Get All Data Tables under Multidimensional Table + zh_Hans: 获取多维表格下的所有数据表 + llm: A tool for getting all data tables under a multidimensional table. (获取多维表格下的所有数据表) +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. 
+ zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm + + - name: page_size + type: number + required: false + default: 20 + label: + en_US: page_size + zh_Hans: 分页大小 + human_description: + en_US: | + Page size, default value: 20, maximum value: 100. + zh_Hans: 分页大小,默认值:20,最大值:100。 + llm_description: 分页大小,默认值:20,最大值:100。 + form: form + + - name: page_token + type: string + required: false + label: + en_US: page_token + zh_Hans: 分页标记 + human_description: + en_US: | + Page token, leave empty for the first request to start from the beginning; a new page_token will be returned if there are more items in the paginated query results, which can be used for the next traversal. Example value: "tblsRc9GRRXKqhvW". + zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。示例值:"tblsRc9GRRXKqhvW"。 + llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。示例值:"tblsRc9GRRXKqhvW"。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_base/tools/read_records.py b/api/core/tools/provider/builtin/lark_base/tools/read_records.py new file mode 100644 index 0000000000..5cf25aad84 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/read_records.py @@ -0,0 +1,21 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class ReadRecordsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + table_id = tool_parameters.get("table_id") + table_name = tool_parameters.get("table_name") + record_ids = tool_parameters.get("record_ids") + user_id_type = 
tool_parameters.get("user_id_type", "open_id") + + res = client.read_records(app_token, table_id, table_name, record_ids, user_id_type) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/read_records.yaml b/api/core/tools/provider/builtin/lark_base/tools/read_records.yaml new file mode 100644 index 0000000000..911e667cfc --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/read_records.yaml @@ -0,0 +1,86 @@ +identity: + name: read_records + author: Doug Lea + label: + en_US: Read Records + zh_Hans: 批量获取记录 +description: + human: + en_US: Batch Retrieve Records from Multidimensional Table + zh_Hans: 批量获取多维表格数据表中的记录信息 + llm: A tool for batch retrieving records from a multidimensional table, supporting up to 100 records per call. (批量获取多维表格数据表中的记录信息,单次调用最多支持查询 100 条记录) + +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. + zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm + + - name: table_id + type: string + required: false + label: + en_US: table_id + zh_Hans: table_id + human_description: + en_US: Unique identifier for the multidimensional table data, either table_id or table_name must be provided, cannot be empty simultaneously. + zh_Hans: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + form: llm + + - name: table_name + type: string + required: false + label: + en_US: table_name + zh_Hans: table_name + human_description: + en_US: Name of the multidimensional table data, either table_name or table_id must be provided, cannot be empty simultaneously. 
+ zh_Hans: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + form: llm + + - name: record_ids + type: string + required: true + label: + en_US: record_ids + zh_Hans: 记录 ID 列表 + human_description: + en_US: List of record IDs, which can be obtained by calling the "Query Records API". + zh_Hans: 记录 ID 列表,可以通过调用"查询记录接口"获取。 + llm_description: 记录 ID 列表,可以通过调用"查询记录接口"获取。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. + zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form diff --git a/api/core/tools/provider/builtin/lark_base/tools/search_records.py b/api/core/tools/provider/builtin/lark_base/tools/search_records.py new file mode 100644 index 0000000000..9b0abcf067 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/search_records.py @@ -0,0 +1,39 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class SearchRecordsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + table_id = tool_parameters.get("table_id") + table_name = tool_parameters.get("table_name") + view_id = 
tool_parameters.get("view_id") + field_names = tool_parameters.get("field_names") + sort = tool_parameters.get("sort") + filters = tool_parameters.get("filter") + page_token = tool_parameters.get("page_token") + automatic_fields = tool_parameters.get("automatic_fields", False) + user_id_type = tool_parameters.get("user_id_type", "open_id") + page_size = tool_parameters.get("page_size", 20) + + res = client.search_record( + app_token, + table_id, + table_name, + view_id, + field_names, + sort, + filters, + page_token, + automatic_fields, + user_id_type, + page_size, + ) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/search_records.yaml b/api/core/tools/provider/builtin/lark_base/tools/search_records.yaml new file mode 100644 index 0000000000..edd86ab9d6 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/search_records.yaml @@ -0,0 +1,163 @@ +identity: + name: search_records + author: Doug Lea + label: + en_US: Search Records + zh_Hans: 查询记录 +description: + human: + en_US: Query records in a multidimensional table, up to 500 rows per query. + zh_Hans: 查询多维表格数据表中的记录,单次最多查询 500 行记录。 + llm: A tool for querying records in a multidimensional table, up to 500 rows per query. (查询多维表格数据表中的记录,单次最多查询 500 行记录) +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. + zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm + + - name: table_id + type: string + required: false + label: + en_US: table_id + zh_Hans: table_id + human_description: + en_US: Unique identifier for the multidimensional table data, either table_id or table_name must be provided, cannot be empty simultaneously. 
+ zh_Hans: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + form: llm + + - name: table_name + type: string + required: false + label: + en_US: table_name + zh_Hans: table_name + human_description: + en_US: Name of the multidimensional table data, either table_name or table_id must be provided, cannot be empty simultaneously. + zh_Hans: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + form: llm + + - name: view_id + type: string + required: false + label: + en_US: view_id + zh_Hans: 视图唯一标识 + human_description: + en_US: | + Unique identifier for a view in a multidimensional table. It can be found in the URL's query parameter with the key 'view'. For example: https://lark-japan.jp.larksuite.com/base/XXX0bfYEraW5OWsbhcFjEqj6pxh?table=tbl5I6jqwz8wBRMv&view=vewW5zXVEU. + zh_Hans: 多维表格中视图的唯一标识,可在多维表格的 URL 地址栏中找到,query 参数中 key 为 view 的部分。例如:https://lark-japan.jp.larksuite.com/base/XXX0bfYEraW5OWsbhcFjEqj6pxh?table=tbl5I6jqwz8wBRMv&view=vewW5zXVEU。 + llm_description: 多维表格中视图的唯一标识,可在多维表格的 URL 地址栏中找到,query 参数中 key 为 view 的部分。例如:https://lark-japan.jp.larksuite.com/base/XXX0bfYEraW5OWsbhcFjEqj6pxh?table=tbl5I6jqwz8wBRMv&view=vewW5zXVEU。 + form: llm + + - name: field_names + type: string + required: false + label: + en_US: field_names + zh_Hans: 字段名称 + human_description: + en_US: | + Field names to specify which fields to include in the returned records. Example value: ["Field1", "Field2"]. + zh_Hans: 字段名称,用于指定本次查询返回记录中包含的字段。示例值:["字段1","字段2"]。 + llm_description: 字段名称,用于指定本次查询返回记录中包含的字段。示例值:["字段1","字段2"]。 + form: llm + + - name: sort + type: string + required: false + label: + en_US: sort + zh_Hans: 排序条件 + human_description: + en_US: | + Sorting conditions, for example: [{"field_name":"Multiline Text","desc":true}]. 
+ zh_Hans: 排序条件,例如:[{"field_name":"多行文本","desc":true}]。 + llm_description: 排序条件,例如:[{"field_name":"多行文本","desc":true}]。 + form: llm + + - name: filter + type: string + required: false + label: + en_US: filter + zh_Hans: 筛选条件 + human_description: + en_US: Object containing filter information. For details on how to fill in the filter, refer to the record filter parameter guide (https://open.larkoffice.com/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/record-filter-guide). + zh_Hans: 包含条件筛选信息的对象。了解如何填写 filter,参考记录筛选参数填写指南(https://open.larkoffice.com/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/record-filter-guide)。 + llm_description: 包含条件筛选信息的对象。了解如何填写 filter,参考记录筛选参数填写指南(https://open.larkoffice.com/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/record-filter-guide)。 + form: llm + + - name: automatic_fields + type: boolean + required: false + label: + en_US: automatic_fields + zh_Hans: automatic_fields + human_description: + en_US: Whether to return automatically calculated fields. Default is false, meaning they are not returned. + zh_Hans: 是否返回自动计算的字段。默认为 false,表示不返回。 + llm_description: 是否返回自动计算的字段。默认为 false,表示不返回。 + form: form + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. 
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form + + - name: page_size + type: number + required: false + default: 20 + label: + en_US: page_size + zh_Hans: 分页大小 + human_description: + en_US: | + Page size, default value: 20, maximum value: 500. + zh_Hans: 分页大小,默认值:20,最大值:500。 + llm_description: 分页大小,默认值:20,最大值:500。 + form: form + + - name: page_token + type: string + required: false + label: + en_US: page_token + zh_Hans: 分页标记 + human_description: + en_US: | + Page token, leave empty for the first request to start from the beginning; a new page_token will be returned if there are more items in the paginated query results, which can be used for the next traversal. Example value: "tblsRc9GRRXKqhvW". + zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。示例值:"tblsRc9GRRXKqhvW"。 + llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。示例值:"tblsRc9GRRXKqhvW"。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_base/tools/update_records.py b/api/core/tools/provider/builtin/lark_base/tools/update_records.py new file mode 100644 index 0000000000..7c263df2bb --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/update_records.py @@ -0,0 +1,21 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class UpdateRecordsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + app_token = tool_parameters.get("app_token") + table_id = tool_parameters.get("table_id") + table_name = tool_parameters.get("table_name") + records = 
tool_parameters.get("records") + user_id_type = tool_parameters.get("user_id_type", "open_id") + + res = client.update_records(app_token, table_id, table_name, records, user_id_type) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_base/tools/update_records.yaml b/api/core/tools/provider/builtin/lark_base/tools/update_records.yaml new file mode 100644 index 0000000000..68117e7136 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_base/tools/update_records.yaml @@ -0,0 +1,91 @@ +identity: + name: update_records + author: Doug Lea + label: + en_US: Update Records + zh_Hans: 更新多条记录 +description: + human: + en_US: Update Multiple Records in Multidimensional Table + zh_Hans: 更新多维表格数据表中的多条记录 + llm: A tool for updating multiple records in a multidimensional table. (更新多维表格数据表中的多条记录) +parameters: + - name: app_token + type: string + required: true + label: + en_US: app_token + zh_Hans: app_token + human_description: + en_US: Unique identifier for the multidimensional table, supports inputting document URL. + zh_Hans: 多维表格的唯一标识符,支持输入文档 URL。 + llm_description: 多维表格的唯一标识符,支持输入文档 URL。 + form: llm + + - name: table_id + type: string + required: false + label: + en_US: table_id + zh_Hans: table_id + human_description: + en_US: Unique identifier for the multidimensional table data, either table_id or table_name must be provided, cannot be empty simultaneously. + zh_Hans: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的唯一标识符,table_id 和 table_name 至少需要提供一个,不能同时为空。 + form: llm + + - name: table_name + type: string + required: false + label: + en_US: table_name + zh_Hans: table_name + human_description: + en_US: Name of the multidimensional table data, either table_name or table_id must be provided, cannot be empty simultaneously. 
+ zh_Hans: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + llm_description: 多维表格数据表的名称,table_name 和 table_id 至少需要提供一个,不能同时为空。 + form: llm + + - name: records + type: string + required: true + label: + en_US: records + zh_Hans: 记录列表 + human_description: + en_US: | + List of records to be updated in this request. Example value: [{"fields":{"multi-line-text":"text content","single_select":"option 1","date":1674206443000},"record_id":"recupK4f4RM5RX"}]. + For supported field types, refer to the integration guide (https://open.larkoffice.com/document/server-docs/docs/bitable-v1/notification). For data structures of different field types, refer to the data structure overview (https://open.larkoffice.com/document/server-docs/docs/bitable-v1/bitable-structure). + zh_Hans: | + 本次请求将要更新的记录列表,示例值:[{"fields":{"多行文本":"文本内容","单选":"选项 1","日期":1674206443000},"record_id":"recupK4f4RM5RX"}]。 + 当前接口支持的字段类型请参考接入指南(https://open.larkoffice.com/document/server-docs/docs/bitable-v1/notification),不同类型字段的数据结构请参考数据结构概述(https://open.larkoffice.com/document/server-docs/docs/bitable-v1/bitable-structure)。 + llm_description: | + 本次请求将要更新的记录列表,示例值:[{"fields":{"多行文本":"文本内容","单选":"选项 1","日期":1674206443000},"record_id":"recupK4f4RM5RX"}]。 + 当前接口支持的字段类型请参考接入指南(https://open.larkoffice.com/document/server-docs/docs/bitable-v1/notification),不同类型字段的数据结构请参考数据结构概述(https://open.larkoffice.com/document/server-docs/docs/bitable-v1/bitable-structure)。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. 
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form diff --git a/api/core/tools/provider/builtin/lark_calendar/_assets/icon.png b/api/core/tools/provider/builtin/lark_calendar/_assets/icon.png new file mode 100644 index 0000000000..2a934747a9 Binary files /dev/null and b/api/core/tools/provider/builtin/lark_calendar/_assets/icon.png differ diff --git a/api/core/tools/provider/builtin/lark_calendar/lark_calendar.py b/api/core/tools/provider/builtin/lark_calendar/lark_calendar.py new file mode 100644 index 0000000000..871de69cc1 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/lark_calendar.py @@ -0,0 +1,7 @@ +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController +from core.tools.utils.lark_api_utils import lark_auth + + +class LarkCalendarProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + lark_auth(credentials) diff --git a/api/core/tools/provider/builtin/lark_calendar/lark_calendar.yaml b/api/core/tools/provider/builtin/lark_calendar/lark_calendar.yaml new file mode 100644 index 0000000000..72c41e36c0 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/lark_calendar.yaml @@ -0,0 +1,36 @@ +identity: + author: Doug Lea + name: lark_calendar + label: + en_US: Lark Calendar + zh_Hans: Lark 日历 + description: + en_US: | + Lark calendar, requires the following permissions: calendar:calendar:read、calendar:calendar、contact:user.id:readonly. 
+ zh_Hans: | + Lark 日历,需要开通以下权限: calendar:calendar:read、calendar:calendar、contact:user.id:readonly。 + icon: icon.png + tags: + - social + - productivity +credentials_for_provider: + app_id: + type: text-input + required: true + label: + en_US: APP ID + placeholder: + en_US: Please input your Lark app id + zh_Hans: 请输入你的 Lark app id + help: + en_US: Get your app_id and app_secret from Lark + zh_Hans: 从 Lark 获取您的 app_id 和 app_secret + url: https://open.larksuite.com/app + app_secret: + type: secret-input + required: true + label: + en_US: APP Secret + placeholder: + en_US: Please input your app secret + zh_Hans: 请输入你的 Lark app secret diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/add_event_attendees.py b/api/core/tools/provider/builtin/lark_calendar/tools/add_event_attendees.py new file mode 100644 index 0000000000..f5929893dd --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/add_event_attendees.py @@ -0,0 +1,20 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class AddEventAttendeesTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + event_id = tool_parameters.get("event_id") + attendee_phone_or_email = tool_parameters.get("attendee_phone_or_email") + need_notification = tool_parameters.get("need_notification", True) + + res = client.add_event_attendees(event_id, attendee_phone_or_email, need_notification) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/add_event_attendees.yaml b/api/core/tools/provider/builtin/lark_calendar/tools/add_event_attendees.yaml new file mode 100644 index 0000000000..9d7a131907 
--- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/add_event_attendees.yaml @@ -0,0 +1,54 @@ +identity: + name: add_event_attendees + author: Doug Lea + label: + en_US: Add Event Attendees + zh_Hans: 添加日程参会人 +description: + human: + en_US: Add Event Attendees + zh_Hans: 添加日程参会人 + llm: A tool for adding attendees to events in Lark. (在 Lark 中添加日程参会人) +parameters: + - name: event_id + type: string + required: true + label: + en_US: Event ID + zh_Hans: 日程 ID + human_description: + en_US: | + The ID of the event, which will be returned when the event is created. For example: fb2a6406-26d6-4c8d-a487-6f0246c94d2f_0. + zh_Hans: | + 创建日程时会返回日程 ID。例如: fb2a6406-26d6-4c8d-a487-6f0246c94d2f_0。 + llm_description: | + 日程 ID,创建日程时会返回日程 ID。例如: fb2a6406-26d6-4c8d-a487-6f0246c94d2f_0。 + form: llm + + - name: need_notification + type: boolean + required: false + default: true + label: + en_US: Need Notification + zh_Hans: 是否需要通知 + human_description: + en_US: | + Whether to send a Bot notification to attendees. true: send, false: do not send. + zh_Hans: | + 是否给参与人发送 Bot 通知,true: 发送,false: 不发送。 + llm_description: | + 是否给参与人发送 Bot 通知,true: 发送,false: 不发送。 + form: form + + - name: attendee_phone_or_email + type: string + required: true + label: + en_US: Attendee Phone or Email + zh_Hans: 参会人电话或邮箱 + human_description: + en_US: The list of attendee emails or phone numbers, separated by commas. 
+ zh_Hans: 日程参会人邮箱或者手机号列表,使用逗号分隔。 + llm_description: 日程参会人邮箱或者手机号列表,使用逗号分隔。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/create_event.py b/api/core/tools/provider/builtin/lark_calendar/tools/create_event.py new file mode 100644 index 0000000000..8a0726008c --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/create_event.py @@ -0,0 +1,26 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class CreateEventTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + summary = tool_parameters.get("summary") + description = tool_parameters.get("description") + start_time = tool_parameters.get("start_time") + end_time = tool_parameters.get("end_time") + attendee_ability = tool_parameters.get("attendee_ability") + need_notification = tool_parameters.get("need_notification", True) + auto_record = tool_parameters.get("auto_record", False) + + res = client.create_event( + summary, description, start_time, end_time, attendee_ability, need_notification, auto_record + ) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/create_event.yaml b/api/core/tools/provider/builtin/lark_calendar/tools/create_event.yaml new file mode 100644 index 0000000000..b738736e63 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/create_event.yaml @@ -0,0 +1,119 @@ +identity: + name: create_event + author: Doug Lea + label: + en_US: Create Event + zh_Hans: 创建日程 +description: + human: + en_US: Create Event + zh_Hans: 创建日程 + llm: A tool for creating events in Lark.(创建 Lark 日程) +parameters: + - name: summary + type: string 
+ required: false + label: + en_US: Summary + zh_Hans: 日程标题 + human_description: + en_US: The title of the event. If not filled, the event title will display (No Subject). + zh_Hans: 日程标题,若不填则日程标题显示 (无主题)。 + llm_description: 日程标题,若不填则日程标题显示 (无主题)。 + form: llm + + - name: description + type: string + required: false + label: + en_US: Description + zh_Hans: 日程描述 + human_description: + en_US: The description of the event. + zh_Hans: 日程描述。 + llm_description: 日程描述。 + form: llm + + - name: need_notification + type: boolean + required: false + default: true + label: + en_US: Need Notification + zh_Hans: 是否发送通知 + human_description: + en_US: | + Whether to send a bot message when the event is created, true: send, false: do not send. + zh_Hans: 创建日程时是否发送 bot 消息,true:发送,false:不发送。 + llm_description: 创建日程时是否发送 bot 消息,true:发送,false:不发送。 + form: form + + - name: start_time + type: string + required: true + label: + en_US: Start Time + zh_Hans: 开始时间 + human_description: + en_US: | + The start time of the event, format: 2006-01-02 15:04:05. + zh_Hans: 日程开始时间,格式:2006-01-02 15:04:05。 + llm_description: 日程开始时间,格式:2006-01-02 15:04:05。 + form: llm + + - name: end_time + type: string + required: true + label: + en_US: End Time + zh_Hans: 结束时间 + human_description: + en_US: | + The end time of the event, format: 2006-01-02 15:04:05. 
+ zh_Hans: 日程结束时间,格式:2006-01-02 15:04:05。 + llm_description: 日程结束时间,格式:2006-01-02 15:04:05。 + form: llm + + - name: attendee_ability + type: select + required: false + options: + - value: none + label: + en_US: none + zh_Hans: 无 + - value: can_see_others + label: + en_US: can_see_others + zh_Hans: 可以查看参与人列表 + - value: can_invite_others + label: + en_US: can_invite_others + zh_Hans: 可以邀请其它参与人 + - value: can_modify_event + label: + en_US: can_modify_event + zh_Hans: 可以编辑日程 + default: "none" + label: + en_US: attendee_ability + zh_Hans: 参会人权限 + human_description: + en_US: Attendee ability, optional values are none, can_see_others, can_invite_others, can_modify_event, with a default value of none. + zh_Hans: 参会人权限,可选值有无、可以查看参与人列表、可以邀请其它参与人、可以编辑日程,默认值为无。 + llm_description: 参会人权限,可选值有无、可以查看参与人列表、可以邀请其它参与人、可以编辑日程,默认值为无。 + form: form + + - name: auto_record + type: boolean + required: false + default: false + label: + en_US: Auto Record + zh_Hans: 自动录制 + human_description: + en_US: | + Whether to enable automatic recording, true: enabled, automatically record when the meeting starts; false: not enabled. 
+ zh_Hans: 是否开启自动录制,true:开启,会议开始后自动录制;false:不开启。 + llm_description: 是否开启自动录制,true:开启,会议开始后自动录制;false:不开启。 + form: form diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/delete_event.py b/api/core/tools/provider/builtin/lark_calendar/tools/delete_event.py new file mode 100644 index 0000000000..0e4ceac5e5 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/delete_event.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class DeleteEventTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + event_id = tool_parameters.get("event_id") + need_notification = tool_parameters.get("need_notification", True) + + res = client.delete_event(event_id, need_notification) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/delete_event.yaml b/api/core/tools/provider/builtin/lark_calendar/tools/delete_event.yaml new file mode 100644 index 0000000000..cdd6d7e1bb --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/delete_event.yaml @@ -0,0 +1,38 @@ +identity: + name: delete_event + author: Doug Lea + label: + en_US: Delete Event + zh_Hans: 删除日程 +description: + human: + en_US: Delete Event + zh_Hans: 删除日程 + llm: A tool for deleting events in Lark.(在 Lark 中删除日程) +parameters: + - name: event_id + type: string + required: true + label: + en_US: Event ID + zh_Hans: 日程 ID + human_description: + en_US: | + The ID of the event, for example: e8b9791c-39ae-4908-8ad8-66b13159b9fb_0. 
+ zh_Hans: 日程 ID,例如:e8b9791c-39ae-4908-8ad8-66b13159b9fb_0。 + llm_description: 日程 ID,例如:e8b9791c-39ae-4908-8ad8-66b13159b9fb_0。 + form: llm + + - name: need_notification + type: boolean + required: false + default: true + label: + en_US: Need Notification + zh_Hans: 是否需要通知 + human_description: + en_US: | + Indicates whether to send bot notifications to event participants upon deletion. true: send, false: do not send. + zh_Hans: 删除日程是否给日程参与人发送 bot 通知,true:发送,false:不发送。 + llm_description: 删除日程是否给日程参与人发送 bot 通知,true:发送,false:不发送。 + form: form diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/get_primary_calendar.py b/api/core/tools/provider/builtin/lark_calendar/tools/get_primary_calendar.py new file mode 100644 index 0000000000..d315bf35f0 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/get_primary_calendar.py @@ -0,0 +1,18 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class GetPrimaryCalendarTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + user_id_type = tool_parameters.get("user_id_type", "open_id") + + res = client.get_primary_calendar(user_id_type) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/get_primary_calendar.yaml b/api/core/tools/provider/builtin/lark_calendar/tools/get_primary_calendar.yaml new file mode 100644 index 0000000000..fe61594770 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/get_primary_calendar.yaml @@ -0,0 +1,37 @@ +identity: + name: get_primary_calendar + author: Doug Lea + label: + en_US: Get Primary Calendar + zh_Hans: 查询主日历信息 +description: + 
human: + en_US: Get Primary Calendar + zh_Hans: 查询主日历信息 + llm: A tool for querying primary calendar information in Lark.(在 Lark 中查询主日历信息) +parameters: + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. + zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/list_events.py b/api/core/tools/provider/builtin/lark_calendar/tools/list_events.py new file mode 100644 index 0000000000..d74cc049d3 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/list_events.py @@ -0,0 +1,21 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class ListEventsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + start_time = tool_parameters.get("start_time") + end_time = tool_parameters.get("end_time") + page_token = tool_parameters.get("page_token") + page_size = tool_parameters.get("page_size") + + res = client.list_events(start_time, end_time, page_token, page_size) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/list_events.yaml 
b/api/core/tools/provider/builtin/lark_calendar/tools/list_events.yaml new file mode 100644 index 0000000000..cef332f527 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/list_events.yaml @@ -0,0 +1,62 @@ +identity: + name: list_events + author: Doug Lea + label: + en_US: List Events + zh_Hans: 获取日程列表 +description: + human: + en_US: List Events + zh_Hans: 获取日程列表 + llm: A tool for listing events in Lark.(在 Lark 中获取日程列表) +parameters: + - name: start_time + type: string + required: false + label: + en_US: Start Time + zh_Hans: 开始时间 + human_description: + en_US: | + The start time, defaults to 0:00 of the current day if not provided, format: 2006-01-02 15:04:05. + zh_Hans: 开始时间,不传值时默认当天 0 点时间,格式为:2006-01-02 15:04:05。 + llm_description: 开始时间,不传值时默认当天 0 点时间,格式为:2006-01-02 15:04:05。 + form: llm + + - name: end_time + type: string + required: false + label: + en_US: End Time + zh_Hans: 结束时间 + human_description: + en_US: | + The end time, defaults to 23:59 of the current day if not provided, format: 2006-01-02 15:04:05. + zh_Hans: 结束时间,不传值时默认当天 23:59 分时间,格式为:2006-01-02 15:04:05。 + llm_description: 结束时间,不传值时默认当天 23:59 分时间,格式为:2006-01-02 15:04:05。 + form: llm + + - name: page_size + type: number + required: false + default: 50 + label: + en_US: Page Size + zh_Hans: 分页大小 + human_description: + en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 50, and the value range is [50,1000]. + zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 50,取值范围为 [50,1000]。 + llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 50,取值范围为 [50,1000]。 + form: form + + - name: page_token + type: string + required: false + label: + en_US: Page Token + zh_Hans: 分页标记 + human_description: + en_US: The pagination token. 
Leave it blank for the first request, indicating to start traversing from the beginning; when the pagination query result has more items, a new page_token will be returned simultaneously, which can be used to obtain the query result in the next traversal. + zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/search_events.py b/api/core/tools/provider/builtin/lark_calendar/tools/search_events.py new file mode 100644 index 0000000000..a20038e47d --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/search_events.py @@ -0,0 +1,23 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class SearchEventsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + query = tool_parameters.get("query") + start_time = tool_parameters.get("start_time") + end_time = tool_parameters.get("end_time") + page_token = tool_parameters.get("page_token") + user_id_type = tool_parameters.get("user_id_type", "open_id") + page_size = tool_parameters.get("page_size", 20) + + res = client.search_events(query, start_time, end_time, page_token, user_id_type, page_size) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/search_events.yaml b/api/core/tools/provider/builtin/lark_calendar/tools/search_events.yaml new file mode 100644 index 0000000000..4d4f8819c1 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/search_events.yaml @@ -0,0 
+1,100 @@ +identity: + name: search_events + author: Doug Lea + label: + en_US: Search Events + zh_Hans: 搜索日程 +description: + human: + en_US: Search Events + zh_Hans: 搜索日程 + llm: A tool for searching events in Lark.(在 Lark 中搜索日程) +parameters: + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. + zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form + + - name: query + type: string + required: true + label: + en_US: Query + zh_Hans: 搜索关键字 + human_description: + en_US: The search keyword used for fuzzy searching event names, with a maximum input of 200 characters. + zh_Hans: 用于模糊查询日程名称的搜索关键字,最大输入 200 字符。 + llm_description: 用于模糊查询日程名称的搜索关键字,最大输入 200 字符。 + form: llm + + - name: start_time + type: string + required: false + label: + en_US: Start Time + zh_Hans: 开始时间 + human_description: + en_US: | + The start time, defaults to 0:00 of the current day if not provided, format: 2006-01-02 15:04:05. + zh_Hans: 开始时间,不传值时默认当天 0 点时间,格式为:2006-01-02 15:04:05。 + llm_description: 开始时间,不传值时默认当天 0 点时间,格式为:2006-01-02 15:04:05。 + form: llm + + - name: end_time + type: string + required: false + label: + en_US: End Time + zh_Hans: 结束时间 + human_description: + en_US: | + The end time, defaults to 23:59 of the current day if not provided, format: 2006-01-02 15:04:05. 
+ zh_Hans: 结束时间,不传值时默认当天 23:59 分时间,格式为:2006-01-02 15:04:05。 + llm_description: 结束时间,不传值时默认当天 23:59 分时间,格式为:2006-01-02 15:04:05。 + form: llm + + - name: page_size + type: number + required: false + default: 20 + label: + en_US: Page Size + zh_Hans: 分页大小 + human_description: + en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [10,100]. + zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [10,100]。 + llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [10,100]。 + form: form + + - name: page_token + type: string + required: false + label: + en_US: Page Token + zh_Hans: 分页标记 + human_description: + en_US: The pagination token. Leave it blank for the first request, indicating to start traversing from the beginning; when the pagination query result has more items, a new page_token will be returned simultaneously, which can be used to obtain the query result in the next traversal. + zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/update_event.py b/api/core/tools/provider/builtin/lark_calendar/tools/update_event.py new file mode 100644 index 0000000000..a04029377f --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/update_event.py @@ -0,0 +1,24 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class UpdateEventTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + event_id = tool_parameters.get("event_id") 
+ summary = tool_parameters.get("summary") + description = tool_parameters.get("description") + need_notification = tool_parameters.get("need_notification", True) + start_time = tool_parameters.get("start_time") + end_time = tool_parameters.get("end_time") + auto_record = tool_parameters.get("auto_record", False) + + res = client.update_event(event_id, summary, description, need_notification, start_time, end_time, auto_record) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_calendar/tools/update_event.yaml b/api/core/tools/provider/builtin/lark_calendar/tools/update_event.yaml new file mode 100644 index 0000000000..b9992e5b03 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_calendar/tools/update_event.yaml @@ -0,0 +1,100 @@ +identity: + name: update_event + author: Doug Lea + label: + en_US: Update Event + zh_Hans: 更新日程 +description: + human: + en_US: Update Event + zh_Hans: 更新日程 + llm: A tool for updating events in Lark.(更新 Lark 中的日程) +parameters: + - name: event_id + type: string + required: true + label: + en_US: Event ID + zh_Hans: 日程 ID + human_description: + en_US: | + The ID of the event, for example: e8b9791c-39ae-4908-8ad8-66b13159b9fb_0. + zh_Hans: 日程 ID,例如:e8b9791c-39ae-4908-8ad8-66b13159b9fb_0。 + llm_description: 日程 ID,例如:e8b9791c-39ae-4908-8ad8-66b13159b9fb_0。 + form: llm + + - name: summary + type: string + required: false + label: + en_US: Summary + zh_Hans: 日程标题 + human_description: + en_US: The title of the event. + zh_Hans: 日程标题。 + llm_description: 日程标题。 + form: llm + + - name: description + type: string + required: false + label: + en_US: Description + zh_Hans: 日程描述 + human_description: + en_US: The description of the event. 
+ zh_Hans: 日程描述。 + llm_description: 日程描述。 + form: llm + + - name: need_notification + type: boolean + required: false + label: + en_US: Need Notification + zh_Hans: 是否发送通知 + human_description: + en_US: | + Whether to send a bot message when the event is updated, true: send, false: do not send. + zh_Hans: 更新日程时是否发送 bot 消息,true:发送,false:不发送。 + llm_description: 更新日程时是否发送 bot 消息,true:发送,false:不发送。 + form: form + + - name: start_time + type: string + required: false + label: + en_US: Start Time + zh_Hans: 开始时间 + human_description: + en_US: | + The start time of the event, format: 2006-01-02 15:04:05. + zh_Hans: 日程开始时间,格式:2006-01-02 15:04:05。 + llm_description: 日程开始时间,格式:2006-01-02 15:04:05。 + form: llm + + - name: end_time + type: string + required: false + label: + en_US: End Time + zh_Hans: 结束时间 + human_description: + en_US: | + The end time of the event, format: 2006-01-02 15:04:05. + zh_Hans: 日程结束时间,格式:2006-01-02 15:04:05。 + llm_description: 日程结束时间,格式:2006-01-02 15:04:05。 + form: llm + + - name: auto_record + type: boolean + required: false + label: + en_US: Auto Record + zh_Hans: 自动录制 + human_description: + en_US: | + Whether to enable automatic recording, true: enabled, automatically record when the meeting starts; false: not enabled. 
+ zh_Hans: 是否开启自动录制,true:开启,会议开始后自动录制;false:不开启。 + llm_description: 是否开启自动录制,true:开启,会议开始后自动录制;false:不开启。 + form: form diff --git a/api/core/tools/provider/builtin/lark_document/_assets/icon.svg b/api/core/tools/provider/builtin/lark_document/_assets/icon.svg new file mode 100644 index 0000000000..5a0a6416b3 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/_assets/icon.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/api/core/tools/provider/builtin/lark_document/lark_document.py b/api/core/tools/provider/builtin/lark_document/lark_document.py new file mode 100644 index 0000000000..b128327602 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/lark_document.py @@ -0,0 +1,7 @@ +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController +from core.tools.utils.lark_api_utils import lark_auth + + +class LarkDocumentProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + lark_auth(credentials) diff --git a/api/core/tools/provider/builtin/lark_document/lark_document.yaml b/api/core/tools/provider/builtin/lark_document/lark_document.yaml new file mode 100644 index 0000000000..0cb4ae1d62 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/lark_document.yaml @@ -0,0 +1,36 @@ +identity: + author: Doug Lea + name: lark_document + label: + en_US: Lark Cloud Document + zh_Hans: Lark 云文档 + description: + en_US: | + Lark cloud document, requires the following permissions: docx:document、drive:drive、docs:document.content:read. 
+ zh_Hans: | + Lark 云文档,需要开通以下权限: docx:document、drive:drive、docs:document.content:read。 + icon: icon.svg + tags: + - social + - productivity +credentials_for_provider: + app_id: + type: text-input + required: true + label: + en_US: APP ID + placeholder: + en_US: Please input your Lark app id + zh_Hans: 请输入你的 Lark app id + help: + en_US: Get your app_id and app_secret from Lark + zh_Hans: 从 Lark 获取您的 app_id 和 app_secret + url: https://open.larksuite.com/app + app_secret: + type: secret-input + required: true + label: + en_US: APP Secret + placeholder: + en_US: Please input your app secret + zh_Hans: 请输入你的 Lark app secret diff --git a/api/core/tools/provider/builtin/lark_document/tools/create_document.py b/api/core/tools/provider/builtin/lark_document/tools/create_document.py new file mode 100644 index 0000000000..2b1dae0db5 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/tools/create_document.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class CreateDocumentTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + title = tool_parameters.get("title") + content = tool_parameters.get("content") + folder_token = tool_parameters.get("folder_token") + + res = client.create_document(title, content, folder_token) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_document/tools/create_document.yaml b/api/core/tools/provider/builtin/lark_document/tools/create_document.yaml new file mode 100644 index 0000000000..37a1e23041 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/tools/create_document.yaml @@ -0,0 +1,48 @@ +identity: 
+ name: create_document + author: Doug Lea + label: + en_US: Create Lark document + zh_Hans: 创建 Lark 文档 +description: + human: + en_US: Create Lark document + zh_Hans: 创建 Lark 文档,支持创建空文档和带内容的文档,支持 markdown 语法创建。应用需要开启机器人能力(https://open.larksuite.com/document/faq/trouble-shooting/how-to-enable-bot-ability)。 + llm: A tool for creating Lark documents. +parameters: + - name: title + type: string + required: false + label: + en_US: Document title + zh_Hans: 文档标题 + human_description: + en_US: Document title, only supports plain text content. + zh_Hans: 文档标题,只支持纯文本内容。 + llm_description: 文档标题,只支持纯文本内容,可以为空。 + form: llm + + - name: content + type: string + required: false + label: + en_US: Document content + zh_Hans: 文档内容 + human_description: + en_US: Document content, supports markdown syntax, can be empty. + zh_Hans: 文档内容,支持 markdown 语法,可以为空。 + llm_description: 文档内容,支持 markdown 语法,可以为空。 + form: llm + + - name: folder_token + type: string + required: false + label: + en_US: folder_token + zh_Hans: 文档所在文件夹的 Token + human_description: + en_US: | + The token of the folder where the document is located. If it is not passed or is empty, it means the root directory. 
For Example: https://lark-japan.jp.larksuite.com/drive/folder/Lf8uf6BoAlWkUfdGtpMjUV0PpZd + zh_Hans: 文档所在文件夹的 Token,不传或传空表示根目录。例如:https://lark-japan.jp.larksuite.com/drive/folder/Lf8uf6BoAlWkUfdGtpMjUV0PpZd。 + llm_description: 文档所在文件夹的 Token,不传或传空表示根目录。例如:https://lark-japan.jp.larksuite.com/drive/folder/Lf8uf6BoAlWkUfdGtpMjUV0PpZd。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_document/tools/get_document_content.py b/api/core/tools/provider/builtin/lark_document/tools/get_document_content.py new file mode 100644 index 0000000000..d15211b57e --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/tools/get_document_content.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class GetDocumentRawContentTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + document_id = tool_parameters.get("document_id") + mode = tool_parameters.get("mode", "markdown") + lang = tool_parameters.get("lang", "0") + + res = client.get_document_content(document_id, mode, lang) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_document/tools/get_document_content.yaml b/api/core/tools/provider/builtin/lark_document/tools/get_document_content.yaml new file mode 100644 index 0000000000..fd6a033bfd --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/tools/get_document_content.yaml @@ -0,0 +1,70 @@ +identity: + name: get_document_content + author: Doug Lea + label: + en_US: Get Lark Cloud Document Content + zh_Hans: 获取 Lark 云文档的内容 +description: + human: + en_US: Get lark cloud document content + zh_Hans: 获取 Lark 云文档的内容 + llm: A tool for 
retrieving content from Lark cloud documents. +parameters: + - name: document_id + type: string + required: true + label: + en_US: document_id + zh_Hans: Lark 文档的唯一标识 + human_description: + en_US: Unique identifier for a Lark document. You can also input the document's URL. + zh_Hans: Lark 文档的唯一标识,支持输入文档的 URL。 + llm_description: Lark 文档的唯一标识,支持输入文档的 URL。 + form: llm + + - name: mode + type: select + required: false + options: + - value: text + label: + en_US: text + zh_Hans: text + - value: markdown + label: + en_US: markdown + zh_Hans: markdown + default: "markdown" + label: + en_US: mode + zh_Hans: 文档返回格式 + human_description: + en_US: Format of the document return, optional values are text, markdown, can be empty, default is markdown. + zh_Hans: 文档返回格式,可选值有 text、markdown,可以为空,默认值为 markdown。 + llm_description: 文档返回格式,可选值有 text、markdown,可以为空,默认值为 markdown。 + form: form + + - name: lang + type: select + required: false + options: + - value: "0" + label: + en_US: User's default name + zh_Hans: 用户的默认名称 + - value: "1" + label: + en_US: User's English name + zh_Hans: 用户的英文名称 + default: "0" + label: + en_US: lang + zh_Hans: 指定@用户的语言 + human_description: + en_US: | + Specifies the language for MentionUser, optional values are [0, 1]. 0: User's default name, 1: User's English name, default is 0. 
+ zh_Hans: | + 指定返回的 MentionUser,即@用户的语言,可选值有 [0,1]。0: 该用户的默认名称,1: 该用户的英文名称,默认值为 0。 + llm_description: | + 指定返回的 MentionUser,即@用户的语言,可选值有 [0,1]。0: 该用户的默认名称,1: 该用户的英文名称,默认值为 0。 + form: form diff --git a/api/core/tools/provider/builtin/lark_document/tools/list_document_blocks.py b/api/core/tools/provider/builtin/lark_document/tools/list_document_blocks.py new file mode 100644 index 0000000000..b96a87489e --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/tools/list_document_blocks.py @@ -0,0 +1,20 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class ListDocumentBlockTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + document_id = tool_parameters.get("document_id") + page_token = tool_parameters.get("page_token", "") + user_id_type = tool_parameters.get("user_id_type", "open_id") + page_size = tool_parameters.get("page_size", 500) + + res = client.list_document_blocks(document_id, page_token, user_id_type, page_size) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_document/tools/list_document_blocks.yaml b/api/core/tools/provider/builtin/lark_document/tools/list_document_blocks.yaml new file mode 100644 index 0000000000..08b673e0ae --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/tools/list_document_blocks.yaml @@ -0,0 +1,74 @@ +identity: + name: list_document_blocks + author: Doug Lea + label: + en_US: List Lark Document Blocks + zh_Hans: 获取 Lark 文档所有块 +description: + human: + en_US: List lark document blocks + zh_Hans: 获取 Lark 文档所有块的富文本内容并分页返回 + llm: A tool to get all blocks of Lark documents +parameters: + - name: document_id 
+ type: string + required: true + label: + en_US: document_id + zh_Hans: Lark 文档的唯一标识 + human_description: + en_US: Unique identifier for a Lark document. You can also input the document's URL. + zh_Hans: Lark 文档的唯一标识,支持输入文档的 URL。 + llm_description: Lark 文档的唯一标识,支持输入文档的 URL。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. + zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form + + - name: page_size + type: number + required: false + default: 500 + label: + en_US: page_size + zh_Hans: 分页大小 + human_description: + en_US: Paging size, the default and maximum value is 500. + zh_Hans: 分页大小, 默认值和最大值为 500。 + llm_description: 分页大小, 表示一次请求最多返回多少条数据,默认值和最大值为 500。 + form: form + + - name: page_token + type: string + required: false + label: + en_US: page_token + zh_Hans: 分页标记 + human_description: + en_US: Pagination token used to navigate through query results, allowing retrieval of additional items in subsequent requests. 
+ zh_Hans: 分页标记,用于分页查询结果,以便下次遍历时获取更多项。 + llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_document/tools/write_document.py b/api/core/tools/provider/builtin/lark_document/tools/write_document.py new file mode 100644 index 0000000000..888e0e39fc --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/tools/write_document.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class WriteDocumentTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + document_id = tool_parameters.get("document_id") + content = tool_parameters.get("content") + position = tool_parameters.get("position", "end") + + res = client.write_document(document_id, content, position) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_document/tools/write_document.yaml b/api/core/tools/provider/builtin/lark_document/tools/write_document.yaml new file mode 100644 index 0000000000..9cdf034ed0 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_document/tools/write_document.yaml @@ -0,0 +1,57 @@ +identity: + name: write_document + author: Doug Lea + label: + en_US: Write Document + zh_Hans: 在 Lark 文档中新增内容 +description: + human: + en_US: Adding new content to Lark documents + zh_Hans: 在 Lark 文档中新增内容 + llm: A tool for adding new content to Lark documents. +parameters: + - name: document_id + type: string + required: true + label: + en_US: document_id + zh_Hans: Lark 文档的唯一标识 + human_description: + en_US: Unique identifier for a Lark document.
You can also input the document's URL. + zh_Hans: Lark 文档的唯一标识,支持输入文档的 URL。 + llm_description: Lark 文档的唯一标识,支持输入文档的 URL。 + form: llm + + - name: content + type: string + required: true + label: + en_US: Plain text or Markdown content + zh_Hans: 纯文本或 Markdown 内容 + human_description: + en_US: Plain text or Markdown content. Note that embedded tables in the document should not have merged cells. + zh_Hans: 纯文本或 Markdown 内容。注意文档的内嵌套表格不允许有单元格合并。 + llm_description: 纯文本或 Markdown 内容,注意文档的内嵌套表格不允许有单元格合并。 + form: llm + + - name: position + type: select + required: false + options: + - value: start + label: + en_US: document start + zh_Hans: 文档开始 + - value: end + label: + en_US: document end + zh_Hans: 文档结束 + default: "end" + label: + en_US: position + zh_Hans: 内容添加位置 + human_description: + en_US: Content insertion position, optional values are start, end. 'start' means adding content at the beginning of the document; 'end' means adding content at the end of the document. The default value is end. 
+ zh_Hans: 内容添加位置,可选值有 start、end。start 表示在文档开头添加内容;end 表示在文档结尾添加内容,默认值为 end。 + llm_description: 内容添加位置,可选值有 start、end。start 表示在文档开头添加内容;end 表示在文档结尾添加内容,默认值为 end。 + form: form diff --git a/api/core/tools/provider/builtin/lark_message_and_group/_assets/icon.png b/api/core/tools/provider/builtin/lark_message_and_group/_assets/icon.png new file mode 100644 index 0000000000..0dfd58a9d5 Binary files /dev/null and b/api/core/tools/provider/builtin/lark_message_and_group/_assets/icon.png differ diff --git a/api/core/tools/provider/builtin/lark_message_and_group/lark_message_and_group.py b/api/core/tools/provider/builtin/lark_message_and_group/lark_message_and_group.py new file mode 100644 index 0000000000..de6997b0bf --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/lark_message_and_group.py @@ -0,0 +1,7 @@ +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController +from core.tools.utils.lark_api_utils import lark_auth + + +class LarkMessageAndGroupProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + lark_auth(credentials) diff --git a/api/core/tools/provider/builtin/lark_message_and_group/lark_message_and_group.yaml b/api/core/tools/provider/builtin/lark_message_and_group/lark_message_and_group.yaml new file mode 100644 index 0000000000..ad3fe0f361 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/lark_message_and_group.yaml @@ -0,0 +1,36 @@ +identity: + author: Doug Lea + name: lark_message_and_group + label: + en_US: Lark Message And Group + zh_Hans: Lark 消息和群组 + description: + en_US: | + Lark message and group, requires the following permissions: im:message、im:message.group_msg. 
+ zh_Hans: | + Lark 消息和群组,需要开通以下权限: im:message、im:message.group_msg。 + icon: icon.png + tags: + - social + - productivity +credentials_for_provider: + app_id: + type: text-input + required: true + label: + en_US: APP ID + placeholder: + en_US: Please input your Lark app id + zh_Hans: 请输入你的 Lark app id + help: + en_US: Get your app_id and app_secret from Lark + zh_Hans: 从 Lark 获取您的 app_id 和 app_secret + url: https://open.larksuite.com/app + app_secret: + type: secret-input + required: true + label: + en_US: APP Secret + placeholder: + en_US: Please input your app secret + zh_Hans: 请输入你的 Lark app secret diff --git a/api/core/tools/provider/builtin/lark_message_and_group/tools/get_chat_messages.py b/api/core/tools/provider/builtin/lark_message_and_group/tools/get_chat_messages.py new file mode 100644 index 0000000000..118bac7ab7 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/tools/get_chat_messages.py @@ -0,0 +1,23 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class GetChatMessagesTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + container_id = tool_parameters.get("container_id") + start_time = tool_parameters.get("start_time") + end_time = tool_parameters.get("end_time") + page_token = tool_parameters.get("page_token") + sort_type = tool_parameters.get("sort_type", "ByCreateTimeAsc") + page_size = tool_parameters.get("page_size", 20) + + res = client.get_chat_messages(container_id, start_time, end_time, page_token, sort_type, page_size) + + return self.create_json_message(res) diff --git 
a/api/core/tools/provider/builtin/lark_message_and_group/tools/get_chat_messages.yaml b/api/core/tools/provider/builtin/lark_message_and_group/tools/get_chat_messages.yaml new file mode 100644 index 0000000000..965b45a5fb --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/tools/get_chat_messages.yaml @@ -0,0 +1,96 @@ +identity: + name: get_chat_messages + author: Doug Lea + label: + en_US: Get Chat Messages + zh_Hans: 获取指定单聊、群聊的消息历史 +description: + human: + en_US: Get Chat Messages + zh_Hans: 获取指定单聊、群聊的消息历史 + llm: A tool for getting chat messages from specific one-on-one chats or group chats.(获取指定单聊、群聊的消息历史) +parameters: + - name: container_id + type: string + required: true + label: + en_US: Container Id + zh_Hans: 群聊或单聊的 ID + human_description: + en_US: The ID of the group chat or single chat. Refer to the group ID description for how to obtain it. https://open.larkoffice.com/document/server-docs/group/chat/chat-id-description + zh_Hans: 群聊或单聊的 ID,获取方式参见群 ID 说明。https://open.larkoffice.com/document/server-docs/group/chat/chat-id-description + llm_description: 群聊或单聊的 ID,获取方式参见群 ID 说明。https://open.larkoffice.com/document/server-docs/group/chat/chat-id-description + form: llm + + - name: start_time + type: string + required: false + label: + en_US: Start Time + zh_Hans: 起始时间 + human_description: + en_US: The start time for querying historical messages, formatted as "2006-01-02 15:04:05". + zh_Hans: 待查询历史信息的起始时间,格式为 "2006-01-02 15:04:05"。 + llm_description: 待查询历史信息的起始时间,格式为 "2006-01-02 15:04:05"。 + form: llm + + - name: end_time + type: string + required: false + label: + en_US: End Time + zh_Hans: 结束时间 + human_description: + en_US: The end time for querying historical messages, formatted as "2006-01-02 15:04:05". 
+ zh_Hans: 待查询历史信息的结束时间,格式为 "2006-01-02 15:04:05"。 + llm_description: 待查询历史信息的结束时间,格式为 "2006-01-02 15:04:05"。 + form: llm + + - name: sort_type + type: select + required: false + options: + - value: ByCreateTimeAsc + label: + en_US: ByCreateTimeAsc + zh_Hans: ByCreateTimeAsc + - value: ByCreateTimeDesc + label: + en_US: ByCreateTimeDesc + zh_Hans: ByCreateTimeDesc + default: "ByCreateTimeAsc" + label: + en_US: Sort Type + zh_Hans: 排序方式 + human_description: + en_US: | + The message sorting method. Optional values are ByCreateTimeAsc: sorted in ascending order by message creation time; ByCreateTimeDesc: sorted in descending order by message creation time. The default value is ByCreateTimeAsc. Note: When using page_token for pagination requests, the sorting method (sort_type) is consistent with the first request and cannot be changed midway. + zh_Hans: | + 消息排序方式,可选值有 ByCreateTimeAsc:按消息创建时间升序排列;ByCreateTimeDesc:按消息创建时间降序排列。默认值为:ByCreateTimeAsc。注意:使用 page_token 分页请求时,排序方式(sort_type)均与第一次请求一致,不支持中途改换排序方式。 + llm_description: 消息排序方式,可选值有 ByCreateTimeAsc:按消息创建时间升序排列;ByCreateTimeDesc:按消息创建时间降序排列。默认值为:ByCreateTimeAsc。注意:使用 page_token 分页请求时,排序方式(sort_type)均与第一次请求一致,不支持中途改换排序方式。 + form: form + + - name: page_size + type: number + required: false + default: 20 + label: + en_US: Page Size + zh_Hans: 分页大小 + human_description: + en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50]. + zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。 + llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。 + form: form + + - name: page_token + type: string + required: false + label: + en_US: Page Token + zh_Hans: 分页标记 + human_description: + en_US: The pagination token. 
Leave it blank for the first request, indicating to start traversing from the beginning; when the pagination query result has more items, a new page_token will be returned simultaneously, which can be used to obtain the query result in the next traversal. + zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_message_and_group/tools/get_thread_messages.py b/api/core/tools/provider/builtin/lark_message_and_group/tools/get_thread_messages.py new file mode 100644 index 0000000000..3509d9bbcf --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/tools/get_thread_messages.py @@ -0,0 +1,21 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class GetThreadMessagesTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + container_id = tool_parameters.get("container_id") + page_token = tool_parameters.get("page_token") + sort_type = tool_parameters.get("sort_type", "ByCreateTimeAsc") + page_size = tool_parameters.get("page_size", 20) + + res = client.get_thread_messages(container_id, page_token, sort_type, page_size) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_message_and_group/tools/get_thread_messages.yaml b/api/core/tools/provider/builtin/lark_message_and_group/tools/get_thread_messages.yaml new file mode 100644 index 0000000000..5f7a4f0902 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/tools/get_thread_messages.yaml @@ -0,0 
+1,72 @@ +identity: + name: get_thread_messages + author: Doug Lea + label: + en_US: Get Thread Messages + zh_Hans: 获取指定话题的消息历史 +description: + human: + en_US: Get Thread Messages + zh_Hans: 获取指定话题的消息历史 + llm: A tool for getting chat messages from specific threads.(获取指定话题的消息历史) +parameters: + - name: container_id + type: string + required: true + label: + en_US: Thread Id + zh_Hans: 话题 ID + human_description: + en_US: The ID of the thread. Refer to the thread overview on how to obtain the thread_id. https://open.larksuite.com/document/uAjLw4CM/ukTMukTMukTM/reference/im-v1/message/thread-introduction + zh_Hans: 话题 ID,获取方式参见话题概述的如何获取 thread_id 章节。https://open.larksuite.com/document/uAjLw4CM/ukTMukTMukTM/reference/im-v1/message/thread-introduction + llm_description: 话题 ID,获取方式参见话题概述的如何获取 thread_id 章节。https://open.larksuite.com/document/uAjLw4CM/ukTMukTMukTM/reference/im-v1/message/thread-introduction + form: llm + + - name: sort_type + type: select + required: false + options: + - value: ByCreateTimeAsc + label: + en_US: ByCreateTimeAsc + zh_Hans: ByCreateTimeAsc + - value: ByCreateTimeDesc + label: + en_US: ByCreateTimeDesc + zh_Hans: ByCreateTimeDesc + default: "ByCreateTimeAsc" + label: + en_US: Sort Type + zh_Hans: 排序方式 + human_description: + en_US: | + The message sorting method. Optional values are ByCreateTimeAsc: sorted in ascending order by message creation time; ByCreateTimeDesc: sorted in descending order by message creation time. The default value is ByCreateTimeAsc. Note: When using page_token for pagination requests, the sorting method (sort_type) is consistent with the first request and cannot be changed midway. 
+ zh_Hans: | + 消息排序方式,可选值有 ByCreateTimeAsc:按消息创建时间升序排列;ByCreateTimeDesc:按消息创建时间降序排列。默认值为:ByCreateTimeAsc。注意:使用 page_token 分页请求时,排序方式(sort_type)均与第一次请求一致,不支持中途改换排序方式。 + llm_description: 消息排序方式,可选值有 ByCreateTimeAsc:按消息创建时间升序排列;ByCreateTimeDesc:按消息创建时间降序排列。默认值为:ByCreateTimeAsc。注意:使用 page_token 分页请求时,排序方式(sort_type)均与第一次请求一致,不支持中途改换排序方式。 + form: form + + - name: page_size + type: number + required: false + default: 20 + label: + en_US: Page Size + zh_Hans: 分页大小 + human_description: + en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50]. + zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。 + llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。 + form: form + + - name: page_token + type: string + required: false + label: + en_US: Page Token + zh_Hans: 分页标记 + human_description: + en_US: The pagination token. Leave it blank for the first request, indicating to start traversing from the beginning; when the pagination query result has more items, a new page_token will be returned simultaneously, which can be used to obtain the query result in the next traversal. 
+ zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_message_and_group/tools/send_bot_message.py b/api/core/tools/provider/builtin/lark_message_and_group/tools/send_bot_message.py new file mode 100644 index 0000000000..b0a8df61e8 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/tools/send_bot_message.py @@ -0,0 +1,20 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class SendBotMessageTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + receive_id_type = tool_parameters.get("receive_id_type") + receive_id = tool_parameters.get("receive_id") + msg_type = tool_parameters.get("msg_type") + content = tool_parameters.get("content") + + res = client.send_bot_message(receive_id_type, receive_id, msg_type, content) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_message_and_group/tools/send_bot_message.yaml b/api/core/tools/provider/builtin/lark_message_and_group/tools/send_bot_message.yaml new file mode 100644 index 0000000000..b949c5e016 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/tools/send_bot_message.yaml @@ -0,0 +1,125 @@ +identity: + name: send_bot_message + author: Doug Lea + label: + en_US: Send Bot Message + zh_Hans: 发送 Lark 应用消息 +description: + human: + en_US: Send bot message + zh_Hans: 发送 Lark 应用消息 + llm: A tool for sending Lark application messages. 
+parameters: + - name: receive_id + type: string + required: true + label: + en_US: receive_id + zh_Hans: 消息接收者的 ID + human_description: + en_US: The ID of the message receiver, the ID type is consistent with the value of the query parameter receive_id_type. + zh_Hans: 消息接收者的 ID,ID 类型与查询参数 receive_id_type 的取值一致。 + llm_description: 消息接收者的 ID,ID 类型与查询参数 receive_id_type 的取值一致。 + form: llm + + - name: receive_id_type + type: select + required: true + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + - value: email + label: + en_US: email + zh_Hans: email + - value: chat_id + label: + en_US: chat_id + zh_Hans: chat_id + label: + en_US: receive_id_type + zh_Hans: 消息接收者的 ID 类型 + human_description: + en_US: The ID type of the message receiver, optional values are open_id, union_id, user_id, email, chat_id, with a default value of open_id. + zh_Hans: 消息接收者的 ID 类型,可选值有 open_id、union_id、user_id、email、chat_id,默认值为 open_id。 + llm_description: 消息接收者的 ID 类型,可选值有 open_id、union_id、user_id、email、chat_id,默认值为 open_id。 + form: form + + - name: msg_type + type: select + required: true + options: + - value: text + label: + en_US: text + zh_Hans: 文本 + - value: interactive + label: + en_US: interactive + zh_Hans: 卡片 + - value: post + label: + en_US: post + zh_Hans: 富文本 + - value: image + label: + en_US: image + zh_Hans: 图片 + - value: file + label: + en_US: file + zh_Hans: 文件 + - value: audio + label: + en_US: audio + zh_Hans: 语音 + - value: media + label: + en_US: media + zh_Hans: 视频 + - value: sticker + label: + en_US: sticker + zh_Hans: 表情包 + - value: share_chat + label: + en_US: share_chat + zh_Hans: 分享群名片 + - value: share_user + label: + en_US: share_user + zh_Hans: 分享个人名片 + - value: system + label: + en_US: system + zh_Hans: 系统消息 + label: + en_US: msg_type + zh_Hans: 消息类型 + human_description: + en_US: Message type. 
Optional values are text, post, image, file, audio, media, sticker, interactive, share_chat, share_user, system. For detailed introduction of different message types, refer to the message content(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json). + zh_Hans: 消息类型。可选值有:text、post、image、file、audio、media、sticker、interactive、share_chat、share_user、system。不同消息类型的详细介绍,参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。 + llm_description: 消息类型。可选值有:text、post、image、file、audio、media、sticker、interactive、share_chat、share_user、system。不同消息类型的详细介绍,参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。 + form: form + + - name: content + type: string + required: true + label: + en_US: content + zh_Hans: 消息内容 + human_description: + en_US: Message content, a JSON structure serialized string. The value of this parameter corresponds to msg_type. For example, if msg_type is text, this parameter needs to pass in text type content. To understand the format and usage limitations of different message types, refer to the message content(https://open.larksuite.com/document/server-docs/im-v1/message-content-description/create_json). 
+ zh_Hans: 消息内容,JSON 结构序列化后的字符串。该参数的取值与 msg_type 对应,例如 msg_type 取值为 text,则该参数需要传入文本类型的内容。了解不同类型的消息内容格式、使用限制,可参见发送消息内容(https://open.larksuite.com/document/server-docs/im-v1/message-content-description/create_json)。 + llm_description: 消息内容,JSON 结构序列化后的字符串。该参数的取值与 msg_type 对应,例如 msg_type 取值为 text,则该参数需要传入文本类型的内容。了解不同类型的消息内容格式、使用限制,可参见发送消息内容(https://open.larksuite.com/document/server-docs/im-v1/message-content-description/create_json)。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_message_and_group/tools/send_webhook_message.py b/api/core/tools/provider/builtin/lark_message_and_group/tools/send_webhook_message.py new file mode 100644 index 0000000000..18a605079f --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/tools/send_webhook_message.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class SendWebhookMessageTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + webhook = tool_parameters.get("webhook") + msg_type = tool_parameters.get("msg_type") + content = tool_parameters.get("content") + + res = client.send_webhook_message(webhook, msg_type, content) + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_message_and_group/tools/send_webhook_message.yaml b/api/core/tools/provider/builtin/lark_message_and_group/tools/send_webhook_message.yaml new file mode 100644 index 0000000000..ea13cae52b --- /dev/null +++ b/api/core/tools/provider/builtin/lark_message_and_group/tools/send_webhook_message.yaml @@ -0,0 +1,68 @@ +identity: + name: send_webhook_message + author: Doug Lea + label: + en_US: Send Webhook Message + 
zh_Hans: 使用自定义机器人发送 Lark 消息 +description: + human: + en_US: Send webhook message + zh_Hans: 使用自定义机器人发送 Lark 消息 + llm: A tool for sending Lark messages using a custom robot. +parameters: + - name: webhook + type: string + required: true + label: + en_US: webhook + zh_Hans: webhook + human_description: + en_US: | + The address of the webhook, the format of the webhook address corresponding to the bot is as follows: https://open.larksuite.com/open-apis/bot/v2/hook/xxxxxxxxxxxxxxxxx. For details, please refer to: Lark Custom Bot Usage Guide(https://open.larkoffice.com/document/client-docs/bot-v3/add-custom-bot) + zh_Hans: | + webhook 的地址,机器人对应的 webhook 地址格式如下: https://open.larksuite.com/open-apis/bot/v2/hook/xxxxxxxxxxxxxxxxx,详情可参考: Lark 自定义机器人使用指南(https://open.larksuite.com/document/client-docs/bot-v3/add-custom-bot) + llm_description: | + webhook 的地址,机器人对应的 webhook 地址格式如下: https://open.larksuite.com/open-apis/bot/v2/hook/xxxxxxxxxxxxxxxxx,详情可参考: Lark 自定义机器人使用指南(https://open.larksuite.com/document/client-docs/bot-v3/add-custom-bot) + form: llm + + - name: msg_type + type: select + required: true + options: + - value: text + label: + en_US: text + zh_Hans: 文本 + - value: interactive + label: + en_US: interactive + zh_Hans: 卡片 + - value: image + label: + en_US: image + zh_Hans: 图片 + - value: share_chat + label: + en_US: share_chat + zh_Hans: 分享群名片 + label: + en_US: msg_type + zh_Hans: 消息类型 + human_description: + en_US: Message type. Optional values are text, image, interactive, share_chat. For detailed introduction of different message types, refer to the message content(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json). 
+ zh_Hans: 消息类型。可选值有:text、image、interactive、share_chat。不同消息类型的详细介绍,参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。 + llm_description: 消息类型。可选值有:text、image、interactive、share_chat。不同消息类型的详细介绍,参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。 + form: form + + + - name: content + type: string + required: true + label: + en_US: content + zh_Hans: 消息内容 + human_description: + en_US: Message content, a JSON structure serialized string. The value of this parameter corresponds to msg_type. For example, if msg_type is text, this parameter needs to pass in text type content. To understand the format and usage limitations of different message types, refer to the message content(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json). + zh_Hans: 消息内容,JSON 结构序列化后的字符串。该参数的取值与 msg_type 对应,例如 msg_type 取值为 text,则该参数需要传入文本类型的内容。了解不同类型的消息内容格式、使用限制,可参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。 + llm_description: 消息内容,JSON 结构序列化后的字符串。该参数的取值与 msg_type 对应,例如 msg_type 取值为 text,则该参数需要传入文本类型的内容。了解不同类型的消息内容格式、使用限制,可参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/_assets/icon.png b/api/core/tools/provider/builtin/lark_spreadsheet/_assets/icon.png new file mode 100644 index 0000000000..258b361261 Binary files /dev/null and b/api/core/tools/provider/builtin/lark_spreadsheet/_assets/icon.png differ diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/lark_spreadsheet.py b/api/core/tools/provider/builtin/lark_spreadsheet/lark_spreadsheet.py new file mode 100644 index 0000000000..c791363f21 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/lark_spreadsheet.py @@ -0,0 +1,7 @@ +from core.tools.provider.builtin_tool_provider import 
BuiltinToolProviderController +from core.tools.utils.lark_api_utils import lark_auth + + +class LarkMessageProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + lark_auth(credentials) diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/lark_spreadsheet.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/lark_spreadsheet.yaml new file mode 100644 index 0000000000..030b5c9063 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/lark_spreadsheet.yaml @@ -0,0 +1,36 @@ +identity: + author: Doug Lea + name: lark_spreadsheet + label: + en_US: Lark Spreadsheet + zh_Hans: Lark 电子表格 + description: + en_US: | + Lark Spreadsheet, requires the following permissions: sheets:spreadsheet. + zh_Hans: | + Lark 电子表格,需要开通以下权限: sheets:spreadsheet。 + icon: icon.png + tags: + - social + - productivity +credentials_for_provider: + app_id: + type: text-input + required: true + label: + en_US: APP ID + placeholder: + en_US: Please input your Lark app id + zh_Hans: 请输入你的 Lark app id + help: + en_US: Get your app_id and app_secret from Lark + zh_Hans: 从 Lark 获取您的 app_id 和 app_secret + url: https://open.larksuite.com/app + app_secret: + type: secret-input + required: true + label: + en_US: APP Secret + placeholder: + en_US: Please input your app secret + zh_Hans: 请输入你的 Lark app secret diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_cols.py b/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_cols.py new file mode 100644 index 0000000000..deeb5a1ecf --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_cols.py @@ -0,0 +1,22 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class AddColsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = 
self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + spreadsheet_token = tool_parameters.get("spreadsheet_token") + sheet_id = tool_parameters.get("sheet_id") + sheet_name = tool_parameters.get("sheet_name") + length = tool_parameters.get("length") + values = tool_parameters.get("values") + + res = client.add_cols(spreadsheet_token, sheet_id, sheet_name, length, values) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_cols.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_cols.yaml new file mode 100644 index 0000000000..b73335f405 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_cols.yaml @@ -0,0 +1,72 @@ +identity: + name: add_cols + author: Doug Lea + label: + en_US: Add Cols + zh_Hans: 新增多列至工作表最后 +description: + human: + en_US: Add Cols + zh_Hans: 新增多列至工作表最后 + llm: A tool for adding multiple columns to the end of a spreadsheet. (新增多列至工作表最后) +parameters: + - name: spreadsheet_token + type: string + required: true + label: + en_US: spreadsheet_token + zh_Hans: 电子表格 token + human_description: + en_US: Spreadsheet token, supports input of spreadsheet URL. + zh_Hans: 电子表格 token,支持输入电子表格 url。 + llm_description: 电子表格 token,支持输入电子表格 url。 + form: llm + + - name: sheet_id + type: string + required: false + label: + en_US: sheet_id + zh_Hans: 工作表 ID + human_description: + en_US: Sheet ID, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。 + llm_description: 工作表 ID,与 sheet_name 二者其一必填。 + form: llm + + - name: sheet_name + type: string + required: false + label: + en_US: sheet_name + zh_Hans: 工作表名称 + human_description: + en_US: Sheet name, either sheet_id or sheet_name must be filled. 
+ zh_Hans: 工作表名称,与 sheet_id 二者其一必填。 + llm_description: 工作表名称,与 sheet_id 二者其一必填。 + form: llm + + - name: length + type: number + required: true + label: + en_US: length + zh_Hans: 要增加的列数 + human_description: + en_US: Number of columns to add, range (0-5000]. + zh_Hans: 要增加的列数,范围(0-5000]。 + llm_description: 要增加的列数,范围(0-5000]。 + form: form + + - name: values + type: string + required: false + label: + en_US: values + zh_Hans: 新增列的单元格内容 + human_description: + en_US: | + Content of the new columns, array of objects in string format, each array represents a row of table data, format like: [ [ "ID","Name","Age" ],[ 1,"Zhang San",10 ],[ 2,"Li Si",11 ] ]. + zh_Hans: 新增列的单元格内容,数组对象字符串,每个数组一行表格数据,格式:[["编号","姓名","年龄"],[1,"张三",10],[2,"李四",11]]。 + llm_description: 新增列的单元格内容,数组对象字符串,每个数组一行表格数据,格式:[["编号","姓名","年龄"],[1,"张三",10],[2,"李四",11]]。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_rows.py b/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_rows.py new file mode 100644 index 0000000000..f434b1c603 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_rows.py @@ -0,0 +1,22 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class AddRowsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + spreadsheet_token = tool_parameters.get("spreadsheet_token") + sheet_id = tool_parameters.get("sheet_id") + sheet_name = tool_parameters.get("sheet_name") + length = tool_parameters.get("length") + values = tool_parameters.get("values") + + res = client.add_rows(spreadsheet_token, sheet_id, sheet_name, length, values) + + return self.create_json_message(res) 
diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_rows.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_rows.yaml new file mode 100644 index 0000000000..6bce305b98 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/add_rows.yaml @@ -0,0 +1,72 @@ +identity: + name: add_rows + author: Doug Lea + label: + en_US: Add Rows + zh_Hans: 新增多行至工作表最后 +description: + human: + en_US: Add Rows + zh_Hans: 新增多行至工作表最后 + llm: A tool for adding multiple rows to the end of a spreadsheet. (新增多行至工作表最后) +parameters: + - name: spreadsheet_token + type: string + required: true + label: + en_US: spreadsheet_token + zh_Hans: 电子表格 token + human_description: + en_US: Spreadsheet token, supports input of spreadsheet URL. + zh_Hans: 电子表格 token,支持输入电子表格 url。 + llm_description: 电子表格 token,支持输入电子表格 url。 + form: llm + + - name: sheet_id + type: string + required: false + label: + en_US: sheet_id + zh_Hans: 工作表 ID + human_description: + en_US: Sheet ID, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。 + llm_description: 工作表 ID,与 sheet_name 二者其一必填。 + form: llm + + - name: sheet_name + type: string + required: false + label: + en_US: sheet_name + zh_Hans: 工作表名称 + human_description: + en_US: Sheet name, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表名称,与 sheet_id 二者其一必填。 + llm_description: 工作表名称,与 sheet_id 二者其一必填。 + form: llm + + - name: length + type: number + required: true + label: + en_US: length + zh_Hans: 要增加行数 + human_description: + en_US: Number of rows to add, range (0-5000]. + zh_Hans: 要增加行数,范围(0-5000]。 + llm_description: 要增加行数,范围(0-5000]。 + form: form + + - name: values + type: string + required: false + label: + en_US: values + zh_Hans: 新增行的表格内容 + human_description: + en_US: | + Content of the new rows, array of objects in string format, each array represents a row of table data, format like: [ [ "ID","Name","Age" ],[ 1,"Zhang San",10 ],[ 2,"Li Si",11 ] ]. 
+ zh_Hans: 新增行的表格内容,数组对象字符串,每个数组一行表格数据,格式,如:[["编号","姓名","年龄"],[1,"张三",10],[2,"李四",11]]。 + llm_description: 新增行的表格内容,数组对象字符串,每个数组一行表格数据,格式,如:[["编号","姓名","年龄"],[1,"张三",10],[2,"李四",11]]。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/create_spreadsheet.py b/api/core/tools/provider/builtin/lark_spreadsheet/tools/create_spreadsheet.py new file mode 100644 index 0000000000..74b20ac2c8 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/create_spreadsheet.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class CreateSpreadsheetTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + title = tool_parameters.get("title") + folder_token = tool_parameters.get("folder_token") + + res = client.create_spreadsheet(title, folder_token) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/create_spreadsheet.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/tools/create_spreadsheet.yaml new file mode 100644 index 0000000000..931310e631 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/create_spreadsheet.yaml @@ -0,0 +1,35 @@ +identity: + name: create_spreadsheet + author: Doug Lea + label: + en_US: Create Spreadsheet + zh_Hans: 创建电子表格 +description: + human: + en_US: Create Spreadsheet + zh_Hans: 创建电子表格 + llm: A tool for creating spreadsheets. 
(创建电子表格) +parameters: + - name: title + type: string + required: false + label: + en_US: Spreadsheet Title + zh_Hans: 电子表格标题 + human_description: + en_US: The title of the spreadsheet + zh_Hans: 电子表格的标题 + llm_description: 电子表格的标题 + form: llm + + - name: folder_token + type: string + required: false + label: + en_US: Folder Token + zh_Hans: 文件夹 token + human_description: + en_US: The token of the folder, supports folder URL input, e.g., https://bytedance.larkoffice.com/drive/folder/CxHEf4DCSlNkL2dUTCJcPRgentg + zh_Hans: 文件夹 token,支持文件夹 URL 输入,如:https://bytedance.larkoffice.com/drive/folder/CxHEf4DCSlNkL2dUTCJcPRgentg + llm_description: 文件夹 token,支持文件夹 URL 输入,如:https://bytedance.larkoffice.com/drive/folder/CxHEf4DCSlNkL2dUTCJcPRgentg + form: llm diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/get_spreadsheet.py b/api/core/tools/provider/builtin/lark_spreadsheet/tools/get_spreadsheet.py new file mode 100644 index 0000000000..0fe35b6dc6 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/get_spreadsheet.py @@ -0,0 +1,19 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class GetSpreadsheetTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + spreadsheet_token = tool_parameters.get("spreadsheet_token") + user_id_type = tool_parameters.get("user_id_type", "open_id") + + res = client.get_spreadsheet(spreadsheet_token, user_id_type) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/get_spreadsheet.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/tools/get_spreadsheet.yaml new file mode 100644 index 
0000000000..c519938617 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/get_spreadsheet.yaml @@ -0,0 +1,49 @@ +identity: + name: get_spreadsheet + author: Doug Lea + label: + en_US: Get Spreadsheet + zh_Hans: 获取电子表格信息 +description: + human: + en_US: Get Spreadsheet + zh_Hans: 获取电子表格信息 + llm: A tool for getting information from spreadsheets. (获取电子表格信息) +parameters: + - name: spreadsheet_token + type: string + required: true + label: + en_US: Spreadsheet Token + zh_Hans: 电子表格 token + human_description: + en_US: Spreadsheet token, supports input of spreadsheet URL. + zh_Hans: 电子表格 token,支持输入电子表格 URL。 + llm_description: 电子表格 token,支持输入电子表格 URL。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. 
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/list_spreadsheet_sheets.py b/api/core/tools/provider/builtin/lark_spreadsheet/tools/list_spreadsheet_sheets.py new file mode 100644 index 0000000000..e711c23780 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/list_spreadsheet_sheets.py @@ -0,0 +1,18 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class ListSpreadsheetSheetsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + spreadsheet_token = tool_parameters.get("spreadsheet_token") + + res = client.list_spreadsheet_sheets(spreadsheet_token) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/list_spreadsheet_sheets.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/tools/list_spreadsheet_sheets.yaml new file mode 100644 index 0000000000..c6a7ef45d4 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/list_spreadsheet_sheets.yaml @@ -0,0 +1,23 @@ +identity: + name: list_spreadsheet_sheets + author: Doug Lea + label: + en_US: List Spreadsheet Sheets + zh_Hans: 列出电子表格所有工作表 +description: + human: + en_US: List Spreadsheet Sheets + zh_Hans: 列出电子表格所有工作表 + llm: A tool for listing all sheets in a spreadsheet. (列出电子表格所有工作表) +parameters: + - name: spreadsheet_token + type: string + required: true + label: + en_US: Spreadsheet Token + zh_Hans: 电子表格 token + human_description: + en_US: Spreadsheet token, supports input of spreadsheet URL. 
+ zh_Hans: 电子表格 token,支持输入电子表格 URL。 + llm_description: 电子表格 token,支持输入电子表格 URL。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_cols.py b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_cols.py new file mode 100644 index 0000000000..1df289c1d7 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_cols.py @@ -0,0 +1,23 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class ReadColsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + spreadsheet_token = tool_parameters.get("spreadsheet_token") + sheet_id = tool_parameters.get("sheet_id") + sheet_name = tool_parameters.get("sheet_name") + start_col = tool_parameters.get("start_col") + num_cols = tool_parameters.get("num_cols") + user_id_type = tool_parameters.get("user_id_type", "open_id") + + res = client.read_cols(spreadsheet_token, sheet_id, sheet_name, start_col, num_cols, user_id_type) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_cols.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_cols.yaml new file mode 100644 index 0000000000..34da74592d --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_cols.yaml @@ -0,0 +1,97 @@ +identity: + name: read_cols + author: Doug Lea + label: + en_US: Read Cols + zh_Hans: 读取工作表列数据 +description: + human: + en_US: Read Cols + zh_Hans: 读取工作表列数据 + llm: A tool for reading column data from a spreadsheet. 
(读取工作表列数据) +parameters: + - name: spreadsheet_token + type: string + required: true + label: + en_US: spreadsheet_token + zh_Hans: 电子表格 token + human_description: + en_US: Spreadsheet token, supports input of spreadsheet URL. + zh_Hans: 电子表格 token,支持输入电子表格 url。 + llm_description: 电子表格 token,支持输入电子表格 url。 + form: llm + + - name: sheet_id + type: string + required: false + label: + en_US: sheet_id + zh_Hans: 工作表 ID + human_description: + en_US: Sheet ID, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。 + llm_description: 工作表 ID,与 sheet_name 二者其一必填。 + form: llm + + - name: sheet_name + type: string + required: false + label: + en_US: sheet_name + zh_Hans: 工作表名称 + human_description: + en_US: Sheet name, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表名称,与 sheet_id 二者其一必填。 + llm_description: 工作表名称,与 sheet_id 二者其一必填。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. + zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form + + - name: start_col + type: number + required: false + label: + en_US: start_col + zh_Hans: 起始列号 + human_description: + en_US: Starting column number, starting from 1. + zh_Hans: 起始列号,从 1 开始。 + llm_description: 起始列号,从 1 开始。 + form: form + + - name: num_cols + type: number + required: true + label: + en_US: num_cols + zh_Hans: 读取列数 + human_description: + en_US: Number of columns to read. 
+ zh_Hans: 读取列数 + llm_description: 读取列数 + form: form diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_rows.py b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_rows.py new file mode 100644 index 0000000000..1cab38a454 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_rows.py @@ -0,0 +1,23 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class ReadRowsTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + spreadsheet_token = tool_parameters.get("spreadsheet_token") + sheet_id = tool_parameters.get("sheet_id") + sheet_name = tool_parameters.get("sheet_name") + start_row = tool_parameters.get("start_row") + num_rows = tool_parameters.get("num_rows") + user_id_type = tool_parameters.get("user_id_type", "open_id") + + res = client.read_rows(spreadsheet_token, sheet_id, sheet_name, start_row, num_rows, user_id_type) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_rows.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_rows.yaml new file mode 100644 index 0000000000..5dfa8d5835 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_rows.yaml @@ -0,0 +1,97 @@ +identity: + name: read_rows + author: Doug Lea + label: + en_US: Read Rows + zh_Hans: 读取工作表行数据 +description: + human: + en_US: Read Rows + zh_Hans: 读取工作表行数据 + llm: A tool for reading row data from a spreadsheet. 
(读取工作表行数据) +parameters: + - name: spreadsheet_token + type: string + required: true + label: + en_US: spreadsheet_token + zh_Hans: 电子表格 token + human_description: + en_US: Spreadsheet token, supports input of spreadsheet URL. + zh_Hans: 电子表格 token,支持输入电子表格 url。 + llm_description: 电子表格 token,支持输入电子表格 url。 + form: llm + + - name: sheet_id + type: string + required: false + label: + en_US: sheet_id + zh_Hans: 工作表 ID + human_description: + en_US: Sheet ID, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。 + llm_description: 工作表 ID,与 sheet_name 二者其一必填。 + form: llm + + - name: sheet_name + type: string + required: false + label: + en_US: sheet_name + zh_Hans: 工作表名称 + human_description: + en_US: Sheet name, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表名称,与 sheet_id 二者其一必填。 + llm_description: 工作表名称,与 sheet_id 二者其一必填。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. + zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form + + - name: start_row + type: number + required: false + label: + en_US: start_row + zh_Hans: 起始行号 + human_description: + en_US: Starting row number, starting from 1. + zh_Hans: 起始行号,从 1 开始。 + llm_description: 起始行号,从 1 开始。 + form: form + + - name: num_rows + type: number + required: true + label: + en_US: num_rows + zh_Hans: 读取行数 + human_description: + en_US: Number of rows to read. 
+ zh_Hans: 读取行数 + llm_description: 读取行数 + form: form diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_table.py b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_table.py new file mode 100644 index 0000000000..0f05249004 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_table.py @@ -0,0 +1,23 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class ReadTableTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + spreadsheet_token = tool_parameters.get("spreadsheet_token") + sheet_id = tool_parameters.get("sheet_id") + sheet_name = tool_parameters.get("sheet_name") + num_range = tool_parameters.get("num_range") + query = tool_parameters.get("query") + user_id_type = tool_parameters.get("user_id_type", "open_id") + + res = client.read_table(spreadsheet_token, sheet_id, sheet_name, num_range, query, user_id_type) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_table.yaml b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_table.yaml new file mode 100644 index 0000000000..10534436d6 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_spreadsheet/tools/read_table.yaml @@ -0,0 +1,122 @@ +identity: + name: read_table + author: Doug Lea + label: + en_US: Read Table + zh_Hans: 自定义读取电子表格行列数据 +description: + human: + en_US: Read Table + zh_Hans: 自定义读取电子表格行列数据 + llm: A tool for custom reading of row and column data from a spreadsheet. 
(自定义读取电子表格行列数据) +parameters: + - name: spreadsheet_token + type: string + required: true + label: + en_US: spreadsheet_token + zh_Hans: 电子表格 token + human_description: + en_US: Spreadsheet token, supports input of spreadsheet URL. + zh_Hans: 电子表格 token,支持输入电子表格 url。 + llm_description: 电子表格 token,支持输入电子表格 url。 + form: llm + + - name: sheet_id + type: string + required: false + label: + en_US: sheet_id + zh_Hans: 工作表 ID + human_description: + en_US: Sheet ID, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。 + llm_description: 工作表 ID,与 sheet_name 二者其一必填。 + form: llm + + - name: sheet_name + type: string + required: false + label: + en_US: sheet_name + zh_Hans: 工作表名称 + human_description: + en_US: Sheet name, either sheet_id or sheet_name must be filled. + zh_Hans: 工作表名称,与 sheet_id 二者其一必填。 + llm_description: 工作表名称,与 sheet_id 二者其一必填。 + form: llm + + - name: user_id_type + type: select + required: false + options: + - value: open_id + label: + en_US: open_id + zh_Hans: open_id + - value: union_id + label: + en_US: union_id + zh_Hans: union_id + - value: user_id + label: + en_US: user_id + zh_Hans: user_id + default: "open_id" + label: + en_US: user_id_type + zh_Hans: 用户 ID 类型 + human_description: + en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id. + zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。 + form: form + + - name: start_row + type: number + required: false + label: + en_US: start_row + zh_Hans: 起始行号 + human_description: + en_US: Starting row number, starting from 1. + zh_Hans: 起始行号,从 1 开始。 + llm_description: 起始行号,从 1 开始。 + form: form + + - name: num_rows + type: number + required: false + label: + en_US: num_rows + zh_Hans: 读取行数 + human_description: + en_US: Number of rows to read. 
+ zh_Hans: 读取行数 + llm_description: 读取行数 + form: form + + - name: range + type: string + required: false + label: + en_US: range + zh_Hans: 取数范围 + human_description: + en_US: | + Data range, format like: A1:B2, can be empty when query=all. + zh_Hans: 取数范围,格式如:A1:B2,query=all 时可为空。 + llm_description: 取数范围,格式如:A1:B2,query=all 时可为空。 + form: llm + + - name: query + type: string + required: false + label: + en_US: query + zh_Hans: 查询 + human_description: + en_US: Pass "all" to query all data in the table, but no more than 100 columns. + zh_Hans: 传 all,表示查询表格所有数据,但最多查询 100 列数据。 + llm_description: 传 all,表示查询表格所有数据,但最多查询 100 列数据。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_task/_assets/icon.png b/api/core/tools/provider/builtin/lark_task/_assets/icon.png new file mode 100644 index 0000000000..26ea6a2eef Binary files /dev/null and b/api/core/tools/provider/builtin/lark_task/_assets/icon.png differ diff --git a/api/core/tools/provider/builtin/lark_task/lark_task.py b/api/core/tools/provider/builtin/lark_task/lark_task.py new file mode 100644 index 0000000000..02cf009f01 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/lark_task.py @@ -0,0 +1,7 @@ +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController +from core.tools.utils.lark_api_utils import lark_auth + + +class LarkTaskProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + lark_auth(credentials) diff --git a/api/core/tools/provider/builtin/lark_task/lark_task.yaml b/api/core/tools/provider/builtin/lark_task/lark_task.yaml new file mode 100644 index 0000000000..ada068b0aa --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/lark_task.yaml @@ -0,0 +1,36 @@ +identity: + author: Doug Lea + name: lark_task + label: + en_US: Lark Task + zh_Hans: Lark 任务 + description: + en_US: | + Lark Task, requires the following permissions: task:task:write、contact:user.id:readonly. 
+ zh_Hans: | + Lark 任务,需要开通以下权限: task:task:write、contact:user.id:readonly。 + icon: icon.png + tags: + - social + - productivity +credentials_for_provider: + app_id: + type: text-input + required: true + label: + en_US: APP ID + placeholder: + en_US: Please input your Lark app id + zh_Hans: 请输入你的 Lark app id + help: + en_US: Get your app_id and app_secret from Lark + zh_Hans: 从 Lark 获取您的 app_id 和 app_secret + url: https://open.larksuite.com/app + app_secret: + type: secret-input + required: true + label: + en_US: APP Secret + placeholder: + en_US: Please input your app secret + zh_Hans: 请输入你的 Lark app secret diff --git a/api/core/tools/provider/builtin/lark_task/tools/add_members.py b/api/core/tools/provider/builtin/lark_task/tools/add_members.py new file mode 100644 index 0000000000..9b8e4d68f3 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/tools/add_members.py @@ -0,0 +1,20 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class AddMembersTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + task_guid = tool_parameters.get("task_guid") + member_phone_or_email = tool_parameters.get("member_phone_or_email") + member_role = tool_parameters.get("member_role", "follower") + + res = client.add_members(task_guid, member_phone_or_email, member_role) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_task/tools/add_members.yaml b/api/core/tools/provider/builtin/lark_task/tools/add_members.yaml new file mode 100644 index 0000000000..0b12172e0b --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/tools/add_members.yaml @@ -0,0 +1,58 @@ +identity: + 
name: add_members + author: Doug Lea + label: + en_US: Add Lark Members + zh_Hans: 添加 Lark 任务成员 +description: + human: + en_US: Add Lark Members + zh_Hans: 添加 Lark 任务成员 + llm: A tool for adding members to a Lark task.(添加 Lark 任务成员) +parameters: + - name: task_guid + type: string + required: true + label: + en_US: Task GUID + zh_Hans: 任务 GUID + human_description: + en_US: | + The GUID of the task to be added, supports passing either the Task ID or the Task link URL. Example of Task ID: 8b5425ec-9f2a-43bd-a3ab-01912f50282b; Example of Task link URL: https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + zh_Hans: 要添加的任务的 GUID,支持传任务 ID 和任务链接 URL。任务 ID 示例:8b5425ec-9f2a-43bd-a3ab-01912f50282b;任务链接 URL 示例:https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + llm_description: 要添加的任务的 GUID,支持传任务 ID 和任务链接 URL。任务 ID 示例:8b5425ec-9f2a-43bd-a3ab-01912f50282b;任务链接 URL 示例:https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + form: llm + + - name: member_phone_or_email + type: string + required: true + label: + en_US: Task Member Phone Or Email + zh_Hans: 任务成员的电话或邮箱 + human_description: + en_US: A list of member emails or phone numbers, separated by commas. + zh_Hans: 任务成员邮箱或者手机号列表,使用逗号分隔。 + llm_description: 任务成员邮箱或者手机号列表,使用逗号分隔。 + form: llm + + - name: member_role + type: select + required: true + options: + - value: assignee + label: + en_US: assignee + zh_Hans: 负责人 + - value: follower + label: + en_US: follower + zh_Hans: 关注人 + default: "follower" + label: + en_US: member_role + zh_Hans: 成员的角色 + human_description: + en_US: Member role, optional values are "assignee" (responsible person) and "follower" (observer), with a default value of "assignee". 
+ zh_Hans: 成员的角色,可选值有 "assignee"(负责人)和 "follower"(关注人),默认值为 "assignee"。 + llm_description: 成员的角色,可选值有 "assignee"(负责人)和 "follower"(关注人),默认值为 "assignee"。 + form: form diff --git a/api/core/tools/provider/builtin/lark_task/tools/create_task.py b/api/core/tools/provider/builtin/lark_task/tools/create_task.py new file mode 100644 index 0000000000..ff37593fbe --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/tools/create_task.py @@ -0,0 +1,22 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class CreateTaskTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + summary = tool_parameters.get("summary") + start_time = tool_parameters.get("start_time") + end_time = tool_parameters.get("end_time") + completed_time = tool_parameters.get("completed_time") + description = tool_parameters.get("description") + + res = client.create_task(summary, start_time, end_time, completed_time, description) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_task/tools/create_task.yaml b/api/core/tools/provider/builtin/lark_task/tools/create_task.yaml new file mode 100644 index 0000000000..4303763a1d --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/tools/create_task.yaml @@ -0,0 +1,74 @@ +identity: + name: create_task + author: Doug Lea + label: + en_US: Create Lark Task + zh_Hans: 创建 Lark 任务 +description: + human: + en_US: Create Lark Task + zh_Hans: 创建 Lark 任务 + llm: A tool for creating tasks in Lark.(创建 Lark 任务) +parameters: + - name: summary + type: string + required: true + label: + en_US: Task Title + zh_Hans: 任务标题 + human_description: + en_US: The title of the 
task. + zh_Hans: 任务标题 + llm_description: 任务标题 + form: llm + + - name: description + type: string + required: false + label: + en_US: Task Description + zh_Hans: 任务备注 + human_description: + en_US: The description or notes for the task. + zh_Hans: 任务备注 + llm_description: 任务备注 + form: llm + + - name: start_time + type: string + required: false + label: + en_US: Start Time + zh_Hans: 任务开始时间 + human_description: + en_US: | + The start time of the task, in the format: 2006-01-02 15:04:05 + zh_Hans: 任务开始时间,格式为:2006-01-02 15:04:05 + llm_description: 任务开始时间,格式为:2006-01-02 15:04:05 + form: llm + + - name: end_time + type: string + required: false + label: + en_US: End Time + zh_Hans: 任务结束时间 + human_description: + en_US: | + The end time of the task, in the format: 2006-01-02 15:04:05 + zh_Hans: 任务结束时间,格式为:2006-01-02 15:04:05 + llm_description: 任务结束时间,格式为:2006-01-02 15:04:05 + form: llm + + - name: completed_time + type: string + required: false + label: + en_US: Completed Time + zh_Hans: 任务完成时间 + human_description: + en_US: | + The completion time of the task, in the format: 2006-01-02 15:04:05. Leave empty to create an incomplete task; fill in a specific time to create a completed task. 
+ zh_Hans: 任务完成时间,格式为:2006-01-02 15:04:05,不填写表示创建一个未完成任务;填写一个具体的时间表示创建一个已完成任务。 + llm_description: 任务完成时间,格式为:2006-01-02 15:04:05,不填写表示创建一个未完成任务;填写一个具体的时间表示创建一个已完成任务。 + form: llm diff --git a/api/core/tools/provider/builtin/lark_task/tools/delete_task.py b/api/core/tools/provider/builtin/lark_task/tools/delete_task.py new file mode 100644 index 0000000000..eca381be2c --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/tools/delete_task.py @@ -0,0 +1,18 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class UpdateTaskTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + task_guid = tool_parameters.get("task_guid") + + res = client.delete_task(task_guid) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_task/tools/delete_task.yaml b/api/core/tools/provider/builtin/lark_task/tools/delete_task.yaml new file mode 100644 index 0000000000..bc0154d9dc --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/tools/delete_task.yaml @@ -0,0 +1,24 @@ +identity: + name: delete_task + author: Doug Lea + label: + en_US: Delete Lark Task + zh_Hans: 删除 Lark 任务 +description: + human: + en_US: Delete Lark Task + zh_Hans: 删除 Lark 任务 + llm: A tool for deleting tasks in Lark.(删除 Lark 任务) +parameters: + - name: task_guid + type: string + required: true + label: + en_US: Task GUID + zh_Hans: 任务 GUID + human_description: + en_US: | + The GUID of the task to be deleted, supports passing either the Task ID or the Task link URL. 
Example of Task ID: 8b5425ec-9f2a-43bd-a3ab-01912f50282b; Example of Task link URL: https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + zh_Hans: 要删除的任务的 GUID,支持传任务 ID 和任务链接 URL。任务 ID 示例:8b5425ec-9f2a-43bd-a3ab-01912f50282b;任务链接 URL 示例:https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + llm_description: 要删除的任务的 GUID,支持传任务 ID 和任务链接 URL。任务 ID 示例:8b5425ec-9f2a-43bd-a3ab-01912f50282b;任务链接 URL 示例:https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + form: llm diff --git a/api/core/tools/provider/builtin/lark_task/tools/update_task.py b/api/core/tools/provider/builtin/lark_task/tools/update_task.py new file mode 100644 index 0000000000..0d3469c91a --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/tools/update_task.py @@ -0,0 +1,23 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class UpdateTaskTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + task_guid = tool_parameters.get("task_guid") + summary = tool_parameters.get("summary") + start_time = tool_parameters.get("start_time") + end_time = tool_parameters.get("end_time") + completed_time = tool_parameters.get("completed_time") + description = tool_parameters.get("description") + + res = client.update_task(task_guid, summary, start_time, end_time, completed_time, description) + + return self.create_json_message(res) diff --git a/api/core/tools/provider/builtin/lark_task/tools/update_task.yaml 
b/api/core/tools/provider/builtin/lark_task/tools/update_task.yaml new file mode 100644 index 0000000000..a98f037f21 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_task/tools/update_task.yaml @@ -0,0 +1,89 @@ +identity: + name: update_task + author: Doug Lea + label: + en_US: Update Lark Task + zh_Hans: 更新 Lark 任务 +description: + human: + en_US: Update Lark Task + zh_Hans: 更新 Lark 任务 + llm: A tool for updating tasks in Lark.(更新 Lark 任务) +parameters: + - name: task_guid + type: string + required: true + label: + en_US: Task GUID + zh_Hans: 任务 GUID + human_description: + en_US: | + The task ID, supports inputting either the Task ID or the Task link URL. Example of Task ID: 42cad8a0-f8c8-4344-9be2-d1d7e8e91b64; Example of Task link URL: https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + zh_Hans: | + 任务ID,支持传入任务 ID 和任务链接 URL。任务 ID 示例: 42cad8a0-f8c8-4344-9be2-d1d7e8e91b64;任务链接 URL 示例: https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + llm_description: | + 任务ID,支持传入任务 ID 和任务链接 URL。任务 ID 示例: 42cad8a0-f8c8-4344-9be2-d1d7e8e91b64;任务链接 URL 示例: https://applink.larksuite.com/client/todo/detail?guid=1b066afa-96de-406c-90a3-dfd30159a571&suite_entity_num=t100805 + form: llm + + - name: summary + type: string + required: true + label: + en_US: Task Title + zh_Hans: 任务标题 + human_description: + en_US: The title of the task. + zh_Hans: 任务标题 + llm_description: 任务标题 + form: llm + + - name: description + type: string + required: false + label: + en_US: Task Description + zh_Hans: 任务备注 + human_description: + en_US: The description or notes for the task. 
+ zh_Hans: 任务备注 + llm_description: 任务备注 + form: llm + + - name: start_time + type: string + required: false + label: + en_US: Start Time + zh_Hans: 任务开始时间 + human_description: + en_US: | + The start time of the task, in the format: 2006-01-02 15:04:05 + zh_Hans: 任务开始时间,格式为:2006-01-02 15:04:05 + llm_description: 任务开始时间,格式为:2006-01-02 15:04:05 + form: llm + + - name: end_time + type: string + required: false + label: + en_US: End Time + zh_Hans: 任务结束时间 + human_description: + en_US: | + The end time of the task, in the format: 2006-01-02 15:04:05 + zh_Hans: 任务结束时间,格式为:2006-01-02 15:04:05 + llm_description: 任务结束时间,格式为:2006-01-02 15:04:05 + form: llm + + - name: completed_time + type: string + required: false + label: + en_US: Completed Time + zh_Hans: 任务完成时间 + human_description: + en_US: | + The completion time of the task, in the format: 2006-01-02 15:04:05 + zh_Hans: 任务完成时间,格式为:2006-01-02 15:04:05 + llm_description: 任务完成时间,格式为:2006-01-02 15:04:05 + form: llm diff --git a/api/core/tools/provider/builtin/lark_wiki/_assets/icon.png b/api/core/tools/provider/builtin/lark_wiki/_assets/icon.png new file mode 100644 index 0000000000..47f6b8c30e Binary files /dev/null and b/api/core/tools/provider/builtin/lark_wiki/_assets/icon.png differ diff --git a/api/core/tools/provider/builtin/lark_wiki/lark_wiki.py b/api/core/tools/provider/builtin/lark_wiki/lark_wiki.py new file mode 100644 index 0000000000..e6941206ee --- /dev/null +++ b/api/core/tools/provider/builtin/lark_wiki/lark_wiki.py @@ -0,0 +1,7 @@ +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController +from core.tools.utils.lark_api_utils import lark_auth + + +class LarkWikiProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + lark_auth(credentials) diff --git a/api/core/tools/provider/builtin/lark_wiki/lark_wiki.yaml b/api/core/tools/provider/builtin/lark_wiki/lark_wiki.yaml new file mode 100644 index 0000000000..86bef00086 --- /dev/null +++ 
b/api/core/tools/provider/builtin/lark_wiki/lark_wiki.yaml @@ -0,0 +1,36 @@ +identity: + author: Doug Lea + name: lark_wiki + label: + en_US: Lark Wiki + zh_Hans: Lark 知识库 + description: + en_US: | + Lark Wiki, requires the following permissions: wiki:wiki:readonly. + zh_Hans: | + Lark 知识库,需要开通以下权限: wiki:wiki:readonly。 + icon: icon.png + tags: + - social + - productivity +credentials_for_provider: + app_id: + type: text-input + required: true + label: + en_US: APP ID + placeholder: + en_US: Please input your Lark app id + zh_Hans: 请输入你的 Lark app id + help: + en_US: Get your app_id and app_secret from Lark + zh_Hans: 从 Lark 获取您的 app_id 和 app_secret + url: https://open.larksuite.com/app + app_secret: + type: secret-input + required: true + label: + en_US: APP Secret + placeholder: + en_US: Please input your app secret + zh_Hans: 请输入你的 Lark app secret diff --git a/api/core/tools/provider/builtin/lark_wiki/tools/get_wiki_nodes.py b/api/core/tools/provider/builtin/lark_wiki/tools/get_wiki_nodes.py new file mode 100644 index 0000000000..a05f300755 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_wiki/tools/get_wiki_nodes.py @@ -0,0 +1,21 @@ +from typing import Any + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.utils.lark_api_utils import LarkRequest + + +class GetWikiNodesTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage: + app_id = self.runtime.credentials.get("app_id") + app_secret = self.runtime.credentials.get("app_secret") + client = LarkRequest(app_id, app_secret) + + space_id = tool_parameters.get("space_id") + parent_node_token = tool_parameters.get("parent_node_token") + page_token = tool_parameters.get("page_token") + page_size = tool_parameters.get("page_size") + + res = client.get_wiki_nodes(space_id, parent_node_token, page_token, page_size) + + return self.create_json_message(res) diff --git 
a/api/core/tools/provider/builtin/lark_wiki/tools/get_wiki_nodes.yaml b/api/core/tools/provider/builtin/lark_wiki/tools/get_wiki_nodes.yaml new file mode 100644 index 0000000000..a8c242a2e9 --- /dev/null +++ b/api/core/tools/provider/builtin/lark_wiki/tools/get_wiki_nodes.yaml @@ -0,0 +1,63 @@ +identity: + name: get_wiki_nodes + author: Doug Lea + label: + en_US: Get Wiki Nodes + zh_Hans: 获取知识空间子节点列表 +description: + human: + en_US: | + Get the list of child nodes in Wiki, make sure the app/bot is a member of the wiki space. See How to add an app as a wiki base administrator (member). https://open.larksuite.com/document/server-docs/docs/wiki-v2/wiki-qa + zh_Hans: | + 获取知识库全部子节点列表,请确保应用/机器人为知识空间成员。参阅如何将应用添加为知识库管理员(成员)。https://open.larksuite.com/document/server-docs/docs/wiki-v2/wiki-qa + llm: A tool for getting all sub-nodes of a knowledge base.(获取知识空间子节点列表) +parameters: + - name: space_id + type: string + required: true + label: + en_US: Space Id + zh_Hans: 知识空间 ID + human_description: + en_US: | + The ID of the knowledge space. Supports space link URL, for example: https://lark-japan.jp.larksuite.com/wiki/settings/7431084851517718561 + zh_Hans: 知识空间 ID,支持空间链接 URL,例如:https://lark-japan.jp.larksuite.com/wiki/settings/7431084851517718561 + llm_description: 知识空间 ID,支持空间链接 URL,例如:https://lark-japan.jp.larksuite.com/wiki/settings/7431084851517718561 + form: llm + + - name: page_size + type: number + required: false + default: 10 + label: + en_US: Page Size + zh_Hans: 分页大小 + human_description: + en_US: The size of each page, with a maximum value of 50. + zh_Hans: 分页大小,最大值 50。 + llm_description: 分页大小,最大值 50。 + form: form + + - name: page_token + type: string + required: false + label: + en_US: Page Token + zh_Hans: 分页标记 + human_description: + en_US: The pagination token. Leave empty for the first request to start from the beginning; if the paginated query result has more items, a new page_token will be returned, which can be used to get the next set of results. 
+ zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。 + form: llm + + - name: parent_node_token + type: string + required: false + label: + en_US: Parent Node Token + zh_Hans: 父节点 token + human_description: + en_US: The token of the parent node. + zh_Hans: 父节点 token + llm_description: 父节点 token + form: llm diff --git a/api/core/tools/provider/builtin/podcast_generator/podcast_generator.yaml b/api/core/tools/provider/builtin/podcast_generator/podcast_generator.yaml index bd02b32020..d4edb17b28 100644 --- a/api/core/tools/provider/builtin/podcast_generator/podcast_generator.yaml +++ b/api/core/tools/provider/builtin/podcast_generator/podcast_generator.yaml @@ -32,3 +32,15 @@ credentials_for_provider: placeholder: en_US: Enter your TTS service API key zh_Hans: 输入您的 TTS 服务 API 密钥 + openai_base_url: + type: text-input + required: false + label: + en_US: OpenAI base URL + zh_Hans: OpenAI base URL + help: + en_US: Please input your OpenAI base URL + zh_Hans: 请输入你的 OpenAI base URL + placeholder: + en_US: Please input your OpenAI base URL + zh_Hans: 请输入你的 OpenAI base URL diff --git a/api/core/tools/provider/builtin/podcast_generator/tools/podcast_audio_generator.py b/api/core/tools/provider/builtin/podcast_generator/tools/podcast_audio_generator.py index 476e2d01e1..165e93956e 100644 --- a/api/core/tools/provider/builtin/podcast_generator/tools/podcast_audio_generator.py +++ b/api/core/tools/provider/builtin/podcast_generator/tools/podcast_audio_generator.py @@ -5,6 +5,7 @@ import warnings from typing import Any, Literal, Optional, Union import openai +from yarl import URL from core.tools.entities.tool_entities import ToolInvokeMessage from core.tools.errors import ToolParameterValidationError, ToolProviderCredentialValidationError @@ -53,15 +54,24 @@ class PodcastAudioGeneratorTool(BuiltinTool): if not host1_voice or not host2_voice: 
raise ToolParameterValidationError("Host voices are required") - # Get OpenAI API key from credentials + # Ensure runtime and credentials if not self.runtime or not self.runtime.credentials: raise ToolProviderCredentialValidationError("Tool runtime or credentials are missing") + + # Get OpenAI API key from credentials api_key = self.runtime.credentials.get("api_key") if not api_key: raise ToolProviderCredentialValidationError("OpenAI API key is missing") + # Get OpenAI base URL + openai_base_url = self.runtime.credentials.get("openai_base_url", None) + openai_base_url = str(URL(openai_base_url) / "v1") if openai_base_url else None + # Initialize OpenAI client - client = openai.OpenAI(api_key=api_key) + client = openai.OpenAI( + api_key=api_key, + base_url=openai_base_url, + ) # Create a thread pool max_workers = 5 diff --git a/api/core/tools/provider/builtin/vanna/tools/vanna.yaml b/api/core/tools/provider/builtin/vanna/tools/vanna.yaml index 12ca8a862e..309681321b 100644 --- a/api/core/tools/provider/builtin/vanna/tools/vanna.yaml +++ b/api/core/tools/provider/builtin/vanna/tools/vanna.yaml @@ -32,7 +32,7 @@ parameters: en_US: RAG Model for your database DDL zh_Hans: 存储数据库训练数据的RAG模型 llm_description: RAG Model for generating SQL - form: form + form: llm - name: db_type type: select required: true @@ -136,7 +136,7 @@ parameters: human_description: en_US: DDL statements for training data zh_Hans: 用于训练RAG Model的建表语句 - form: form + form: llm - name: question type: string required: false @@ -146,7 +146,7 @@ parameters: human_description: en_US: Question-SQL Pairs zh_Hans: Question-SQL中的问题 - form: form + form: llm - name: sql type: string required: false @@ -156,7 +156,7 @@ parameters: human_description: en_US: SQL queries to your training data zh_Hans: 用于训练RAG Model的SQL语句 - form: form + form: llm - name: memos type: string required: false @@ -166,7 +166,7 @@ parameters: human_description: en_US: Sometimes you may want to add documentation about your business terminology 
or definitions zh_Hans: 添加更多关于数据库的业务说明 - form: form + form: llm - name: enable_training type: boolean required: false diff --git a/api/core/tools/provider/builtin/vectorizer/vectorizer.py b/api/core/tools/provider/builtin/vectorizer/vectorizer.py index 8140348723..211ec78f4d 100644 --- a/api/core/tools/provider/builtin/vectorizer/vectorizer.py +++ b/api/core/tools/provider/builtin/vectorizer/vectorizer.py @@ -1,19 +1,23 @@ from typing import Any -from core.file import File -from core.file.enums import FileTransferMethod, FileType +from core.file import FileTransferMethod, FileType from core.tools.errors import ToolProviderCredentialValidationError from core.tools.provider.builtin.vectorizer.tools.vectorizer import VectorizerTool from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController +from factories import file_factory class VectorizerProvider(BuiltinToolProviderController): def _validate_credentials(self, credentials: dict[str, Any]) -> None: - test_img = File( + mapping = { + "transfer_method": FileTransferMethod.TOOL_FILE, + "type": FileType.IMAGE, + "id": "test_id", + "url": "https://cloud.dify.ai/logo/logo-site.png", + } + test_img = file_factory.build_from_mapping( + mapping=mapping, tenant_id="__test_123", - remote_url="https://cloud.dify.ai/logo/logo-site.png", - type=FileType.IMAGE, - transfer_method=FileTransferMethod.REMOTE_URL, ) try: VectorizerTool().fork_tool_runtime( diff --git a/api/core/tools/tool_file_manager.py b/api/core/tools/tool_file_manager.py index 1a28df31bc..ff56e20e87 100644 --- a/api/core/tools/tool_file_manager.py +++ b/api/core/tools/tool_file_manager.py @@ -98,7 +98,7 @@ class ToolFileManager: response.raise_for_status() blob = response.content except Exception as e: - logger.error(f"Failed to download file from {file_url}: {e}") + logger.exception(f"Failed to download file from {file_url}: {e}") raise mimetype = guess_type(file_url)[0] or "octet/stream" diff --git a/api/core/tools/tool_manager.py 
b/api/core/tools/tool_manager.py index 6abe0a9cba..d2723df7b2 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -388,7 +388,7 @@ class ToolManager: yield provider except Exception as e: - logger.error(f"load builtin provider {provider} error: {e}") + logger.exception(f"load builtin provider {provider} error: {e}") continue # set builtin providers loaded cls._builtin_providers_loaded = True @@ -555,6 +555,7 @@ class ToolManager: """ get tool provider """ + provider_name = provider provider: ApiToolProvider = ( db.session.query(ApiToolProvider) .filter( @@ -565,7 +566,7 @@ class ToolManager: ) if provider is None: - raise ValueError(f"you have not added provider {provider}") + raise ValueError(f"you have not added provider {provider_name}") try: credentials = json.loads(provider.credentials_str) or {} diff --git a/api/core/tools/utils/feishu_api_utils.py b/api/core/tools/utils/feishu_api_utils.py index 722cf4b538..ea28037df0 100644 --- a/api/core/tools/utils/feishu_api_utils.py +++ b/api/core/tools/utils/feishu_api_utils.py @@ -127,7 +127,9 @@ class FeishuRequest: "folder_token": folder_token, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def write_document(self, document_id: str, content: str, position: str = "end") -> dict: url = f"{self.API_BASE_URL}/document/write_document" @@ -135,7 +137,7 @@ class FeishuRequest: res = self._send_request(url, payload=payload) return res - def get_document_content(self, document_id: str, mode: str = "markdown", lang: str = "0") -> dict: + def get_document_content(self, document_id: str, mode: str = "markdown", lang: str = "0") -> str: """ API url: https://open.larkoffice.com/document/server-docs/docs/docs/docx-v1/document/raw_content Example Response: @@ -154,7 +156,9 @@ class FeishuRequest: } url = f"{self.API_BASE_URL}/document/get_document_content" res = self._send_request(url, method="GET", params=params) - 
return res.get("data").get("content") + if "data" in res: + return res.get("data").get("content") + return "" def list_document_blocks( self, document_id: str, page_token: str, user_id_type: str = "open_id", page_size: int = 500 @@ -170,7 +174,9 @@ class FeishuRequest: } url = f"{self.API_BASE_URL}/document/list_document_blocks" res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def send_bot_message(self, receive_id_type: str, receive_id: str, msg_type: str, content: str) -> dict: """ @@ -186,7 +192,9 @@ class FeishuRequest: "content": content.strip('"').replace(r"\"", '"').replace(r"\\", "\\"), } res = self._send_request(url, params=params, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def send_webhook_message(self, webhook: str, msg_type: str, content: str) -> dict: url = f"{self.API_BASE_URL}/message/send_webhook_message" @@ -220,7 +228,9 @@ class FeishuRequest: "page_size": page_size, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def get_thread_messages( self, container_id: str, page_token: str, sort_type: str = "ByCreateTimeAsc", page_size: int = 20 @@ -236,7 +246,9 @@ class FeishuRequest: "page_size": page_size, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def create_task(self, summary: str, start_time: str, end_time: str, completed_time: str, description: str) -> dict: # 创建任务 @@ -249,7 +261,9 @@ class FeishuRequest: "description": description, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def update_task( self, task_guid: str, summary: str, start_time: str, end_time: str, completed_time: str, description: str @@ -265,7 +279,9 @@ class 
FeishuRequest: "description": description, } res = self._send_request(url, method="PATCH", payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def delete_task(self, task_guid: str) -> dict: # 删除任务 @@ -297,7 +313,9 @@ class FeishuRequest: "page_size": page_size, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def get_primary_calendar(self, user_id_type: str = "open_id") -> dict: url = f"{self.API_BASE_URL}/calendar/get_primary_calendar" @@ -305,7 +323,9 @@ class FeishuRequest: "user_id_type": user_id_type, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def create_event( self, @@ -328,7 +348,9 @@ class FeishuRequest: "attendee_ability": attendee_ability, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def update_event( self, @@ -374,7 +396,9 @@ class FeishuRequest: "page_size": page_size, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def search_events( self, @@ -395,7 +419,9 @@ class FeishuRequest: "page_size": page_size, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def add_event_attendees(self, event_id: str, attendee_phone_or_email: str, need_notification: bool = True) -> dict: # 参加日程参会人 @@ -406,7 +432,9 @@ class FeishuRequest: "need_notification": need_notification, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def create_spreadsheet( self, @@ -420,7 +448,9 @@ class FeishuRequest: "folder_token": folder_token, } res = self._send_request(url, payload=payload) - return res.get("data") + if 
"data" in res: + return res.get("data") + return res def get_spreadsheet( self, @@ -434,7 +464,9 @@ class FeishuRequest: "user_id_type": user_id_type, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def list_spreadsheet_sheets( self, @@ -446,7 +478,9 @@ class FeishuRequest: "spreadsheet_token": spreadsheet_token, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def add_rows( self, @@ -466,7 +500,9 @@ class FeishuRequest: "values": values, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def add_cols( self, @@ -486,7 +522,9 @@ class FeishuRequest: "values": values, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def read_rows( self, @@ -508,7 +546,9 @@ class FeishuRequest: "user_id_type": user_id_type, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def read_cols( self, @@ -530,7 +570,9 @@ class FeishuRequest: "user_id_type": user_id_type, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def read_table( self, @@ -552,7 +594,9 @@ class FeishuRequest: "user_id_type": user_id_type, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def create_base( self, @@ -566,7 +610,9 @@ class FeishuRequest: "folder_token": folder_token, } res = self._send_request(url, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def add_records( self, @@ -588,7 +634,9 @@ class FeishuRequest: "records": 
convert_add_records(records), } res = self._send_request(url, params=params, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def update_records( self, @@ -610,7 +658,9 @@ class FeishuRequest: "records": convert_update_records(records), } res = self._send_request(url, params=params, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def delete_records( self, @@ -637,7 +687,9 @@ class FeishuRequest: "records": record_id_list, } res = self._send_request(url, params=params, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def search_record( self, @@ -701,7 +753,10 @@ class FeishuRequest: if automatic_fields: payload["automatic_fields"] = automatic_fields res = self._send_request(url, params=params, payload=payload) - return res.get("data") + + if "data" in res: + return res.get("data") + return res def get_base_info( self, @@ -713,7 +768,9 @@ class FeishuRequest: "app_token": app_token, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def create_table( self, @@ -741,7 +798,9 @@ class FeishuRequest: if default_view_name: payload["default_view_name"] = default_view_name res = self._send_request(url, params=params, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def delete_tables( self, @@ -774,8 +833,11 @@ class FeishuRequest: "table_ids": table_id_list, "table_names": table_name_list, } + res = self._send_request(url, params=params, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res def list_tables( self, @@ -791,7 +853,9 @@ class FeishuRequest: "page_size": page_size, } res = self._send_request(url, method="GET", params=params) - return res.get("data") + if "data" in res: + return res.get("data") + return res def read_records( 
self, @@ -819,4 +883,6 @@ class FeishuRequest: "user_id_type": user_id_type, } res = self._send_request(url, method="GET", params=params, payload=payload) - return res.get("data") + if "data" in res: + return res.get("data") + return res diff --git a/api/core/tools/utils/lark_api_utils.py b/api/core/tools/utils/lark_api_utils.py new file mode 100644 index 0000000000..30cb0cb141 --- /dev/null +++ b/api/core/tools/utils/lark_api_utils.py @@ -0,0 +1,820 @@ +import json +from typing import Optional + +import httpx + +from core.tools.errors import ToolProviderCredentialValidationError +from extensions.ext_redis import redis_client + + +def lark_auth(credentials): + app_id = credentials.get("app_id") + app_secret = credentials.get("app_secret") + if not app_id or not app_secret: + raise ToolProviderCredentialValidationError("app_id and app_secret is required") + try: + assert LarkRequest(app_id, app_secret).tenant_access_token is not None + except Exception as e: + raise ToolProviderCredentialValidationError(str(e)) + + +class LarkRequest: + API_BASE_URL = "https://lark-plugin-api.solutionsuite.ai/lark-plugin" + + def __init__(self, app_id: str, app_secret: str): + self.app_id = app_id + self.app_secret = app_secret + + def convert_add_records(self, json_str): + try: + data = json.loads(json_str) + if not isinstance(data, list): + raise ValueError("Parsed data must be a list") + converted_data = [{"fields": json.dumps(item, ensure_ascii=False)} for item in data] + return converted_data + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + except Exception as e: + raise ValueError(f"An error occurred while processing the data: {e}") + + def convert_update_records(self, json_str): + try: + data = json.loads(json_str) + if not isinstance(data, list): + raise ValueError("Parsed data must be a list") + + converted_data = [ + {"fields": json.dumps(record["fields"], ensure_ascii=False), "record_id": record["record_id"]} + for record in data + 
if "fields" in record and "record_id" in record + ] + + if len(converted_data) != len(data): + raise ValueError("Each record must contain 'fields' and 'record_id'") + + return converted_data + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + except Exception as e: + raise ValueError(f"An error occurred while processing the data: {e}") + + @property + def tenant_access_token(self) -> str: + feishu_tenant_access_token = f"tools:{self.app_id}:feishu_tenant_access_token" + if redis_client.exists(feishu_tenant_access_token): + return redis_client.get(feishu_tenant_access_token).decode() + res = self.get_tenant_access_token(self.app_id, self.app_secret) + redis_client.setex(feishu_tenant_access_token, res.get("expire"), res.get("tenant_access_token")) + if "tenant_access_token" in res: + return res.get("tenant_access_token") + return "" + + def _send_request( + self, + url: str, + method: str = "post", + require_token: bool = True, + payload: Optional[dict] = None, + params: Optional[dict] = None, + ): + headers = { + "Content-Type": "application/json", + "user-agent": "Dify", + } + if require_token: + headers["tenant-access-token"] = f"{self.tenant_access_token}" + res = httpx.request(method=method, url=url, headers=headers, json=payload, params=params, timeout=30).json() + if res.get("code") != 0: + raise Exception(res) + return res + + def get_tenant_access_token(self, app_id: str, app_secret: str) -> dict: + url = f"{self.API_BASE_URL}/access_token/get_tenant_access_token" + payload = {"app_id": app_id, "app_secret": app_secret} + res = self._send_request(url, require_token=False, payload=payload) + return res + + def create_document(self, title: str, content: str, folder_token: str) -> dict: + url = f"{self.API_BASE_URL}/document/create_document" + payload = { + "title": title, + "content": content, + "folder_token": folder_token, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") 
+ return res + + def write_document(self, document_id: str, content: str, position: str = "end") -> dict: + url = f"{self.API_BASE_URL}/document/write_document" + payload = {"document_id": document_id, "content": content, "position": position} + res = self._send_request(url, payload=payload) + return res + + def get_document_content(self, document_id: str, mode: str = "markdown", lang: str = "0") -> str | dict: + params = { + "document_id": document_id, + "mode": mode, + "lang": lang, + } + url = f"{self.API_BASE_URL}/document/get_document_content" + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data").get("content") + return "" + + def list_document_blocks( + self, document_id: str, page_token: str, user_id_type: str = "open_id", page_size: int = 500 + ) -> dict: + params = { + "user_id_type": user_id_type, + "document_id": document_id, + "page_size": page_size, + "page_token": page_token, + } + url = f"{self.API_BASE_URL}/document/list_document_blocks" + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def send_bot_message(self, receive_id_type: str, receive_id: str, msg_type: str, content: str) -> dict: + url = f"{self.API_BASE_URL}/message/send_bot_message" + params = { + "receive_id_type": receive_id_type, + } + payload = { + "receive_id": receive_id, + "msg_type": msg_type, + "content": content.strip('"').replace(r"\"", '"').replace(r"\\", "\\"), + } + res = self._send_request(url, params=params, payload=payload) + if "data" in res: + return res.get("data") + return res + + def send_webhook_message(self, webhook: str, msg_type: str, content: str) -> dict: + url = f"{self.API_BASE_URL}/message/send_webhook_message" + payload = { + "webhook": webhook, + "msg_type": msg_type, + "content": content.strip('"').replace(r"\"", '"').replace(r"\\", "\\"), + } + res = self._send_request(url, require_token=False, payload=payload) + return res + + def 
get_chat_messages( + self, + container_id: str, + start_time: str, + end_time: str, + page_token: str, + sort_type: str = "ByCreateTimeAsc", + page_size: int = 20, + ) -> dict: + url = f"{self.API_BASE_URL}/message/get_chat_messages" + params = { + "container_id": container_id, + "start_time": start_time, + "end_time": end_time, + "sort_type": sort_type, + "page_token": page_token, + "page_size": page_size, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def get_thread_messages( + self, container_id: str, page_token: str, sort_type: str = "ByCreateTimeAsc", page_size: int = 20 + ) -> dict: + url = f"{self.API_BASE_URL}/message/get_thread_messages" + params = { + "container_id": container_id, + "sort_type": sort_type, + "page_token": page_token, + "page_size": page_size, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def create_task(self, summary: str, start_time: str, end_time: str, completed_time: str, description: str) -> dict: + url = f"{self.API_BASE_URL}/task/create_task" + payload = { + "summary": summary, + "start_time": start_time, + "end_time": end_time, + "completed_at": completed_time, + "description": description, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def update_task( + self, task_guid: str, summary: str, start_time: str, end_time: str, completed_time: str, description: str + ) -> dict: + url = f"{self.API_BASE_URL}/task/update_task" + payload = { + "task_guid": task_guid, + "summary": summary, + "start_time": start_time, + "end_time": end_time, + "completed_time": completed_time, + "description": description, + } + res = self._send_request(url, method="PATCH", payload=payload) + if "data" in res: + return res.get("data") + return res + + def delete_task(self, task_guid: str) -> dict: + url = 
f"{self.API_BASE_URL}/task/delete_task" + payload = { + "task_guid": task_guid, + } + res = self._send_request(url, method="DELETE", payload=payload) + if "data" in res: + return res.get("data") + return res + + def add_members(self, task_guid: str, member_phone_or_email: str, member_role: str) -> dict: + url = f"{self.API_BASE_URL}/task/add_members" + payload = { + "task_guid": task_guid, + "member_phone_or_email": member_phone_or_email, + "member_role": member_role, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def get_wiki_nodes(self, space_id: str, parent_node_token: str, page_token: str, page_size: int = 20) -> dict: + url = f"{self.API_BASE_URL}/wiki/get_wiki_nodes" + payload = { + "space_id": space_id, + "parent_node_token": parent_node_token, + "page_token": page_token, + "page_size": page_size, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def get_primary_calendar(self, user_id_type: str = "open_id") -> dict: + url = f"{self.API_BASE_URL}/calendar/get_primary_calendar" + params = { + "user_id_type": user_id_type, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def create_event( + self, + summary: str, + description: str, + start_time: str, + end_time: str, + attendee_ability: str, + need_notification: bool = True, + auto_record: bool = False, + ) -> dict: + url = f"{self.API_BASE_URL}/calendar/create_event" + payload = { + "summary": summary, + "description": description, + "need_notification": need_notification, + "start_time": start_time, + "end_time": end_time, + "auto_record": auto_record, + "attendee_ability": attendee_ability, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def update_event( + self, + event_id: str, + summary: str, + description: str, + need_notification: 
bool, + start_time: str, + end_time: str, + auto_record: bool, + ) -> dict: + url = f"{self.API_BASE_URL}/calendar/update_event/{event_id}" + payload = {} + if summary: + payload["summary"] = summary + if description: + payload["description"] = description + if start_time: + payload["start_time"] = start_time + if end_time: + payload["end_time"] = end_time + if need_notification: + payload["need_notification"] = need_notification + if auto_record: + payload["auto_record"] = auto_record + res = self._send_request(url, method="PATCH", payload=payload) + return res + + def delete_event(self, event_id: str, need_notification: bool = True) -> dict: + url = f"{self.API_BASE_URL}/calendar/delete_event/{event_id}" + params = { + "need_notification": need_notification, + } + res = self._send_request(url, method="DELETE", params=params) + return res + + def list_events(self, start_time: str, end_time: str, page_token: str, page_size: int = 50) -> dict: + url = f"{self.API_BASE_URL}/calendar/list_events" + params = { + "start_time": start_time, + "end_time": end_time, + "page_token": page_token, + "page_size": page_size, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def search_events( + self, + query: str, + start_time: str, + end_time: str, + page_token: str, + user_id_type: str = "open_id", + page_size: int = 20, + ) -> dict: + url = f"{self.API_BASE_URL}/calendar/search_events" + payload = { + "query": query, + "start_time": start_time, + "end_time": end_time, + "page_token": page_token, + "user_id_type": user_id_type, + "page_size": page_size, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def add_event_attendees(self, event_id: str, attendee_phone_or_email: str, need_notification: bool = True) -> dict: + url = f"{self.API_BASE_URL}/calendar/add_event_attendees" + payload = { + "event_id": event_id, + 
"attendee_phone_or_email": attendee_phone_or_email, + "need_notification": need_notification, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def create_spreadsheet( + self, + title: str, + folder_token: str, + ) -> dict: + url = f"{self.API_BASE_URL}/spreadsheet/create_spreadsheet" + payload = { + "title": title, + "folder_token": folder_token, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def get_spreadsheet( + self, + spreadsheet_token: str, + user_id_type: str = "open_id", + ) -> dict: + url = f"{self.API_BASE_URL}/spreadsheet/get_spreadsheet" + params = { + "spreadsheet_token": spreadsheet_token, + "user_id_type": user_id_type, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def list_spreadsheet_sheets( + self, + spreadsheet_token: str, + ) -> dict: + url = f"{self.API_BASE_URL}/spreadsheet/list_spreadsheet_sheets" + params = { + "spreadsheet_token": spreadsheet_token, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def add_rows( + self, + spreadsheet_token: str, + sheet_id: str, + sheet_name: str, + length: int, + values: str, + ) -> dict: + url = f"{self.API_BASE_URL}/spreadsheet/add_rows" + payload = { + "spreadsheet_token": spreadsheet_token, + "sheet_id": sheet_id, + "sheet_name": sheet_name, + "length": length, + "values": values, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def add_cols( + self, + spreadsheet_token: str, + sheet_id: str, + sheet_name: str, + length: int, + values: str, + ) -> dict: + url = f"{self.API_BASE_URL}/spreadsheet/add_cols" + payload = { + "spreadsheet_token": spreadsheet_token, + "sheet_id": sheet_id, + "sheet_name": sheet_name, + "length": length, + "values": values, + 
} + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res + + def read_rows( + self, + spreadsheet_token: str, + sheet_id: str, + sheet_name: str, + start_row: int, + num_rows: int, + user_id_type: str = "open_id", + ) -> dict: + url = f"{self.API_BASE_URL}/spreadsheet/read_rows" + params = { + "spreadsheet_token": spreadsheet_token, + "sheet_id": sheet_id, + "sheet_name": sheet_name, + "start_row": start_row, + "num_rows": num_rows, + "user_id_type": user_id_type, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def read_cols( + self, + spreadsheet_token: str, + sheet_id: str, + sheet_name: str, + start_col: int, + num_cols: int, + user_id_type: str = "open_id", + ) -> dict: + url = f"{self.API_BASE_URL}/spreadsheet/read_cols" + params = { + "spreadsheet_token": spreadsheet_token, + "sheet_id": sheet_id, + "sheet_name": sheet_name, + "start_col": start_col, + "num_cols": num_cols, + "user_id_type": user_id_type, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def read_table( + self, + spreadsheet_token: str, + sheet_id: str, + sheet_name: str, + num_range: str, + query: str, + user_id_type: str = "open_id", + ) -> dict: + url = f"{self.API_BASE_URL}/spreadsheet/read_table" + params = { + "spreadsheet_token": spreadsheet_token, + "sheet_id": sheet_id, + "sheet_name": sheet_name, + "range": num_range, + "query": query, + "user_id_type": user_id_type, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def create_base( + self, + name: str, + folder_token: str, + ) -> dict: + url = f"{self.API_BASE_URL}/base/create_base" + payload = { + "name": name, + "folder_token": folder_token, + } + res = self._send_request(url, payload=payload) + if "data" in res: + return res.get("data") + return res 
+ + def add_records( + self, + app_token: str, + table_id: str, + table_name: str, + records: str, + user_id_type: str = "open_id", + ) -> dict: + url = f"{self.API_BASE_URL}/base/add_records" + params = { + "app_token": app_token, + "table_id": table_id, + "table_name": table_name, + "user_id_type": user_id_type, + } + payload = { + "records": self.convert_add_records(records), + } + res = self._send_request(url, params=params, payload=payload) + if "data" in res: + return res.get("data") + return res + + def update_records( + self, + app_token: str, + table_id: str, + table_name: str, + records: str, + user_id_type: str, + ) -> dict: + url = f"{self.API_BASE_URL}/base/update_records" + params = { + "app_token": app_token, + "table_id": table_id, + "table_name": table_name, + "user_id_type": user_id_type, + } + payload = { + "records": self.convert_update_records(records), + } + res = self._send_request(url, params=params, payload=payload) + if "data" in res: + return res.get("data") + return res + + def delete_records( + self, + app_token: str, + table_id: str, + table_name: str, + record_ids: str, + ) -> dict: + url = f"{self.API_BASE_URL}/base/delete_records" + params = { + "app_token": app_token, + "table_id": table_id, + "table_name": table_name, + } + if not record_ids: + record_id_list = [] + else: + try: + record_id_list = json.loads(record_ids) + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + payload = { + "records": record_id_list, + } + res = self._send_request(url, params=params, payload=payload) + if "data" in res: + return res.get("data") + return res + + def search_record( + self, + app_token: str, + table_id: str, + table_name: str, + view_id: str, + field_names: str, + sort: str, + filters: str, + page_token: str, + automatic_fields: bool = False, + user_id_type: str = "open_id", + page_size: int = 20, + ) -> dict: + url = f"{self.API_BASE_URL}/base/search_record" + + params = { + "app_token": app_token, + 
"table_id": table_id, + "table_name": table_name, + "user_id_type": user_id_type, + "page_token": page_token, + "page_size": page_size, + } + + if not field_names: + field_name_list = [] + else: + try: + field_name_list = json.loads(field_names) + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + + if not sort: + sort_list = [] + else: + try: + sort_list = json.loads(sort) + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + + if not filters: + filter_dict = {} + else: + try: + filter_dict = json.loads(filters) + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + + payload = {} + + if view_id: + payload["view_id"] = view_id + if field_names: + payload["field_names"] = field_name_list + if sort: + payload["sort"] = sort_list + if filters: + payload["filter"] = filter_dict + if automatic_fields: + payload["automatic_fields"] = automatic_fields + res = self._send_request(url, params=params, payload=payload) + if "data" in res: + return res.get("data") + return res + + def get_base_info( + self, + app_token: str, + ) -> dict: + url = f"{self.API_BASE_URL}/base/get_base_info" + params = { + "app_token": app_token, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def create_table( + self, + app_token: str, + table_name: str, + default_view_name: str, + fields: str, + ) -> dict: + url = f"{self.API_BASE_URL}/base/create_table" + params = { + "app_token": app_token, + } + if not fields: + fields_list = [] + else: + try: + fields_list = json.loads(fields) + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + payload = { + "name": table_name, + "fields": fields_list, + } + if default_view_name: + payload["default_view_name"] = default_view_name + res = self._send_request(url, params=params, payload=payload) + if "data" in res: + return res.get("data") + 
return res + + def delete_tables( + self, + app_token: str, + table_ids: str, + table_names: str, + ) -> dict: + url = f"{self.API_BASE_URL}/base/delete_tables" + params = { + "app_token": app_token, + } + if not table_ids: + table_id_list = [] + else: + try: + table_id_list = json.loads(table_ids) + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + + if not table_names: + table_name_list = [] + else: + try: + table_name_list = json.loads(table_names) + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + + payload = { + "table_ids": table_id_list, + "table_names": table_name_list, + } + res = self._send_request(url, params=params, payload=payload) + if "data" in res: + return res.get("data") + return res + + def list_tables( + self, + app_token: str, + page_token: str, + page_size: int = 20, + ) -> dict: + url = f"{self.API_BASE_URL}/base/list_tables" + params = { + "app_token": app_token, + "page_token": page_token, + "page_size": page_size, + } + res = self._send_request(url, method="GET", params=params) + if "data" in res: + return res.get("data") + return res + + def read_records( + self, + app_token: str, + table_id: str, + table_name: str, + record_ids: str, + user_id_type: str = "open_id", + ) -> dict: + url = f"{self.API_BASE_URL}/base/read_records" + params = { + "app_token": app_token, + "table_id": table_id, + "table_name": table_name, + } + if not record_ids: + record_id_list = [] + else: + try: + record_id_list = json.loads(record_ids) + except json.JSONDecodeError: + raise ValueError("The input string is not valid JSON") + payload = { + "record_ids": record_id_list, + "user_id_type": user_id_type, + } + res = self._send_request(url, method="POST", params=params, payload=payload) + if "data" in res: + return res.get("data") + return res diff --git a/api/core/tools/utils/web_reader_tool.py b/api/core/tools/utils/web_reader_tool.py index 5807d61b94..3aae31e93a 100644 --- 
a/api/core/tools/utils/web_reader_tool.py +++ b/api/core/tools/utils/web_reader_tool.py @@ -356,3 +356,19 @@ def content_digest(element): digest.update(child.encode("utf-8")) digest = digest.hexdigest() return digest + + +def get_image_upload_file_ids(content): + pattern = r"!\[image\]\((http?://.*?(file-preview|image-preview))\)" + matches = re.findall(pattern, content) + image_upload_file_ids = [] + for match in matches: + if match[1] == "file-preview": + content_pattern = r"files/([^/]+)/file-preview" + else: + content_pattern = r"files/([^/]+)/image-preview" + content_match = re.search(content_pattern, match[0]) + if content_match: + image_upload_file_id = content_match.group(1) + image_upload_file_ids.append(image_upload_file_id) + return image_upload_file_ids diff --git a/api/core/variables/__init__.py b/api/core/variables/__init__.py index 87f9e3ed45..144c1b899f 100644 --- a/api/core/variables/__init__.py +++ b/api/core/variables/__init__.py @@ -17,6 +17,7 @@ from .segments import ( from .types import SegmentType from .variables import ( ArrayAnyVariable, + ArrayFileVariable, ArrayNumberVariable, ArrayObjectVariable, ArrayStringVariable, @@ -58,4 +59,5 @@ __all__ = [ "ArrayStringSegment", "FileSegment", "FileVariable", + "ArrayFileVariable", ] diff --git a/api/core/variables/variables.py b/api/core/variables/variables.py index ddc6914192..c902303eef 100644 --- a/api/core/variables/variables.py +++ b/api/core/variables/variables.py @@ -1,9 +1,13 @@ +from collections.abc import Sequence +from uuid import uuid4 + from pydantic import Field from core.helper import encrypter from .segments import ( ArrayAnySegment, + ArrayFileSegment, ArrayNumberSegment, ArrayObjectSegment, ArrayStringSegment, @@ -24,11 +28,12 @@ class Variable(Segment): """ id: str = Field( - default="", - description="Unique identity for variable. 
It's only used by environment variables now.", + default=lambda _: str(uuid4()), + description="Unique identity for variable.", ) name: str description: str = Field(default="", description="Description of the variable.") + selector: Sequence[str] = Field(default_factory=list) class StringVariable(StringSegment, Variable): @@ -78,3 +83,7 @@ class NoneVariable(NoneSegment, Variable): class FileVariable(FileSegment, Variable): pass + + +class ArrayFileVariable(ArrayFileSegment, Variable): + pass diff --git a/api/core/workflow/entities/node_entities.py b/api/core/workflow/entities/node_entities.py index 7e10cddc71..a747266661 100644 --- a/api/core/workflow/entities/node_entities.py +++ b/api/core/workflow/entities/node_entities.py @@ -24,6 +24,7 @@ class NodeRunMetadataKey(str, Enum): PARENT_PARALLEL_ID = "parent_parallel_id" PARENT_PARALLEL_START_NODE_ID = "parent_parallel_start_node_id" PARALLEL_MODE_RUN_ID = "parallel_mode_run_id" + ITERATION_DURATION_MAP = "iteration_duration_map" # single iteration duration if iteration node runs class NodeRunResult(BaseModel): diff --git a/api/core/workflow/entities/variable_pool.py b/api/core/workflow/entities/variable_pool.py index 3dc3395da1..844b46f352 100644 --- a/api/core/workflow/entities/variable_pool.py +++ b/api/core/workflow/entities/variable_pool.py @@ -95,13 +95,16 @@ class VariablePool(BaseModel): if len(selector) < 2: raise ValueError("Invalid selector") + if isinstance(value, Variable): + variable = value if isinstance(value, Segment): - v = value + variable = variable_factory.segment_to_variable(segment=value, selector=selector) else: - v = variable_factory.build_segment(value) + segment = variable_factory.build_segment(value) + variable = variable_factory.segment_to_variable(segment=segment, selector=selector) hash_key = hash(tuple(selector[1:])) - self.variable_dictionary[selector[0]][hash_key] = v + self.variable_dictionary[selector[0]][hash_key] = variable def get(self, selector: Sequence[str], /) -> Segment 
| None: """ diff --git a/api/core/workflow/graph_engine/entities/event.py b/api/core/workflow/graph_engine/entities/event.py index bacea191dd..3736e632c3 100644 --- a/api/core/workflow/graph_engine/entities/event.py +++ b/api/core/workflow/graph_engine/entities/event.py @@ -148,6 +148,7 @@ class IterationRunStartedEvent(BaseIterationEvent): class IterationRunNextEvent(BaseIterationEvent): index: int = Field(..., description="index") pre_iteration_output: Optional[Any] = Field(None, description="pre iteration output") + duration: Optional[float] = Field(None, description="duration") class IterationRunSucceededEvent(BaseIterationEvent): @@ -156,6 +157,7 @@ class IterationRunSucceededEvent(BaseIterationEvent): outputs: Optional[dict[str, Any]] = None metadata: Optional[dict[str, Any]] = None steps: int = 0 + iteration_duration_map: Optional[dict[str, float]] = None class IterationRunFailedEvent(BaseIterationEvent): diff --git a/api/core/workflow/nodes/base/node.py b/api/core/workflow/nodes/base/node.py index 053a339ba7..1433c8eaed 100644 --- a/api/core/workflow/nodes/base/node.py +++ b/api/core/workflow/nodes/base/node.py @@ -69,7 +69,7 @@ class BaseNode(Generic[GenericNodeData]): try: result = self._run() except Exception as e: - logger.error(f"Node {self.node_id} failed to run: {e}") + logger.exception(f"Node {self.node_id} failed to run: {e}") result = NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, error=str(e), diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index de70af58dd..ce283e38ec 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -49,13 +49,7 @@ class CodeNode(BaseNode[CodeNodeData]): for variable_selector in self.node_data.variables: variable_name = variable_selector.variable variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector) - if variable is None: - return NodeRunResult( - 
status=WorkflowNodeExecutionStatus.FAILED, - inputs=variables, - error=f"Variable `{variable_selector.value_selector}` not found", - ) - variables[variable_name] = variable.to_object() + variables[variable_name] = variable.to_object() if variable else None # Run code try: result = CodeExecutor.execute_workflow_code_template( diff --git a/api/core/workflow/nodes/http_request/executor.py b/api/core/workflow/nodes/http_request/executor.py index d90dfcc766..80b322b068 100644 --- a/api/core/workflow/nodes/http_request/executor.py +++ b/api/core/workflow/nodes/http_request/executor.py @@ -97,15 +97,6 @@ class Executor: headers = self.variable_pool.convert_template(self.node_data.headers).text self.headers = _plain_text_to_dict(headers) - body = self.node_data.body - if body is None: - return - if "content-type" not in (k.lower() for k in self.headers) and body.type in BODY_TYPE_TO_CONTENT_TYPE: - self.headers["Content-Type"] = BODY_TYPE_TO_CONTENT_TYPE[body.type] - if body.type == "form-data": - self.boundary = f"----WebKitFormBoundary{_generate_random_string(16)}" - self.headers["Content-Type"] = f"multipart/form-data; boundary={self.boundary}" - def _init_body(self): body = self.node_data.body if body is not None: @@ -154,9 +145,8 @@ class Executor: for k, v in files.items() if v.related_id is not None } - self.data = form_data - self.files = files + self.files = files or None def _assembling_headers(self) -> dict[str, Any]: authorization = deepcopy(self.auth) @@ -217,6 +207,7 @@ class Executor: "timeout": (self.timeout.connect, self.timeout.read, self.timeout.write), "follow_redirects": True, } + # request_args = {k: v for k, v in request_args.items() if v is not None} response = getattr(ssrf_proxy, self.method)(**request_args) return response @@ -244,6 +235,13 @@ class Executor: raw += f"Host: {url_parts.netloc}\r\n" headers = self._assembling_headers() + body = self.node_data.body + boundary = f"----WebKitFormBoundary{_generate_random_string(16)}" + if body: + if 
"content-type" not in (k.lower() for k in self.headers) and body.type in BODY_TYPE_TO_CONTENT_TYPE: + headers["Content-Type"] = BODY_TYPE_TO_CONTENT_TYPE[body.type] + if body.type == "form-data": + headers["Content-Type"] = f"multipart/form-data; boundary={boundary}" for k, v in headers.items(): if self.auth.type == "api-key": authorization_header = "Authorization" @@ -256,7 +254,6 @@ class Executor: body = "" if self.files: - boundary = self.boundary for k, v in self.files.items(): body += f"--{boundary}\r\n" body += f'Content-Disposition: form-data; name="{k}"\r\n\r\n' @@ -271,7 +268,6 @@ class Executor: elif self.data and self.node_data.body.type == "x-www-form-urlencoded": body = urlencode(self.data) elif self.data and self.node_data.body.type == "form-data": - boundary = self.boundary for key, value in self.data.items(): body += f"--{boundary}\r\n" body += f'Content-Disposition: form-data; name="{key}"\r\n\r\n' diff --git a/api/core/workflow/nodes/http_request/node.py b/api/core/workflow/nodes/http_request/node.py index 61c661e587..5b399bed63 100644 --- a/api/core/workflow/nodes/http_request/node.py +++ b/api/core/workflow/nodes/http_request/node.py @@ -13,6 +13,7 @@ from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.http_request.executor import Executor from core.workflow.utils import variable_template_parser +from factories import file_factory from models.workflow import WorkflowNodeExecutionStatus from .entities import ( @@ -161,16 +162,15 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]): mimetype=content_type, ) - files.append( - File( - tenant_id=self.tenant_id, - type=FileType.IMAGE, - transfer_method=FileTransferMethod.TOOL_FILE, - related_id=tool_file.id, - filename=filename, - extension=extension, - mime_type=content_type, - ) + mapping = { + "tool_file_id": tool_file.id, + "type": FileType.IMAGE.value, + "transfer_method": FileTransferMethod.TOOL_FILE.value, + } + file = 
file_factory.build_from_mapping( + mapping=mapping, + tenant_id=self.tenant_id, ) + files.append(file) return files diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index e1d2b88360..d5428f0286 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -156,7 +156,8 @@ class IterationNode(BaseNode[IterationNodeData]): index=0, pre_iteration_output=None, ) - outputs: list[Any] = [] + iter_run_map: dict[str, float] = {} + outputs: list[Any] = [None] * len(iterator_list_value) try: if self.node_data.is_parallel: futures: list[Future] = [] @@ -175,6 +176,7 @@ class IterationNode(BaseNode[IterationNodeData]): iteration_graph, index, item, + iter_run_map, ) future.add_done_callback(thread_pool.task_done_callback) futures.append(future) @@ -213,7 +215,10 @@ class IterationNode(BaseNode[IterationNodeData]): start_at, graph_engine, iteration_graph, + iter_run_map, ) + if self.node_data.error_handle_mode == ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT: + outputs = [output for output in outputs if output is not None] yield IterationRunSucceededEvent( iteration_id=self.id, iteration_node_id=self.node_id, @@ -228,7 +233,9 @@ class IterationNode(BaseNode[IterationNodeData]): yield RunCompletedEvent( run_result=NodeRunResult( - status=WorkflowNodeExecutionStatus.SUCCEEDED, outputs={"output": jsonable_encoder(outputs)} + status=WorkflowNodeExecutionStatus.SUCCEEDED, + outputs={"output": jsonable_encoder(outputs)}, + metadata={NodeRunMetadataKey.ITERATION_DURATION_MAP: iter_run_map}, ) ) except IterationNodeError as e: @@ -354,15 +361,19 @@ class IterationNode(BaseNode[IterationNodeData]): start_at: datetime, graph_engine: "GraphEngine", iteration_graph: Graph, + iter_run_map: dict[str, float], parallel_mode_run_id: Optional[str] = None, ) -> Generator[NodeEvent | InNodeEvent, None, None]: """ run single iteration """ + iter_start_at = 
datetime.now(timezone.utc).replace(tzinfo=None) + try: rst = graph_engine.run() # get current iteration index current_index = variable_pool.get([self.node_id, "index"]).value + iteration_run_id = parallel_mode_run_id if parallel_mode_run_id is not None else f"{current_index}" next_index = int(current_index) + 1 if current_index is None: @@ -425,10 +436,12 @@ class IterationNode(BaseNode[IterationNodeData]): yield NodeInIterationFailedEvent( **metadata_event.model_dump(), ) - outputs.insert(current_index, None) + outputs[current_index] = None variable_pool.add([self.node_id, "index"], next_index) if next_index < len(iterator_list_value): variable_pool.add([self.node_id, "item"], iterator_list_value[next_index]) + duration = (datetime.now(timezone.utc).replace(tzinfo=None) - iter_start_at).total_seconds() + iter_run_map[iteration_run_id] = duration yield IterationRunNextEvent( iteration_id=self.id, iteration_node_id=self.node_id, @@ -437,6 +450,7 @@ class IterationNode(BaseNode[IterationNodeData]): index=next_index, parallel_mode_run_id=parallel_mode_run_id, pre_iteration_output=None, + duration=duration, ) return elif self.node_data.error_handle_mode == ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT: @@ -447,6 +461,8 @@ class IterationNode(BaseNode[IterationNodeData]): if next_index < len(iterator_list_value): variable_pool.add([self.node_id, "item"], iterator_list_value[next_index]) + duration = (datetime.now(timezone.utc).replace(tzinfo=None) - iter_start_at).total_seconds() + iter_run_map[iteration_run_id] = duration yield IterationRunNextEvent( iteration_id=self.id, iteration_node_id=self.node_id, @@ -455,6 +471,7 @@ class IterationNode(BaseNode[IterationNodeData]): index=next_index, parallel_mode_run_id=parallel_mode_run_id, pre_iteration_output=None, + duration=duration, ) return elif self.node_data.error_handle_mode == ErrorHandleMode.TERMINATED: @@ -472,8 +489,11 @@ class IterationNode(BaseNode[IterationNodeData]): ) yield metadata_event - current_iteration_output = 
variable_pool.get(self.node_data.output_selector).value - outputs.insert(current_index, current_iteration_output) + current_output_segment = variable_pool.get(self.node_data.output_selector) + if current_output_segment is None: + raise IterationNodeError("iteration output selector not found") + current_iteration_output = current_output_segment.value + outputs[current_index] = current_iteration_output # remove all nodes outputs from variable pool for node_id in iteration_graph.node_ids: variable_pool.remove([node_id]) @@ -483,6 +503,8 @@ class IterationNode(BaseNode[IterationNodeData]): if next_index < len(iterator_list_value): variable_pool.add([self.node_id, "item"], iterator_list_value[next_index]) + duration = (datetime.now(timezone.utc).replace(tzinfo=None) - iter_start_at).total_seconds() + iter_run_map[iteration_run_id] = duration yield IterationRunNextEvent( iteration_id=self.id, iteration_node_id=self.node_id, @@ -491,6 +513,7 @@ class IterationNode(BaseNode[IterationNodeData]): index=next_index, parallel_mode_run_id=parallel_mode_run_id, pre_iteration_output=jsonable_encoder(current_iteration_output) if current_iteration_output else None, + duration=duration, ) except IterationNodeError as e: @@ -526,6 +549,7 @@ class IterationNode(BaseNode[IterationNodeData]): iteration_graph: Graph, index: int, item: Any, + iter_run_map: dict[str, float], ) -> Generator[NodeEvent | InNodeEvent, None, None]: """ run single iteration in parallel mode @@ -544,6 +568,7 @@ class IterationNode(BaseNode[IterationNodeData]): start_at=start_at, graph_engine=graph_engine_copy, iteration_graph=iteration_graph, + iter_run_map=iter_run_map, parallel_mode_run_id=parallel_mode_run_id, ): q.put(event) diff --git a/api/core/workflow/nodes/list_operator/entities.py b/api/core/workflow/nodes/list_operator/entities.py index 79cef1c27a..75df784a92 100644 --- a/api/core/workflow/nodes/list_operator/entities.py +++ b/api/core/workflow/nodes/list_operator/entities.py @@ -49,8 +49,14 @@ class 
Limit(BaseModel): size: int = -1 +class ExtractConfig(BaseModel): + enabled: bool = False + serial: str = "1" + + class ListOperatorNodeData(BaseNodeData): variable: Sequence[str] = Field(default_factory=list) filter_by: FilterBy order_by: OrderBy limit: Limit + extract_by: ExtractConfig = Field(default_factory=ExtractConfig) diff --git a/api/core/workflow/nodes/list_operator/node.py b/api/core/workflow/nodes/list_operator/node.py index 49e7ca85fd..79066cece4 100644 --- a/api/core/workflow/nodes/list_operator/node.py +++ b/api/core/workflow/nodes/list_operator/node.py @@ -58,6 +58,10 @@ class ListOperatorNode(BaseNode[ListOperatorNodeData]): if self.node_data.filter_by.enabled: variable = self._apply_filter(variable) + # Extract + if self.node_data.extract_by.enabled: + variable = self._extract_slice(variable) + # Order if self.node_data.order_by.enabled: variable = self._apply_order(variable) @@ -140,6 +144,16 @@ class ListOperatorNode(BaseNode[ListOperatorNodeData]): result = variable.value[: self.node_data.limit.size] return variable.model_copy(update={"value": result}) + def _extract_slice( + self, variable: Union[ArrayFileSegment, ArrayNumberSegment, ArrayStringSegment] + ) -> Union[ArrayFileSegment, ArrayNumberSegment, ArrayStringSegment]: + value = int(self.graph_runtime_state.variable_pool.convert_template(self.node_data.extract_by.serial).text) - 1 + if len(variable.value) > int(value): + result = variable.value[value] + else: + result = "" + return variable.model_copy(update={"value": [result]}) + def _get_file_extract_number_func(*, key: str) -> Callable[[File], int]: match key: diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 47b0e25d9c..eb4d1c9d87 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -14,6 +14,7 @@ from core.model_runtime.entities import ( PromptMessage, PromptMessageContentType, TextPromptMessageContent, + VideoPromptMessageContent, ) from 
core.model_runtime.entities.llm_entities import LLMResult, LLMUsage from core.model_runtime.entities.model_entities import ModelType @@ -560,7 +561,9 @@ class LLMNode(BaseNode[LLMNodeData]): # cuz vision detail is related to the configuration from FileUpload feature. content_item.detail = vision_detail prompt_message_content.append(content_item) - elif isinstance(content_item, TextPromptMessageContent | AudioPromptMessageContent): + elif isinstance( + content_item, TextPromptMessageContent | AudioPromptMessageContent | VideoPromptMessageContent + ): prompt_message_content.append(content_item) if len(prompt_message_content) > 1: diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py index 0489020e5e..744dfd3d8d 100644 --- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py +++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py @@ -127,7 +127,7 @@ class QuestionClassifierNode(LLMNode): category_id = category_id_result except OutputParserError: - logging.error(f"Failed to parse result text: {result_text}") + logging.exception(f"Failed to parse result text: {result_text}") try: process_data = { "model_mode": model_config.mode, diff --git a/api/core/workflow/nodes/template_transform/template_transform_node.py b/api/core/workflow/nodes/template_transform/template_transform_node.py index 0ee66784c5..22a1b21888 100644 --- a/api/core/workflow/nodes/template_transform/template_transform_node.py +++ b/api/core/workflow/nodes/template_transform/template_transform_node.py @@ -34,12 +34,7 @@ class TemplateTransformNode(BaseNode[TemplateTransformNodeData]): for variable_selector in self.node_data.variables: variable_name = variable_selector.variable value = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector) - if value is None: - return NodeRunResult( - status=WorkflowNodeExecutionStatus.FAILED, - 
error=f"Variable {variable_name} not found in variable pool", - ) - variables[variable_name] = value.to_object() + variables[variable_name] = value.to_object() if value else None # Run code try: result = CodeExecutor.execute_workflow_code_template( diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index 42e870c46c..6870b7467d 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -17,6 +17,7 @@ from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.utils.variable_template_parser import VariableTemplateParser from extensions.ext_database import db +from factories import file_factory from models import ToolFile from models.workflow import WorkflowNodeExecutionStatus @@ -189,19 +190,17 @@ class ToolNode(BaseNode[ToolNodeData]): if tool_file is None: raise ToolFileError(f"Tool file {tool_file_id} does not exist") - result.append( - File( - tenant_id=self.tenant_id, - type=FileType.IMAGE, - transfer_method=transfer_method, - remote_url=url, - related_id=tool_file.id, - filename=tool_file.name, - extension=ext, - mime_type=tool_file.mimetype, - size=tool_file.size, - ) + mapping = { + "tool_file_id": tool_file_id, + "type": FileType.IMAGE, + "transfer_method": transfer_method, + "url": url, + } + file = file_factory.build_from_mapping( + mapping=mapping, + tenant_id=self.tenant_id, ) + result.append(file) elif response.type == ToolInvokeMessage.MessageType.BLOB: # get tool file id tool_file_id = str(response.message).split("/")[-1].split(".")[0] @@ -209,19 +208,17 @@ class ToolNode(BaseNode[ToolNodeData]): stmt = select(ToolFile).where(ToolFile.id == tool_file_id) tool_file = session.scalar(stmt) if tool_file is None: - raise ToolFileError(f"Tool file {tool_file_id} does not exist") - result.append( - File( - tenant_id=self.tenant_id, - type=FileType.IMAGE, - transfer_method=FileTransferMethod.TOOL_FILE, - 
related_id=tool_file.id, - filename=tool_file.name, - extension=path.splitext(response.save_as)[1], - mime_type=tool_file.mimetype, - size=tool_file.size, - ) + raise ValueError(f"tool file {tool_file_id} not exists") + mapping = { + "tool_file_id": tool_file_id, + "type": FileType.IMAGE, + "transfer_method": FileTransferMethod.TOOL_FILE, + } + file = file_factory.build_from_mapping( + mapping=mapping, + tenant_id=self.tenant_id, ) + result.append(file) elif response.type == ToolInvokeMessage.MessageType.LINK: url = str(response.message) transfer_method = FileTransferMethod.TOOL_FILE @@ -235,16 +232,15 @@ class ToolNode(BaseNode[ToolNodeData]): extension = "." + url.split("/")[-1].split(".")[1] else: extension = ".bin" - file = File( + mapping = { + "tool_file_id": tool_file_id, + "type": FileType.IMAGE, + "transfer_method": transfer_method, + "url": url, + } + file = file_factory.build_from_mapping( + mapping=mapping, tenant_id=self.tenant_id, - type=FileType(response.save_as), - transfer_method=transfer_method, - remote_url=url, - filename=tool_file.name, - related_id=tool_file.id, - extension=extension, - mime_type=tool_file.mimetype, - size=tool_file.size, ) result.append(file) diff --git a/api/core/workflow/workflow_entry.py b/api/core/workflow/workflow_entry.py index eb812bad21..84b251223f 100644 --- a/api/core/workflow/workflow_entry.py +++ b/api/core/workflow/workflow_entry.py @@ -5,10 +5,10 @@ from collections.abc import Generator, Mapping, Sequence from typing import Any, Optional, cast from configs import dify_config -from core.app.app_config.entities import FileExtraConfig +from core.app.app_config.entities import FileUploadConfig from core.app.apps.base_app_queue_manager import GenerateTaskStoppedError from core.app.entities.app_invoke_entities import InvokeFrom -from core.file.models import File, FileTransferMethod, FileType, ImageConfig +from core.file.models import File, FileTransferMethod, ImageConfig from core.workflow.callbacks import 
WorkflowCallback from core.workflow.entities.variable_pool import VariablePool from core.workflow.errors import WorkflowNodeRunFailedError @@ -22,6 +22,7 @@ from core.workflow.nodes.base import BaseNode, BaseNodeData from core.workflow.nodes.event import NodeEvent from core.workflow.nodes.llm import LLMNodeData from core.workflow.nodes.node_mapping import node_type_classes_mapping +from factories import file_factory from models.enums import UserFrom from models.workflow import ( Workflow, @@ -271,19 +272,17 @@ class WorkflowEntry: for item in input_value: if isinstance(item, dict) and "type" in item and item["type"] == "image": transfer_method = FileTransferMethod.value_of(item.get("transfer_method")) - file = File( + mapping = { + "id": item.get("id"), + "transfer_method": transfer_method, + "upload_file_id": item.get("upload_file_id"), + "url": item.get("url"), + } + config = FileUploadConfig(image_config=ImageConfig(detail=detail) if detail else None) + file = file_factory.build_from_mapping( + mapping=mapping, tenant_id=tenant_id, - type=FileType.IMAGE, - transfer_method=transfer_method, - remote_url=item.get("url") - if transfer_method == FileTransferMethod.REMOTE_URL - else None, - related_id=item.get("upload_file_id") - if transfer_method == FileTransferMethod.LOCAL_FILE - else None, - _extra_config=FileExtraConfig( - image_config=ImageConfig(detail=detail) if detail else None - ), + config=config, ) new_value.append(file) diff --git a/api/extensions/ext_celery.py b/api/extensions/ext_celery.py index 504899c276..42012eee8e 100644 --- a/api/extensions/ext_celery.py +++ b/api/extensions/ext_celery.py @@ -1,5 +1,6 @@ from datetime import timedelta +import pytz from celery import Celery, Task from celery.schedules import crontab from flask import Flask @@ -43,6 +44,10 @@ def init_app(app: Flask) -> Celery: result_backend=dify_config.CELERY_RESULT_BACKEND, broker_transport_options=broker_transport_options, broker_connection_retry_on_startup=True, + 
worker_log_format=dify_config.LOG_FORMAT, + worker_task_log_format=dify_config.LOG_FORMAT, + worker_hijack_root_logger=False, + timezone=pytz.timezone(dify_config.LOG_TZ), ) if dify_config.BROKER_USE_SSL: @@ -50,6 +55,11 @@ def init_app(app: Flask) -> Celery: broker_use_ssl=ssl_options, # Add the SSL options to the broker configuration ) + if dify_config.LOG_FILE: + celery_app.conf.update( + worker_logfile=dify_config.LOG_FILE, + ) + celery_app.set_default() app.extensions["celery"] = celery_app diff --git a/api/extensions/ext_logging.py b/api/extensions/ext_logging.py index 56b1d6bd28..a15c73bd71 100644 --- a/api/extensions/ext_logging.py +++ b/api/extensions/ext_logging.py @@ -9,19 +9,21 @@ from configs import dify_config def init_app(app: Flask): - log_handlers = None + log_handlers = [] log_file = dify_config.LOG_FILE if log_file: log_dir = os.path.dirname(log_file) os.makedirs(log_dir, exist_ok=True) - log_handlers = [ + log_handlers.append( RotatingFileHandler( filename=log_file, maxBytes=dify_config.LOG_FILE_MAX_SIZE * 1024 * 1024, backupCount=dify_config.LOG_FILE_BACKUP_COUNT, - ), - logging.StreamHandler(sys.stdout), - ] + ) + ) + + # Always add StreamHandler to log to console + log_handlers.append(logging.StreamHandler(sys.stdout)) logging.basicConfig( level=dify_config.LOG_LEVEL, diff --git a/api/extensions/storage/aliyun_oss_storage.py b/api/extensions/storage/aliyun_oss_storage.py index 67635b129e..58c917dbd3 100644 --- a/api/extensions/storage/aliyun_oss_storage.py +++ b/api/extensions/storage/aliyun_oss_storage.py @@ -1,3 +1,4 @@ +import posixpath from collections.abc import Generator import oss2 as aliyun_s3 @@ -50,9 +51,4 @@ class AliyunOssStorage(BaseStorage): self.client.delete_object(self.__wrapper_folder_filename(filename)) def __wrapper_folder_filename(self, filename) -> str: - if self.folder: - if self.folder.endswith("/"): - filename = self.folder + filename - else: - filename = self.folder + "/" + filename - return filename + return 
posixpath.join(self.folder, filename) if self.folder else filename diff --git a/api/factories/file_factory.py b/api/factories/file_factory.py index 1066dc8862..738b2b3478 100644 --- a/api/factories/file_factory.py +++ b/api/factories/file_factory.py @@ -1,23 +1,21 @@ import mimetypes -from collections.abc import Mapping, Sequence +from collections.abc import Callable, Mapping, Sequence from typing import Any import httpx from sqlalchemy import select -from constants import AUDIO_EXTENSIONS, DOCUMENT_EXTENSIONS, IMAGE_EXTENSIONS, VIDEO_EXTENSIONS -from core.file import File, FileBelongsTo, FileExtraConfig, FileTransferMethod, FileType +from core.file import File, FileBelongsTo, FileTransferMethod, FileType, FileUploadConfig from core.helper import ssrf_proxy from extensions.ext_database import db from models import MessageFile, ToolFile, UploadFile -from models.enums import CreatedByRole def build_from_message_files( *, message_files: Sequence["MessageFile"], tenant_id: str, - config: FileExtraConfig, + config: FileUploadConfig, ) -> Sequence[File]: results = [ build_from_message_file(message_file=file, tenant_id=tenant_id, config=config) @@ -31,7 +29,7 @@ def build_from_message_file( *, message_file: "MessageFile", tenant_id: str, - config: FileExtraConfig, + config: FileUploadConfig, ): mapping = { "transfer_method": message_file.transfer_method, @@ -43,8 +41,6 @@ def build_from_message_file( return build_from_mapping( mapping=mapping, tenant_id=tenant_id, - user_id=message_file.created_by, - role=CreatedByRole(message_file.created_by_role), config=config, ) @@ -53,38 +49,30 @@ def build_from_mapping( *, mapping: Mapping[str, Any], tenant_id: str, - user_id: str, - role: "CreatedByRole", - config: FileExtraConfig, -): + config: FileUploadConfig | None = None, +) -> File: + config = config or FileUploadConfig() + transfer_method = FileTransferMethod.value_of(mapping.get("transfer_method")) - match transfer_method: - case FileTransferMethod.REMOTE_URL: - file = 
_build_from_remote_url( - mapping=mapping, - tenant_id=tenant_id, - config=config, - transfer_method=transfer_method, - ) - case FileTransferMethod.LOCAL_FILE: - file = _build_from_local_file( - mapping=mapping, - tenant_id=tenant_id, - user_id=user_id, - role=role, - config=config, - transfer_method=transfer_method, - ) - case FileTransferMethod.TOOL_FILE: - file = _build_from_tool_file( - mapping=mapping, - tenant_id=tenant_id, - user_id=user_id, - config=config, - transfer_method=transfer_method, - ) - case _: - raise ValueError(f"Invalid file transfer method: {transfer_method}") + + build_functions: dict[FileTransferMethod, Callable] = { + FileTransferMethod.LOCAL_FILE: _build_from_local_file, + FileTransferMethod.REMOTE_URL: _build_from_remote_url, + FileTransferMethod.TOOL_FILE: _build_from_tool_file, + } + + build_func = build_functions.get(transfer_method) + if not build_func: + raise ValueError(f"Invalid file transfer method: {transfer_method}") + + file = build_func( + mapping=mapping, + tenant_id=tenant_id, + transfer_method=transfer_method, + ) + + if not _is_file_valid_with_config(file=file, config=config): + raise ValueError(f"File validation failed for file: {file.filename}") return file @@ -92,10 +80,8 @@ def build_from_mapping( def build_from_mappings( *, mappings: Sequence[Mapping[str, Any]], - config: FileExtraConfig | None, + config: FileUploadConfig | None, tenant_id: str, - user_id: str, - role: "CreatedByRole", ) -> Sequence[File]: if not config: return [] @@ -104,8 +90,6 @@ def build_from_mappings( build_from_mapping( mapping=mapping, tenant_id=tenant_id, - user_id=user_id, - role=role, config=config, ) for mapping in mappings @@ -128,31 +112,20 @@ def _build_from_local_file( *, mapping: Mapping[str, Any], tenant_id: str, - user_id: str, - role: "CreatedByRole", - config: FileExtraConfig, transfer_method: FileTransferMethod, -): - # check if the upload file exists. 
+) -> File: file_type = FileType.value_of(mapping.get("type")) stmt = select(UploadFile).where( UploadFile.id == mapping.get("upload_file_id"), UploadFile.tenant_id == tenant_id, - UploadFile.created_by == user_id, - UploadFile.created_by_role == role, ) - if file_type == FileType.IMAGE: - stmt = stmt.where(UploadFile.extension.in_(IMAGE_EXTENSIONS)) - elif file_type == FileType.VIDEO: - stmt = stmt.where(UploadFile.extension.in_(VIDEO_EXTENSIONS)) - elif file_type == FileType.AUDIO: - stmt = stmt.where(UploadFile.extension.in_(AUDIO_EXTENSIONS)) - elif file_type == FileType.DOCUMENT: - stmt = stmt.where(UploadFile.extension.in_(DOCUMENT_EXTENSIONS)) + row = db.session.scalar(stmt) + if row is None: raise ValueError("Invalid upload file") - file = File( + + return File( id=mapping.get("id"), filename=row.name, extension="." + row.extension, @@ -162,23 +135,37 @@ def _build_from_local_file( transfer_method=transfer_method, remote_url=row.source_url, related_id=mapping.get("upload_file_id"), - _extra_config=config, size=row.size, ) - return file def _build_from_remote_url( *, mapping: Mapping[str, Any], tenant_id: str, - config: FileExtraConfig, transfer_method: FileTransferMethod, -): +) -> File: url = mapping.get("url") if not url: raise ValueError("Invalid file url") + mime_type, filename, file_size = _get_remote_file_info(url) + extension = mimetypes.guess_extension(mime_type) or "." + filename.split(".")[-1] if "." 
in filename else ".bin" + + return File( + id=mapping.get("id"), + filename=filename, + tenant_id=tenant_id, + type=FileType.value_of(mapping.get("type")), + transfer_method=transfer_method, + remote_url=url, + mime_type=mime_type, + extension=extension, + size=file_size, + ) + + +def _get_remote_file_info(url: str): mime_type = mimetypes.guess_type(url)[0] or "" file_size = -1 filename = url.split("/")[-1].split("?")[0] or "unknown_file" @@ -186,56 +173,34 @@ def _build_from_remote_url( resp = ssrf_proxy.head(url, follow_redirects=True) if resp.status_code == httpx.codes.OK: if content_disposition := resp.headers.get("Content-Disposition"): - filename = content_disposition.split("filename=")[-1].strip('"') + filename = str(content_disposition.split("filename=")[-1].strip('"')) file_size = int(resp.headers.get("Content-Length", file_size)) mime_type = mime_type or str(resp.headers.get("Content-Type", "")) - # Determine file extension - extension = mimetypes.guess_extension(mime_type) or "." + filename.split(".")[-1] if "." in filename else ".bin" - - if not mime_type: - mime_type, _ = mimetypes.guess_type(url) - file = File( - id=mapping.get("id"), - filename=filename, - tenant_id=tenant_id, - type=FileType.value_of(mapping.get("type")), - transfer_method=transfer_method, - remote_url=url, - _extra_config=config, - mime_type=mime_type, - extension=extension, - size=file_size, - ) - return file + return mime_type, filename, file_size def _build_from_tool_file( *, mapping: Mapping[str, Any], tenant_id: str, - user_id: str, - config: FileExtraConfig, transfer_method: FileTransferMethod, -): +) -> File: tool_file = ( db.session.query(ToolFile) .filter( ToolFile.id == mapping.get("tool_file_id"), ToolFile.tenant_id == tenant_id, - ToolFile.user_id == user_id, ) .first() ) + if tool_file is None: raise ValueError(f"ToolFile {mapping.get('tool_file_id')} not found") - path = tool_file.file_key - if "." in path: - extension = "." 
+ path.split("/")[-1].split(".")[-1] - else: - extension = ".bin" - file = File( + extension = "." + tool_file.file_key.split(".")[-1] if "." in tool_file.file_key else ".bin" + + return File( id=mapping.get("id"), tenant_id=tenant_id, filename=tool_file.name, @@ -246,6 +211,21 @@ def _build_from_tool_file( extension=extension, mime_type=tool_file.mimetype, size=tool_file.size, - _extra_config=config, ) - return file + + +def _is_file_valid_with_config(*, file: File, config: FileUploadConfig) -> bool: + if config.allowed_file_types and file.type not in config.allowed_file_types and file.type != FileType.CUSTOM: + return False + + if config.allowed_extensions and file.extension not in config.allowed_extensions: + return False + + if config.allowed_upload_methods and file.transfer_method not in config.allowed_upload_methods: + return False + + if file.type == FileType.IMAGE and config.image_config: + if config.image_config.transfer_methods and file.transfer_method not in config.image_config.transfer_methods: + return False + + return True diff --git a/api/factories/variable_factory.py b/api/factories/variable_factory.py index 0191102b90..5b004405b4 100644 --- a/api/factories/variable_factory.py +++ b/api/factories/variable_factory.py @@ -1,34 +1,65 @@ -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from typing import Any +from uuid import uuid4 from configs import dify_config from core.file import File -from core.variables import ( +from core.variables.exc import VariableError +from core.variables.segments import ( ArrayAnySegment, ArrayFileSegment, ArrayNumberSegment, - ArrayNumberVariable, ArrayObjectSegment, - ArrayObjectVariable, ArraySegment, ArrayStringSegment, - ArrayStringVariable, FileSegment, FloatSegment, - FloatVariable, IntegerSegment, - IntegerVariable, NoneSegment, ObjectSegment, + Segment, + StringSegment, +) +from core.variables.types import SegmentType +from core.variables.variables import ( + ArrayAnyVariable, + 
ArrayFileVariable, + ArrayNumberVariable, + ArrayObjectVariable, + ArrayStringVariable, + FileVariable, + FloatVariable, + IntegerVariable, + NoneVariable, ObjectVariable, SecretVariable, - Segment, - SegmentType, - StringSegment, StringVariable, Variable, ) -from core.variables.exc import VariableError + + +class InvalidSelectorError(ValueError): + pass + + +class UnsupportedSegmentTypeError(Exception): + pass + + +# Define the constant +SEGMENT_TO_VARIABLE_MAP = { + StringSegment: StringVariable, + IntegerSegment: IntegerVariable, + FloatSegment: FloatVariable, + ObjectSegment: ObjectVariable, + FileSegment: FileVariable, + ArrayStringSegment: ArrayStringVariable, + ArrayNumberSegment: ArrayNumberVariable, + ArrayObjectSegment: ArrayObjectVariable, + ArrayFileSegment: ArrayFileVariable, + ArrayAnySegment: ArrayAnyVariable, + NoneSegment: NoneVariable, +} def build_variable_from_mapping(mapping: Mapping[str, Any], /) -> Variable: @@ -96,3 +127,30 @@ def build_segment(value: Any, /) -> Segment: case _: raise ValueError(f"not supported value {value}") raise ValueError(f"not supported value {value}") + + +def segment_to_variable( + *, + segment: Segment, + selector: Sequence[str], + id: str | None = None, + name: str | None = None, + description: str = "", +) -> Variable: + if isinstance(segment, Variable): + return segment + name = name or selector[-1] + id = id or str(uuid4()) + + segment_type = type(segment) + if segment_type not in SEGMENT_TO_VARIABLE_MAP: + raise UnsupportedSegmentTypeError(f"not supported segment type {segment_type}") + + variable_class = SEGMENT_TO_VARIABLE_MAP[segment_type] + return variable_class( + id=id, + name=name, + description=description, + value=segment.value, + selector=selector, + ) diff --git a/api/fields/conversation_fields.py b/api/fields/conversation_fields.py index 2eb19c2667..5bd21be807 100644 --- a/api/fields/conversation_fields.py +++ b/api/fields/conversation_fields.py @@ -202,6 +202,10 @@ simple_conversation_fields = { 
"updated_at": TimestampField, } +conversation_delete_fields = { + "result": fields.String, +} + conversation_infinite_scroll_pagination_fields = { "limit": fields.Integer, "has_more": fields.Boolean, diff --git a/api/libs/smtp.py b/api/libs/smtp.py index bd7de7dd68..d57d99f3b7 100644 --- a/api/libs/smtp.py +++ b/api/libs/smtp.py @@ -39,13 +39,13 @@ class SMTPClient: smtp.sendmail(self._from, mail["to"], msg.as_string()) except smtplib.SMTPException as e: - logging.error(f"SMTP error occurred: {str(e)}") + logging.exception(f"SMTP error occurred: {str(e)}") raise except TimeoutError as e: - logging.error(f"Timeout occurred while sending email: {str(e)}") + logging.exception(f"Timeout occurred while sending email: {str(e)}") raise except Exception as e: - logging.error(f"Unexpected error occurred while sending email: {str(e)}") + logging.exception(f"Unexpected error occurred while sending email: {str(e)}") raise finally: if smtp: diff --git a/api/migrations/versions/2024_10_09_1329-d8e744d88ed6_fix_wrong_service_api_history.py b/api/migrations/versions/2024_10_09_1329-d8e744d88ed6_fix_wrong_service_api_history.py index b3b8dfa7d4..38a5cdf8e5 100644 --- a/api/migrations/versions/2024_10_09_1329-d8e744d88ed6_fix_wrong_service_api_history.py +++ b/api/migrations/versions/2024_10_09_1329-d8e744d88ed6_fix_wrong_service_api_history.py @@ -23,7 +23,7 @@ v0_9_0_release_date= '2024-09-29 12:00:00' def upgrade(): # ### commands auto generated by Alembic - please adjust! ### sql = f"""UPDATE - public.messages + messages SET parent_message_id = '{UUID_NIL}' WHERE @@ -37,7 +37,7 @@ WHERE def downgrade(): # ### commands auto generated by Alembic - please adjust! 
### sql = f"""UPDATE - public.messages + messages SET parent_message_id = NULL WHERE diff --git a/api/models/model.py b/api/models/model.py index d049cd373d..e909d53e3e 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -13,7 +13,7 @@ from sqlalchemy import Float, func, text from sqlalchemy.orm import Mapped, mapped_column from configs import dify_config -from core.file import FILE_MODEL_IDENTITY, File, FileExtraConfig, FileTransferMethod, FileType +from core.file import FILE_MODEL_IDENTITY, File, FileTransferMethod, FileType from core.file import helpers as file_helpers from core.file.tool_file_parser import ToolFileParser from extensions.ext_database import db @@ -949,9 +949,6 @@ class Message(db.Model): "type": message_file.type, }, tenant_id=current_app.tenant_id, - user_id=self.from_account_id or self.from_end_user_id or "", - role=CreatedByRole(message_file.created_by_role), - config=FileExtraConfig(), ) elif message_file.transfer_method == "remote_url": if message_file.url is None: @@ -964,9 +961,6 @@ class Message(db.Model): "url": message_file.url, }, tenant_id=current_app.tenant_id, - user_id=self.from_account_id or self.from_end_user_id or "", - role=CreatedByRole(message_file.created_by_role), - config=FileExtraConfig(), ) elif message_file.transfer_method == "tool_file": if message_file.upload_file_id is None: @@ -981,9 +975,6 @@ class Message(db.Model): file = file_factory.build_from_mapping( mapping=mapping, tenant_id=current_app.tenant_id, - user_id=self.from_account_id or self.from_end_user_id or "", - role=CreatedByRole(message_file.created_by_role), - config=FileExtraConfig(), ) else: raise ValueError( diff --git a/api/poetry.lock b/api/poetry.lock index 6cd5e24dec..259ede6898 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -125,13 +125,13 @@ speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] [[package]] name = "aiohttp-retry" -version = "2.8.3" +version = "2.9.0" description = "Simple retry client for aiohttp" optional = 
false python-versions = ">=3.7" files = [ - {file = "aiohttp_retry-2.8.3-py3-none-any.whl", hash = "sha256:3aeeead8f6afe48272db93ced9440cf4eda8b6fd7ee2abb25357b7eb28525b45"}, - {file = "aiohttp_retry-2.8.3.tar.gz", hash = "sha256:9a8e637e31682ad36e1ff9f8bcba912fcfc7d7041722bc901a4b948da4d71ea9"}, + {file = "aiohttp_retry-2.9.0-py3-none-any.whl", hash = "sha256:7661af92471e9a96c69d9b8f32021360272073397e6a15bc44c1726b12f46056"}, + {file = "aiohttp_retry-2.9.0.tar.gz", hash = "sha256:92c47f1580040208bac95d9a8389a87227ef22758530f2e3f4683395e42c41b5"}, ] [package.dependencies] @@ -172,12 +172,12 @@ tz = ["backports.zoneinfo"] [[package]] name = "alibabacloud-credentials" -version = "0.3.5" +version = "0.3.6" description = "The alibabacloud credentials module of alibabaCloud Python SDK." optional = false python-versions = ">=3.6" files = [ - {file = "alibabacloud_credentials-0.3.5.tar.gz", hash = "sha256:ad065ec95921eaf51939195485d0e5cc9e0ea050282059c7d8bf74bdb5496177"}, + {file = "alibabacloud_credentials-0.3.6.tar.gz", hash = "sha256:caa82cf258648dcbe1ca14aeba50ba21845567d6ac3cd48d318e0a445fff7f96"}, ] [package.dependencies] @@ -847,13 +847,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.35.47" +version = "1.35.52" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.47-py3-none-any.whl", hash = "sha256:05f4493119a96799ff84d43e78691efac3177e1aec8840cca99511de940e342a"}, - {file = "botocore-1.35.47.tar.gz", hash = "sha256:f8f703463d3cd8b6abe2bedc443a7ab29f0e2ff1588a2e83164b108748645547"}, + {file = "botocore-1.35.52-py3-none-any.whl", hash = "sha256:cdbb5e43c9c3a977763e2a10d3b8b9c405d51279f9fcfd4ca4800763b22acba5"}, + {file = "botocore-1.35.52.tar.gz", hash = "sha256:1fe7485ea13d638b089103addd818c12984ff1e4d208de15f180b1e25ad944c5"}, ] [package.dependencies] @@ -1098,13 +1098,13 @@ files = [ [[package]] name = "celery" -version = "5.3.6" +version = "5.4.0" description = "Distributed Task Queue." optional = false python-versions = ">=3.8" files = [ - {file = "celery-5.3.6-py3-none-any.whl", hash = "sha256:9da4ea0118d232ce97dff5ed4974587fb1c0ff5c10042eb15278487cdd27d1af"}, - {file = "celery-5.3.6.tar.gz", hash = "sha256:870cc71d737c0200c397290d730344cc991d13a057534353d124c9380267aab9"}, + {file = "celery-5.4.0-py3-none-any.whl", hash = "sha256:369631eb580cf8c51a82721ec538684994f8277637edde2dfc0dacd73ed97f64"}, + {file = "celery-5.4.0.tar.gz", hash = "sha256:504a19140e8d3029d5acad88330c541d4c3f64c789d85f94756762d8bca7e706"}, ] [package.dependencies] @@ -1120,7 +1120,7 @@ vine = ">=5.1.0,<6.0" [package.extras] arangodb = ["pyArango (>=2.0.2)"] -auth = ["cryptography (==41.0.5)"] +auth = ["cryptography (==42.0.5)"] azureblockblob = ["azure-storage-blob (>=12.15.0)"] brotli = ["brotli (>=1.0.0)", "brotlipy (>=0.7.0)"] cassandra = ["cassandra-driver (>=3.25.0,<4)"] @@ -1130,22 +1130,23 @@ couchbase = ["couchbase (>=3.0.0)"] couchdb = ["pycouchdb (==1.14.2)"] django = ["Django (>=2.2.28)"] dynamodb = ["boto3 (>=1.26.143)"] -elasticsearch = ["elastic-transport (<=8.10.0)", "elasticsearch (<=8.11.0)"] +elasticsearch = ["elastic-transport (<=8.13.0)", "elasticsearch (<=8.13.0)"] eventlet = ["eventlet (>=0.32.0)"] +gcs = ["google-cloud-storage (>=2.10.0)"] gevent 
= ["gevent (>=1.5.0)"] librabbitmq = ["librabbitmq (>=2.0.0)"] memcache = ["pylibmc (==1.6.3)"] mongodb = ["pymongo[srv] (>=4.0.2)"] -msgpack = ["msgpack (==1.0.7)"] -pymemcache = ["python-memcached (==1.59)"] +msgpack = ["msgpack (==1.0.8)"] +pymemcache = ["python-memcached (>=1.61)"] pyro = ["pyro4 (==4.82)"] -pytest = ["pytest-celery (==0.0.0)"] +pytest = ["pytest-celery[all] (>=1.0.0)"] redis = ["redis (>=4.5.2,!=4.5.5,<6.0.0)"] s3 = ["boto3 (>=1.26.143)"] slmq = ["softlayer-messaging (>=1.0.3)"] solar = ["ephem (==4.1.5)"] sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"] -sqs = ["boto3 (>=1.26.143)", "kombu[sqs] (>=5.3.0)", "pycurl (>=7.43.0.5)", "urllib3 (>=1.26.16)"] +sqs = ["boto3 (>=1.26.143)", "kombu[sqs] (>=5.3.4)", "pycurl (>=7.43.0.5)", "urllib3 (>=1.26.16)"] tblib = ["tblib (>=1.3.0)", "tblib (>=1.5.0)"] yaml = ["PyYAML (>=3.10)"] zookeeper = ["kazoo (>=1.3.1)"] @@ -2257,18 +2258,18 @@ files = [ [[package]] name = "duckduckgo-search" -version = "6.3.2" +version = "6.3.3" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." 
optional = false python-versions = ">=3.8" files = [ - {file = "duckduckgo_search-6.3.2-py3-none-any.whl", hash = "sha256:cd631275292460d590d1d496995d002bf2fe6db9752713fab17b9e95924ced98"}, - {file = "duckduckgo_search-6.3.2.tar.gz", hash = "sha256:53dbf45f8749bfc67483eb9f281f2e722a5fe644d61c54ed9e551d26cb6bcbf2"}, + {file = "duckduckgo_search-6.3.3-py3-none-any.whl", hash = "sha256:63e5d6b958bd532016bc8a53e8b18717751bf7ef51b1c83e59b9f5780c79e64c"}, + {file = "duckduckgo_search-6.3.3.tar.gz", hash = "sha256:4d49508f01f85c8675765fdd4cc25eedbb3450e129b35209897fded874f6568f"}, ] [package.dependencies] click = ">=8.1.7" -primp = ">=0.6.4" +primp = ">=0.6.5" [package.extras] dev = ["mypy (>=1.11.1)", "pytest (>=8.3.1)", "pytest-asyncio (>=0.23.8)", "ruff (>=0.6.1)"] @@ -2373,13 +2374,13 @@ pycryptodome = ">=3.10.1" [[package]] name = "et-xmlfile" -version = "1.1.0" +version = "2.0.0" description = "An implementation of lxml.xmlfile for the standard library" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "et_xmlfile-1.1.0-py3-none-any.whl", hash = "sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"}, - {file = "et_xmlfile-1.1.0.tar.gz", hash = "sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c"}, + {file = "et_xmlfile-2.0.0-py3-none-any.whl", hash = "sha256:7a91720bc756843502c3b7504c77b8fe44217c85c537d85037f0f536151b2caa"}, + {file = "et_xmlfile-2.0.0.tar.gz", hash = "sha256:dab3f4764309081ce75662649be815c4c9081e88f0837825f90fd28317d4da54"}, ] [[package]] @@ -2412,13 +2413,13 @@ test = ["pytest (>=6)"] [[package]] name = "fastapi" -version = "0.115.3" +version = "0.115.4" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" files = [ - {file = "fastapi-0.115.3-py3-none-any.whl", hash = "sha256:8035e8f9a2b0aa89cea03b6c77721178ed5358e1aea4cd8570d9466895c0638c"}, - {file = 
"fastapi-0.115.3.tar.gz", hash = "sha256:c091c6a35599c036d676fa24bd4a6e19fa30058d93d950216cdc672881f6f7db"}, + {file = "fastapi-0.115.4-py3-none-any.whl", hash = "sha256:0b504a063ffb3cf96a5e27dc1bc32c80ca743a2528574f9cdc77daa2d31b4742"}, + {file = "fastapi-0.115.4.tar.gz", hash = "sha256:db653475586b091cb8b2fec2ac54a680ac6a158e07406e1abae31679e8826349"}, ] [package.dependencies] @@ -3336,13 +3337,13 @@ grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"] [[package]] name = "google-cloud-resource-manager" -version = "1.12.5" +version = "1.13.0" description = "Google Cloud Resource Manager API client library" optional = false python-versions = ">=3.7" files = [ - {file = "google_cloud_resource_manager-1.12.5-py2.py3-none-any.whl", hash = "sha256:2708a718b45c79464b7b21559c701b5c92e6b0b1ab2146d0a256277a623dc175"}, - {file = "google_cloud_resource_manager-1.12.5.tar.gz", hash = "sha256:b7af4254401ed4efa3aba3a929cb3ddb803fa6baf91a78485e45583597de5891"}, + {file = "google_cloud_resource_manager-1.13.0-py2.py3-none-any.whl", hash = "sha256:33beb4528c2b7aee7a97ed843710581a7b4a27f3dd1fa41a0bf3359b3d68853f"}, + {file = "google_cloud_resource_manager-1.13.0.tar.gz", hash = "sha256:ae4bf69443f14b37007d4d84150115b0942e8b01650fd7a1fc6ff4dc1760e5c4"}, ] [package.dependencies] @@ -3606,70 +3607,70 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4 [[package]] name = "grpcio" -version = "1.67.0" +version = "1.67.1" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" files = [ - {file = "grpcio-1.67.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:bd79929b3bb96b54df1296cd3bf4d2b770bd1df6c2bdf549b49bab286b925cdc"}, - {file = "grpcio-1.67.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:16724ffc956ea42967f5758c2f043faef43cb7e48a51948ab593570570d1e68b"}, - {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = 
"sha256:2b7183c80b602b0ad816315d66f2fb7887614ead950416d60913a9a71c12560d"}, - {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:efe32b45dd6d118f5ea2e5deaed417d8a14976325c93812dd831908522b402c9"}, - {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe89295219b9c9e47780a0f1c75ca44211e706d1c598242249fe717af3385ec8"}, - {file = "grpcio-1.67.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa8d025fae1595a207b4e47c2e087cb88d47008494db258ac561c00877d4c8f8"}, - {file = "grpcio-1.67.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f95e15db43e75a534420e04822df91f645664bf4ad21dfaad7d51773c80e6bb4"}, - {file = "grpcio-1.67.0-cp310-cp310-win32.whl", hash = "sha256:a6b9a5c18863fd4b6624a42e2712103fb0f57799a3b29651c0e5b8119a519d65"}, - {file = "grpcio-1.67.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6eb68493a05d38b426604e1dc93bfc0137c4157f7ab4fac5771fd9a104bbaa6"}, - {file = "grpcio-1.67.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:e91d154689639932305b6ea6f45c6e46bb51ecc8ea77c10ef25aa77f75443ad4"}, - {file = "grpcio-1.67.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cb204a742997277da678611a809a8409657b1398aaeebf73b3d9563b7d154c13"}, - {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:ae6de510f670137e755eb2a74b04d1041e7210af2444103c8c95f193340d17ee"}, - {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74b900566bdf68241118f2918d312d3bf554b2ce0b12b90178091ea7d0a17b3d"}, - {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4e95e43447a02aa603abcc6b5e727d093d161a869c83b073f50b9390ecf0fa8"}, - {file = "grpcio-1.67.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bb94e66cd8f0baf29bd3184b6aa09aeb1a660f9ec3d85da615c5003154bc2bf"}, - {file = "grpcio-1.67.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:82e5bd4b67b17c8c597273663794a6a46a45e44165b960517fe6d8a2f7f16d23"}, - {file = "grpcio-1.67.0-cp311-cp311-win32.whl", hash = "sha256:7fc1d2b9fd549264ae585026b266ac2db53735510a207381be509c315b4af4e8"}, - {file = "grpcio-1.67.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac11ecb34a86b831239cc38245403a8de25037b448464f95c3315819e7519772"}, - {file = "grpcio-1.67.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:227316b5631260e0bef8a3ce04fa7db4cc81756fea1258b007950b6efc90c05d"}, - {file = "grpcio-1.67.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d90cfdafcf4b45a7a076e3e2a58e7bc3d59c698c4f6470b0bb13a4d869cf2273"}, - {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:77196216d5dd6f99af1c51e235af2dd339159f657280e65ce7e12c1a8feffd1d"}, - {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c05a26a0f7047f720da41dc49406b395c1470eef44ff7e2c506a47ac2c0591"}, - {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3840994689cc8cbb73d60485c594424ad8adb56c71a30d8948d6453083624b52"}, - {file = "grpcio-1.67.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5a1e03c3102b6451028d5dc9f8591131d6ab3c8a0e023d94c28cb930ed4b5f81"}, - {file = "grpcio-1.67.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:682968427a63d898759474e3b3178d42546e878fdce034fd7474ef75143b64e3"}, - {file = "grpcio-1.67.0-cp312-cp312-win32.whl", hash = "sha256:d01793653248f49cf47e5695e0a79805b1d9d4eacef85b310118ba1dfcd1b955"}, - {file = "grpcio-1.67.0-cp312-cp312-win_amd64.whl", hash = "sha256:985b2686f786f3e20326c4367eebdaed3e7aa65848260ff0c6644f817042cb15"}, - {file = "grpcio-1.67.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:8c9a35b8bc50db35ab8e3e02a4f2a35cfba46c8705c3911c34ce343bd777813a"}, - {file = "grpcio-1.67.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:42199e704095b62688998c2d84c89e59a26a7d5d32eed86d43dc90e7a3bd04aa"}, - {file = 
"grpcio-1.67.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c4c425f440fb81f8d0237c07b9322fc0fb6ee2b29fbef5f62a322ff8fcce240d"}, - {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:323741b6699cd2b04a71cb38f502db98f90532e8a40cb675393d248126a268af"}, - {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:662c8e105c5e5cee0317d500eb186ed7a93229586e431c1bf0c9236c2407352c"}, - {file = "grpcio-1.67.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f6bd2ab135c64a4d1e9e44679a616c9bc944547357c830fafea5c3caa3de5153"}, - {file = "grpcio-1.67.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:2f55c1e0e2ae9bdd23b3c63459ee4c06d223b68aeb1961d83c48fb63dc29bc03"}, - {file = "grpcio-1.67.0-cp313-cp313-win32.whl", hash = "sha256:fd6bc27861e460fe28e94226e3673d46e294ca4673d46b224428d197c5935e69"}, - {file = "grpcio-1.67.0-cp313-cp313-win_amd64.whl", hash = "sha256:cf51d28063338608cd8d3cd64677e922134837902b70ce00dad7f116e3998210"}, - {file = "grpcio-1.67.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:7f200aca719c1c5dc72ab68be3479b9dafccdf03df530d137632c534bb6f1ee3"}, - {file = "grpcio-1.67.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0892dd200ece4822d72dd0952f7112c542a487fc48fe77568deaaa399c1e717d"}, - {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f4d613fbf868b2e2444f490d18af472ccb47660ea3df52f068c9c8801e1f3e85"}, - {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c69bf11894cad9da00047f46584d5758d6ebc9b5950c0dc96fec7e0bce5cde9"}, - {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9bca3ca0c5e74dea44bf57d27e15a3a3996ce7e5780d61b7c72386356d231db"}, - {file = "grpcio-1.67.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:014dfc020e28a0d9be7e93a91f85ff9f4a87158b7df9952fe23cc42d29d31e1e"}, - {file = "grpcio-1.67.0-cp38-cp38-musllinux_1_1_x86_64.whl", 
hash = "sha256:d4ea4509d42c6797539e9ec7496c15473177ce9abc89bc5c71e7abe50fc25737"}, - {file = "grpcio-1.67.0-cp38-cp38-win32.whl", hash = "sha256:9d75641a2fca9ae1ae86454fd25d4c298ea8cc195dbc962852234d54a07060ad"}, - {file = "grpcio-1.67.0-cp38-cp38-win_amd64.whl", hash = "sha256:cff8e54d6a463883cda2fab94d2062aad2f5edd7f06ae3ed030f2a74756db365"}, - {file = "grpcio-1.67.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:62492bd534979e6d7127b8a6b29093161a742dee3875873e01964049d5250a74"}, - {file = "grpcio-1.67.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eef1dce9d1a46119fd09f9a992cf6ab9d9178b696382439446ca5f399d7b96fe"}, - {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f623c57a5321461c84498a99dddf9d13dac0e40ee056d884d6ec4ebcab647a78"}, - {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54d16383044e681f8beb50f905249e4e7261dd169d4aaf6e52eab67b01cbbbe2"}, - {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2a44e572fb762c668e4812156b81835f7aba8a721b027e2d4bb29fb50ff4d33"}, - {file = "grpcio-1.67.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:391df8b0faac84d42f5b8dfc65f5152c48ed914e13c522fd05f2aca211f8bfad"}, - {file = "grpcio-1.67.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfd9306511fdfc623a1ba1dc3bc07fbd24e6cfbe3c28b4d1e05177baa2f99617"}, - {file = "grpcio-1.67.0-cp39-cp39-win32.whl", hash = "sha256:30d47dbacfd20cbd0c8be9bfa52fdb833b395d4ec32fe5cff7220afc05d08571"}, - {file = "grpcio-1.67.0-cp39-cp39-win_amd64.whl", hash = "sha256:f55f077685f61f0fbd06ea355142b71e47e4a26d2d678b3ba27248abfe67163a"}, - {file = "grpcio-1.67.0.tar.gz", hash = "sha256:e090b2553e0da1c875449c8e75073dd4415dd71c9bde6a406240fdf4c0ee467c"}, + {file = "grpcio-1.67.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:8b0341d66a57f8a3119b77ab32207072be60c9bf79760fa609c5609f2deb1f3f"}, + {file = "grpcio-1.67.1-cp310-cp310-macosx_12_0_universal2.whl", hash = 
"sha256:f5a27dddefe0e2357d3e617b9079b4bfdc91341a91565111a21ed6ebbc51b22d"}, + {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:43112046864317498a33bdc4797ae6a268c36345a910de9b9c17159d8346602f"}, + {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9b929f13677b10f63124c1a410994a401cdd85214ad83ab67cc077fc7e480f0"}, + {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7d1797a8a3845437d327145959a2c0c47c05947c9eef5ff1a4c80e499dcc6fa"}, + {file = "grpcio-1.67.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0489063974d1452436139501bf6b180f63d4977223ee87488fe36858c5725292"}, + {file = "grpcio-1.67.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9fd042de4a82e3e7aca44008ee2fb5da01b3e5adb316348c21980f7f58adc311"}, + {file = "grpcio-1.67.1-cp310-cp310-win32.whl", hash = "sha256:638354e698fd0c6c76b04540a850bf1db27b4d2515a19fcd5cf645c48d3eb1ed"}, + {file = "grpcio-1.67.1-cp310-cp310-win_amd64.whl", hash = "sha256:608d87d1bdabf9e2868b12338cd38a79969eaf920c89d698ead08f48de9c0f9e"}, + {file = "grpcio-1.67.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:7818c0454027ae3384235a65210bbf5464bd715450e30a3d40385453a85a70cb"}, + {file = "grpcio-1.67.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ea33986b70f83844cd00814cee4451055cd8cab36f00ac64a31f5bb09b31919e"}, + {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:c7a01337407dd89005527623a4a72c5c8e2894d22bead0895306b23c6695698f"}, + {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80b866f73224b0634f4312a4674c1be21b2b4afa73cb20953cbbb73a6b36c3cc"}, + {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9fff78ba10d4250bfc07a01bd6254a6d87dc67f9627adece85c0b2ed754fa96"}, + {file = "grpcio-1.67.1-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:8a23cbcc5bb11ea7dc6163078be36c065db68d915c24f5faa4f872c573bb400f"}, + {file = "grpcio-1.67.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1a65b503d008f066e994f34f456e0647e5ceb34cfcec5ad180b1b44020ad4970"}, + {file = "grpcio-1.67.1-cp311-cp311-win32.whl", hash = "sha256:e29ca27bec8e163dca0c98084040edec3bc49afd10f18b412f483cc68c712744"}, + {file = "grpcio-1.67.1-cp311-cp311-win_amd64.whl", hash = "sha256:786a5b18544622bfb1e25cc08402bd44ea83edfb04b93798d85dca4d1a0b5be5"}, + {file = "grpcio-1.67.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:267d1745894200e4c604958da5f856da6293f063327cb049a51fe67348e4f953"}, + {file = "grpcio-1.67.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:85f69fdc1d28ce7cff8de3f9c67db2b0ca9ba4449644488c1e0303c146135ddb"}, + {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f26b0b547eb8d00e195274cdfc63ce64c8fc2d3e2d00b12bf468ece41a0423a0"}, + {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4422581cdc628f77302270ff839a44f4c24fdc57887dc2a45b7e53d8fc2376af"}, + {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7616d2ded471231c701489190379e0c311ee0a6c756f3c03e6a62b95a7146e"}, + {file = "grpcio-1.67.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8a00efecde9d6fcc3ab00c13f816313c040a28450e5e25739c24f432fc6d3c75"}, + {file = "grpcio-1.67.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:699e964923b70f3101393710793289e42845791ea07565654ada0969522d0a38"}, + {file = "grpcio-1.67.1-cp312-cp312-win32.whl", hash = "sha256:4e7b904484a634a0fff132958dabdb10d63e0927398273917da3ee103e8d1f78"}, + {file = "grpcio-1.67.1-cp312-cp312-win_amd64.whl", hash = "sha256:5721e66a594a6c4204458004852719b38f3d5522082be9061d6510b455c90afc"}, + {file = "grpcio-1.67.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa0162e56fd10a5547fac8774c4899fc3e18c1aa4a4759d0ce2cd00d3696ea6b"}, + {file = 
"grpcio-1.67.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:beee96c8c0b1a75d556fe57b92b58b4347c77a65781ee2ac749d550f2a365dc1"}, + {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:a93deda571a1bf94ec1f6fcda2872dad3ae538700d94dc283c672a3b508ba3af"}, + {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6f255980afef598a9e64a24efce87b625e3e3c80a45162d111a461a9f92955"}, + {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e838cad2176ebd5d4a8bb03955138d6589ce9e2ce5d51c3ada34396dbd2dba8"}, + {file = "grpcio-1.67.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a6703916c43b1d468d0756c8077b12017a9fcb6a1ef13faf49e67d20d7ebda62"}, + {file = "grpcio-1.67.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:917e8d8994eed1d86b907ba2a61b9f0aef27a2155bca6cbb322430fc7135b7bb"}, + {file = "grpcio-1.67.1-cp313-cp313-win32.whl", hash = "sha256:e279330bef1744040db8fc432becc8a727b84f456ab62b744d3fdb83f327e121"}, + {file = "grpcio-1.67.1-cp313-cp313-win_amd64.whl", hash = "sha256:fa0c739ad8b1996bd24823950e3cb5152ae91fca1c09cc791190bf1627ffefba"}, + {file = "grpcio-1.67.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:178f5db771c4f9a9facb2ab37a434c46cb9be1a75e820f187ee3d1e7805c4f65"}, + {file = "grpcio-1.67.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f3e49c738396e93b7ba9016e153eb09e0778e776df6090c1b8c91877cc1c426"}, + {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:24e8a26dbfc5274d7474c27759b54486b8de23c709d76695237515bc8b5baeab"}, + {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b6c16489326d79ead41689c4b84bc40d522c9a7617219f4ad94bc7f448c5085"}, + {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e6a4dcf5af7bbc36fd9f81c9f372e8ae580870a9e4b6eafe948cd334b81cf3"}, + {file = 
"grpcio-1.67.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:95b5f2b857856ed78d72da93cd7d09b6db8ef30102e5e7fe0961fe4d9f7d48e8"}, + {file = "grpcio-1.67.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b49359977c6ec9f5d0573ea4e0071ad278ef905aa74e420acc73fd28ce39e9ce"}, + {file = "grpcio-1.67.1-cp38-cp38-win32.whl", hash = "sha256:f5b76ff64aaac53fede0cc93abf57894ab2a7362986ba22243d06218b93efe46"}, + {file = "grpcio-1.67.1-cp38-cp38-win_amd64.whl", hash = "sha256:804c6457c3cd3ec04fe6006c739579b8d35c86ae3298ffca8de57b493524b771"}, + {file = "grpcio-1.67.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:a25bdea92b13ff4d7790962190bf6bf5c4639876e01c0f3dda70fc2769616335"}, + {file = "grpcio-1.67.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cdc491ae35a13535fd9196acb5afe1af37c8237df2e54427be3eecda3653127e"}, + {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:85f862069b86a305497e74d0dc43c02de3d1d184fc2c180993aa8aa86fbd19b8"}, + {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec74ef02010186185de82cc594058a3ccd8d86821842bbac9873fd4a2cf8be8d"}, + {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01f616a964e540638af5130469451cf580ba8c7329f45ca998ab66e0c7dcdb04"}, + {file = "grpcio-1.67.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:299b3d8c4f790c6bcca485f9963b4846dd92cf6f1b65d3697145d005c80f9fe8"}, + {file = "grpcio-1.67.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:60336bff760fbb47d7e86165408126f1dded184448e9a4c892189eb7c9d3f90f"}, + {file = "grpcio-1.67.1-cp39-cp39-win32.whl", hash = "sha256:5ed601c4c6008429e3d247ddb367fe8c7259c355757448d7c1ef7bd4a6739e8e"}, + {file = "grpcio-1.67.1-cp39-cp39-win_amd64.whl", hash = "sha256:5db70d32d6703b89912af16d6d45d78406374a8b8ef0d28140351dd0ec610e98"}, + {file = "grpcio-1.67.1.tar.gz", hash = "sha256:3dc2ed4cabea4dc14d5e708c2b426205956077cc5de419b4d4079315017e9732"}, ] [package.extras] 
-protobuf = ["grpcio-tools (>=1.67.0)"] +protobuf = ["grpcio-tools (>=1.67.1)"] [[package]] name = "grpcio-status" @@ -4644,13 +4645,13 @@ openai = ["openai (>=0.27.8)"] [[package]] name = "langsmith" -version = "0.1.137" +version = "0.1.138" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.137-py3-none-any.whl", hash = "sha256:4256d5c61133749890f7b5c88321dbb133ce0f440c621ea28e76513285859b81"}, - {file = "langsmith-0.1.137.tar.gz", hash = "sha256:56cdfcc6c74cb20a3f437d5bd144feb5bf93f54c5a2918d1e568cbd084a372d4"}, + {file = "langsmith-0.1.138-py3-none-any.whl", hash = "sha256:5c2bd5c11c75f7b3d06a0f06b115186e7326ca969fd26d66ffc65a0669012aee"}, + {file = "langsmith-0.1.138.tar.gz", hash = "sha256:1ecf613bb52f6bf17f1510e24ad8b70d4b0259bc9d3dbfd69b648c66d4644f0b"}, ] [package.dependencies] @@ -5611,12 +5612,12 @@ twitter = ["twython"] [[package]] name = "nomic" -version = "3.1.2" +version = "3.1.3" description = "The official Nomic python client." 
optional = false python-versions = "*" files = [ - {file = "nomic-3.1.2.tar.gz", hash = "sha256:2de1ab1dcf2429011c92987bb2f1eafe1a3a4901c3185b18f994bf89616f606d"}, + {file = "nomic-3.1.3.tar.gz", hash = "sha256:b06744b79fbe47451874ca7b272cafa1bb272cfb82acc79c64abfc943a98e035"}, ] [package.dependencies] @@ -5636,7 +5637,7 @@ tqdm = "*" [package.extras] all = ["nomic[aws,local]"] aws = ["boto3", "sagemaker"] -dev = ["black (==24.3.0)", "cairosvg", "coverage", "isort", "mkautodoc", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]", "myst-parser", "nomic[all]", "pandas", "pillow", "pylint", "pyright", "pytest", "pytorch-lightning", "twine"] +dev = ["black (==24.3.0)", "cairosvg", "coverage", "isort", "mkautodoc", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]", "myst-parser", "nomic[all]", "pandas", "pillow", "pylint", "pyright (<=1.1.377)", "pytest", "pytorch-lightning", "twine"] local = ["gpt4all (>=2.5.0,<3)"] [[package]] @@ -6705,19 +6706,19 @@ dill = ["dill (>=0.3.9)"] [[package]] name = "primp" -version = "0.6.4" +version = "0.6.5" description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints" optional = false python-versions = ">=3.8" files = [ - {file = "primp-0.6.4-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e627330c1f2b723b523dc2e47caacbc5b5d0cd51ca11583b42fb8cde4da60d7d"}, - {file = "primp-0.6.4-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:e0cb7c05dd56c8b9741042fd568c0983fc19b0f3aa209a3940ecc04b4fd60314"}, - {file = "primp-0.6.4-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4adc200ccb39e130c478d8b1a94f43a5b359068c6cb65b7c848812f96d96992"}, - {file = "primp-0.6.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:0ebae2d3aa36b04028e4accf2609d31d2e6981659e8e2effb09ee8ba960192e1"}, - {file = "primp-0.6.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:77f5fa5b34eaf251815622258419a484a2a9179dcbae2a1e702a254d91f613f1"}, - {file = 
"primp-0.6.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:14cddf535cd2c4987412e90ca3ca35ae52cddbee6e0f0953d26b33a652a95692"}, - {file = "primp-0.6.4-cp38-abi3-win_amd64.whl", hash = "sha256:96177ec2dadc47eaecbf0b22d2e93aeaf964a1be9a71e6e318d2ffb9e4242743"}, - {file = "primp-0.6.4.tar.gz", hash = "sha256:0a3de63e46a50664bcdc76e7aaf7060bf8443698efa902864669c5fca0d1abdd"}, + {file = "primp-0.6.5-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b2bab0250d38c02a437c75ed94b99e3a8c03a281ba9a4c33780ccd04999c741b"}, + {file = "primp-0.6.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:0aedb33515d86df4c1f91b9d5772e1b74d1593dfe8978c258b136c171f8ab94c"}, + {file = "primp-0.6.5-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8850be30fbfefeb76c1eb5859a55c5f11c8c285a4a03ebf99c73fea964b2a"}, + {file = "primp-0.6.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9b71ac07a79cbb401390e2ee5a5767d0bf202a956a533fd084957020fcb2a64"}, + {file = "primp-0.6.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:79c65fcb07b36bd0f8c3966a4a18c4f6a6d624a33a0b0133b0f0cc8d0050c351"}, + {file = "primp-0.6.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5a55e450bb52a88f4a2891db50577c8f20b134d17d37e93361ee51de1a6fe8c8"}, + {file = "primp-0.6.5-cp38-abi3-win_amd64.whl", hash = "sha256:cbe584de5c177b9f0656b77e88721296ae6151b6c4565e2e0a342b6473990f27"}, + {file = "primp-0.6.5.tar.gz", hash = "sha256:abb46c579ae682f34c1f339faac38709c85ab76c056ec3711a26823334ab8124"}, ] [package.extras] @@ -6904,52 +6905,55 @@ files = [ [[package]] name = "pyarrow" -version = "17.0.0" +version = "18.0.0" description = "Python library for Apache Arrow" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, - {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, - {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"}, - {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"}, - {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"}, - {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"}, - {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"}, - {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"}, - {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"}, - {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"}, - {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"}, - {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"}, - {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"}, - {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"}, - {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = 
"sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"}, - {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"}, - {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"}, - {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"}, - {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"}, - {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"}, - {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, - {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, - {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, - {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, - {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, - {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, - {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, - {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, + {file = "pyarrow-18.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2333f93260674e185cfbf208d2da3007132572e56871f451ba1a556b45dae6e2"}, + {file = "pyarrow-18.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:4c381857754da44326f3a49b8b199f7f87a51c2faacd5114352fc78de30d3aba"}, + {file = "pyarrow-18.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:603cd8ad4976568954598ef0a6d4ed3dfb78aff3d57fa8d6271f470f0ce7d34f"}, + {file = "pyarrow-18.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58a62549a3e0bc9e03df32f350e10e1efb94ec6cf63e3920c3385b26663948ce"}, + {file = "pyarrow-18.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = 
"sha256:bc97316840a349485fbb137eb8d0f4d7057e1b2c1272b1a20eebbbe1848f5122"}, + {file = "pyarrow-18.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:2e549a748fa8b8715e734919923f69318c953e077e9c02140ada13e59d043310"}, + {file = "pyarrow-18.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:606e9a3dcb0f52307c5040698ea962685fb1c852d72379ee9412be7de9c5f9e2"}, + {file = "pyarrow-18.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d5795e37c0a33baa618c5e054cd61f586cf76850a251e2b21355e4085def6280"}, + {file = "pyarrow-18.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:5f0510608ccd6e7f02ca8596962afb8c6cc84c453e7be0da4d85f5f4f7b0328a"}, + {file = "pyarrow-18.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:616ea2826c03c16e87f517c46296621a7c51e30400f6d0a61be645f203aa2b93"}, + {file = "pyarrow-18.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1824f5b029ddd289919f354bc285992cb4e32da518758c136271cf66046ef22"}, + {file = "pyarrow-18.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6dd1b52d0d58dd8f685ced9971eb49f697d753aa7912f0a8f50833c7a7426319"}, + {file = "pyarrow-18.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:320ae9bd45ad7ecc12ec858b3e8e462578de060832b98fc4d671dee9f10d9954"}, + {file = "pyarrow-18.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:2c992716cffb1088414f2b478f7af0175fd0a76fea80841b1706baa8fb0ebaad"}, + {file = "pyarrow-18.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:e7ab04f272f98ebffd2a0661e4e126036f6936391ba2889ed2d44c5006237802"}, + {file = "pyarrow-18.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:03f40b65a43be159d2f97fd64dc998f769d0995a50c00f07aab58b0b3da87e1f"}, + {file = "pyarrow-18.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be08af84808dff63a76860847c48ec0416928a7b3a17c2f49a072cac7c45efbd"}, + {file = "pyarrow-18.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8c70c1965cde991b711a98448ccda3486f2a336457cf4ec4dca257a926e149c9"}, + {file = "pyarrow-18.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:00178509f379415a3fcf855af020e3340254f990a8534294ec3cf674d6e255fd"}, + {file = "pyarrow-18.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:a71ab0589a63a3e987beb2bc172e05f000a5c5be2636b4b263c44034e215b5d7"}, + {file = "pyarrow-18.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe92efcdbfa0bcf2fa602e466d7f2905500f33f09eb90bf0bcf2e6ca41b574c8"}, + {file = "pyarrow-18.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:907ee0aa8ca576f5e0cdc20b5aeb2ad4d3953a3b4769fc4b499e00ef0266f02f"}, + {file = "pyarrow-18.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:66dcc216ebae2eb4c37b223feaf82f15b69d502821dde2da138ec5a3716e7463"}, + {file = "pyarrow-18.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc1daf7c425f58527900876354390ee41b0ae962a73ad0959b9d829def583bb1"}, + {file = "pyarrow-18.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:871b292d4b696b09120ed5bde894f79ee2a5f109cb84470546471df264cae136"}, + {file = "pyarrow-18.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:082ba62bdcb939824ba1ce10b8acef5ab621da1f4c4805e07bfd153617ac19d4"}, + {file = "pyarrow-18.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:2c664ab88b9766413197733c1720d3dcd4190e8fa3bbdc3710384630a0a7207b"}, + {file = "pyarrow-18.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:dc892be34dbd058e8d189b47db1e33a227d965ea8805a235c8a7286f7fd17d3a"}, + {file = "pyarrow-18.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:28f9c39a56d2c78bf6b87dcc699d520ab850919d4a8c7418cd20eda49874a2ea"}, + {file = "pyarrow-18.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:f1a198a50c409ab2d009fbf20956ace84567d67f2c5701511d4dd561fae6f32e"}, + {file = "pyarrow-18.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:b5bd7fd32e3ace012d43925ea4fc8bd1b02cc6cc1e9813b518302950e89b5a22"}, + {file = "pyarrow-18.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:336addb8b6f5208be1b2398442c703a710b6b937b1a046065ee4db65e782ff5a"}, + {file = "pyarrow-18.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:45476490dd4adec5472c92b4d253e245258745d0ccaabe706f8d03288ed60a79"}, + {file = "pyarrow-18.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:b46591222c864e7da7faa3b19455196416cd8355ff6c2cc2e65726a760a3c420"}, + {file = "pyarrow-18.0.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:eb7e3abcda7e1e6b83c2dc2909c8d045881017270a119cc6ee7fdcfe71d02df8"}, + {file = "pyarrow-18.0.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:09f30690b99ce34e0da64d20dab372ee54431745e4efb78ac938234a282d15f9"}, + {file = "pyarrow-18.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d5ca5d707e158540312e09fd907f9f49bacbe779ab5236d9699ced14d2293b8"}, + {file = "pyarrow-18.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6331f280c6e4521c69b201a42dd978f60f7e129511a55da9e0bfe426b4ebb8d"}, + {file = "pyarrow-18.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3ac24b2be732e78a5a3ac0b3aa870d73766dd00beba6e015ea2ea7394f8b4e55"}, + {file = "pyarrow-18.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b30a927c6dff89ee702686596f27c25160dd6c99be5bcc1513a763ae5b1bfc03"}, + {file = "pyarrow-18.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:8f40ec677e942374e3d7f2fad6a67a4c2811a8b975e8703c6fd26d3b168a90e2"}, + {file = "pyarrow-18.0.0.tar.gz", hash = "sha256:a6aa027b1a9d2970cf328ccd6dbe4a996bc13c39fd427f502782f5bdb9ca20f5"}, ] -[package.dependencies] -numpy = ">=1.16.6" - [package.extras] test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] @@ -7257,13 +7261,13 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pymilvus" -version = "2.4.8" +version = "2.4.9" 
description = "Python Sdk for Milvus" optional = false python-versions = ">=3.8" files = [ - {file = "pymilvus-2.4.8-py3-none-any.whl", hash = "sha256:5824f8ef4ecb14cfd4b205bf976aa52576c3a83c3cd848d21c8f5f9bb99b29e1"}, - {file = "pymilvus-2.4.8.tar.gz", hash = "sha256:0ddd18a060635fc8f1d1ab5635d9cc340ef29a97783b73db186df6334fa31ee2"}, + {file = "pymilvus-2.4.9-py3-none-any.whl", hash = "sha256:45313607d2c164064bdc44e0f933cb6d6afa92e9efcc7f357c5240c57db58fbe"}, + {file = "pymilvus-2.4.9.tar.gz", hash = "sha256:0937663700007c23a84cfc0656160b301f6ff9247aaec4c96d599a6b43572136"}, ] [package.dependencies] @@ -7372,23 +7376,24 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pypdf" -version = "5.0.1" +version = "5.1.0" description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" optional = false python-versions = ">=3.8" files = [ - {file = "pypdf-5.0.1-py3-none-any.whl", hash = "sha256:ff8a32da6c7a63fea9c32fa4dd837cdd0db7966adf6c14f043e3f12592e992db"}, - {file = "pypdf-5.0.1.tar.gz", hash = "sha256:a361c3c372b4a659f9c8dd438d5ce29a753c79c620dc6e1fd66977651f5547ea"}, + {file = "pypdf-5.1.0-py3-none-any.whl", hash = "sha256:3bd4f503f4ebc58bae40d81e81a9176c400cbbac2ba2d877367595fb524dfdfc"}, + {file = "pypdf-5.1.0.tar.gz", hash = "sha256:425a129abb1614183fd1aca6982f650b47f8026867c0ce7c4b9f281c443d2740"}, ] [package.dependencies] typing_extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] -crypto = ["PyCryptodome", "cryptography"] +crypto = ["cryptography"] +cryptodome = ["PyCryptodome"] dev = ["black", "flit", "pip-tools", "pre-commit (<2.18.0)", "pytest-cov", "pytest-socket", "pytest-timeout", "pytest-xdist", "wheel"] docs = ["myst_parser", "sphinx", "sphinx_rtd_theme"] -full = ["Pillow (>=8.0.0)", "PyCryptodome", "cryptography"] +full = ["Pillow (>=8.0.0)", "cryptography"] image = ["Pillow (>=8.0.0)"] [[package]] @@ -7924,99 +7929,99 @@ dev = ["pytest"] [[package]] name = 
"rapidfuzz" -version = "3.10.0" +version = "3.10.1" description = "rapid fuzzy string matching" optional = false python-versions = ">=3.9" files = [ - {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:884453860de029380dded8f3c1918af2d8eb5adf8010261645c7e5c88c2b5428"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718c9bd369288aca5fa929df6dbf66fdbe9768d90940a940c0b5cdc96ade4309"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a68e3724b7dab761c01816aaa64b0903734d999d5589daf97c14ef5cc0629a8e"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1af60988d47534246d9525f77288fdd9de652608a4842815d9018570b959acc6"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3084161fc3e963056232ef8d937449a2943852e07101f5a136c8f3cfa4119217"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cd67d3d017296d98ff505529104299f78433e4b8af31b55003d901a62bbebe9"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b11a127ac590fc991e8a02c2d7e1ac86e8141c92f78546f18b5c904064a0552c"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aadce42147fc09dcef1afa892485311e824c050352e1aa6e47f56b9b27af4cf0"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b54853c2371bf0e38d67da379519deb6fbe70055efb32f6607081641af3dc752"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ce19887268e90ee81a3957eef5e46a70ecc000713796639f83828b950343f49e"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f39a2a5ded23b9b9194ec45740dce57177b80f86c6d8eba953d3ff1a25c97766"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:0ec338d5f4ad8d9339a88a08db5c23e7f7a52c2b2a10510c48a0cef1fb3f0ddc"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-win32.whl", hash = "sha256:56fd15ea8f4c948864fa5ebd9261c67cf7b89a1c517a0caef4df75446a7af18c"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:43dfc5e733808962a822ff6d9c29f3039a3cfb3620706f5953e17cfe4496724c"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-win_arm64.whl", hash = "sha256:ae7966f205b5a7fde93b44ca8fed37c1c8539328d7f179b1197de34eceaceb5f"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb0013795b40db5cf361e6f21ee7cda09627cf294977149b50e217d7fe9a2f03"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69ef5b363afff7150a1fbe788007e307b9802a2eb6ad92ed51ab94e6ad2674c6"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c582c46b1bb0b19f1a5f4c1312f1b640c21d78c371a6615c34025b16ee56369b"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:288f6f6e7410cacb115fb851f3f18bf0e4231eb3f6cb5bd1cec0e7b25c4d039d"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9e29a13d2fd9be3e7d8c26c7ef4ba60b5bc7efbc9dbdf24454c7e9ebba31768"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea2da0459b951ee461bd4e02b8904890bd1c4263999d291c5cd01e6620177ad4"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:457827ba82261aa2ae6ac06a46d0043ab12ba7216b82d87ae1434ec0f29736d6"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5d350864269d56f51ab81ab750c9259ae5cad3152c0680baef143dcec92206a1"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a9b8f51e08c3f983d857c3889930af9ddecc768453822076683664772d87e374"}, - {file = 
"rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7f3a6aa6e70fc27e4ff5c479f13cc9fc26a56347610f5f8b50396a0d344c5f55"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:803f255f10d63420979b1909ef976e7d30dec42025c9b067fc1d2040cc365a7e"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2026651761bf83a0f31495cc0f70840d5c0d54388f41316e3f9cb51bd85e49a5"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-win32.whl", hash = "sha256:4df75b3ebbb8cfdb9bf8b213b168620b88fd92d0c16a8bc9f9234630b282db59"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:f9f0bbfb6787b97c51516f3ccf97737d504db5d239ad44527673b81f598b84ab"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-win_arm64.whl", hash = "sha256:10fdad800441b9c97d471a937ba7d42625f1b530db05e572f1cb7d401d95c893"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7dc87073ba3a40dd65591a2100aa71602107443bf10770579ff9c8a3242edb94"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a425a0a868cf8e9c6e93e1cda4b758cdfd314bb9a4fc916c5742c934e3613480"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d5d1d75e61df060c1e56596b6b0a4422a929dff19cc3dbfd5eee762c86b61"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34f213d59219a9c3ca14e94a825f585811a68ac56b4118b4dc388b5b14afc108"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96ad46f5f56f70fab2be9e5f3165a21be58d633b90bf6e67fc52a856695e4bcf"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9178277f72d144a6c7704d7ae7fa15b7b86f0f0796f0e1049c7b4ef748a662ef"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:76a35e9e19a7c883c422ffa378e9a04bc98cb3b29648c5831596401298ee51e6"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a6405d34c394c65e4f73a1d300c001f304f08e529d2ed6413b46ee3037956eb"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bd393683129f446a75d8634306aed7e377627098a1286ff3af2a4f1736742820"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b0445fa9880ead81f5a7d0efc0b9c977a947d8052c43519aceeaf56eabaf6843"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c50bc308fa29767ed8f53a8d33b7633a9e14718ced038ed89d41b886e301da32"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e89605afebbd2d4b045bccfdc12a14b16fe8ccbae05f64b4b4c64a97dad1c891"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-win32.whl", hash = "sha256:2db9187f3acf3cd33424ecdbaad75414c298ecd1513470df7bda885dcb68cc15"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:50e3d0c72ea15391ba9531ead7f2068a67c5b18a6a365fef3127583aaadd1725"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-win_arm64.whl", hash = "sha256:9eac95b4278bd53115903d89118a2c908398ee8bdfd977ae844f1bd2b02b917c"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fe5231e8afd069c742ac5b4f96344a0fe4aff52df8e53ef87faebf77f827822c"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:886882367dbc985f5736356105798f2ae6e794e671fc605476cbe2e73838a9bb"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b33e13e537e3afd1627d421a142a12bbbe601543558a391a6fae593356842f6e"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:094c26116d55bf9c53abd840d08422f20da78ec4c4723e5024322321caedca48"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:545fc04f2d592e4350f59deb0818886c1b444ffba3bec535b4fbb97191aaf769"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:916a6abf3632e592b937c3d04c00a6efadd8fd30539cdcd4e6e4d92be7ca5d90"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb6ec40cef63b1922083d33bfef2f91fc0b0bc07b5b09bfee0b0f1717d558292"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c77a7330dd15c7eb5fd3631dc646fc96327f98db8181138766bd14d3e905f0ba"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:949b5e9eeaa4ecb4c7e9c2a4689dddce60929dd1ff9c76a889cdbabe8bbf2171"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b5363932a5aab67010ae1a6205c567d1ef256fb333bc23c27582481606be480c"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5dd6eec15b13329abe66cc241b484002ecb0e17d694491c944a22410a6a9e5e2"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79e7f98525b60b3c14524e0a4e1fedf7654657b6e02eb25f1be897ab097706f3"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-win32.whl", hash = "sha256:d29d1b9857c65f8cb3a29270732e1591b9bacf89de9d13fa764f79f07d8f1fd2"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:fa9720e56663cc3649d62b4b5f3145e94b8f5611e8a8e1b46507777249d46aad"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-win_arm64.whl", hash = "sha256:eda4c661e68dddd56c8fbfe1ca35e40dd2afd973f7ebb1605f4d151edc63dff8"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cffbc50e0767396ed483900900dd58ce4351bc0d40e64bced8694bd41864cc71"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c038b9939da3035afb6cb2f465f18163e8f070aba0482923ecff9443def67178"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ca366c2e2a54e2f663f4529b189fdeb6e14d419b1c78b754ec1744f3c01070d4"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c4c82b1689b23b1b5e6a603164ed2be41b6f6de292a698b98ba2381e889eb9d"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98f6ebe28831a482981ecfeedc8237047878424ad0c1add2c7f366ba44a20452"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd1a7676ee2a4c8e2f7f2550bece994f9f89e58afb96088964145a83af7408b"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec9139baa3f85b65adc700eafa03ed04995ca8533dd56c924f0e458ffec044ab"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:26de93e6495078b6af4c4d93a42ca067b16cc0e95699526c82ab7d1025b4d3bf"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f3a0bda83c18195c361b5500377d0767749f128564ca95b42c8849fd475bb327"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:63e4c175cbce8c3adc22dca5e6154588ae673f6c55374d156f3dac732c88d7de"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4dd3d8443970eaa02ab5ae45ce584b061f2799cd9f7e875190e2617440c1f9d4"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5ddb2388610799fc46abe389600625058f2a73867e63e20107c5ad5ffa57c47"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-win32.whl", hash = "sha256:2e9be5d05cd960914024412b5406fb75a82f8562f45912ff86255acbfdbfb78e"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:47aca565a39c9a6067927871973ca827023e8b65ba6c5747f4c228c8d7ddc04f"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-win_arm64.whl", hash = "sha256:b0732343cdc4273b5921268026dd7266f75466eb21873cb7635a200d9d9c3fac"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:f744b5eb1469bf92dd143d36570d2bdbbdc88fe5cb0b5405e53dd34f479cbd8a"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b67cc21a14327a0eb0f47bc3d7e59ec08031c7c55220ece672f9476e7a8068d3"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe5783676f0afba4a522c80b15e99dbf4e393c149ab610308a8ef1f04c6bcc8"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4688862f957c8629d557d084f20b2d803f8738b6c4066802a0b1cc472e088d9"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20bd153aacc244e4c907d772c703fea82754c4db14f8aa64d75ff81b7b8ab92d"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:50484d563f8bfa723c74c944b0bb15b9e054db9c889348c8c307abcbee75ab92"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5897242d455461f2c5b82d7397b29341fd11e85bf3608a522177071044784ee8"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:116c71a81e046ba56551d8ab68067ca7034d94b617545316d460a452c5c3c289"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0a547e4350d1fa32624d3eab51eff8cf329f4cae110b4ea0402486b1da8be40"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:399b9b79ccfcf50ca3bad7692bc098bb8eade88d7d5e15773b7f866c91156d0c"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7947a425d1be3e744707ee58c6cb318b93a56e08f080722dcc0347e0b7a1bb9a"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:94c48b4a2a4b1d22246f48e2b11cae01ec7d23f0c9123f8bb822839ad79d0a88"}, - {file = "rapidfuzz-3.10.0.tar.gz", hash = 
"sha256:6b62af27e65bb39276a66533655a2fa3c60a487b03935721c45b7809527979be"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f17d9f21bf2f2f785d74f7b0d407805468b4c173fa3e52c86ec94436b338e74a"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b31f358a70efc143909fb3d75ac6cd3c139cd41339aa8f2a3a0ead8315731f2b"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f4f43f2204b56a61448ec2dd061e26fd344c404da99fb19f3458200c5874ba2"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d81bf186a453a2757472133b24915768abc7c3964194406ed93e170e16c21cb"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3611c8f45379a12063d70075c75134f2a8bd2e4e9b8a7995112ddae95ca1c982"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c3b537b97ac30da4b73930fa8a4fe2f79c6d1c10ad535c5c09726612cd6bed9"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:231ef1ec9cf7b59809ce3301006500b9d564ddb324635f4ea8f16b3e2a1780da"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ed4f3adc1294834955b7e74edd3c6bd1aad5831c007f2d91ea839e76461a5879"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:7b6015da2e707bf632a71772a2dbf0703cff6525732c005ad24987fe86e8ec32"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1b35a118d61d6f008e8e3fb3a77674d10806a8972c7b8be433d6598df4d60b01"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:bc308d79a7e877226f36bdf4e149e3ed398d8277c140be5c1fd892ec41739e6d"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f017dbfecc172e2d0c37cf9e3d519179d71a7f16094b57430dffc496a098aa17"}, + {file = 
"rapidfuzz-3.10.1-cp310-cp310-win32.whl", hash = "sha256:36c0e1483e21f918d0f2f26799fe5ac91c7b0c34220b73007301c4f831a9c4c7"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:10746c1d4c8cd8881c28a87fd7ba0c9c102346dfe7ff1b0d021cdf093e9adbff"}, + {file = "rapidfuzz-3.10.1-cp310-cp310-win_arm64.whl", hash = "sha256:dfa64b89dcb906835e275187569e51aa9d546a444489e97aaf2cc84011565fbe"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:92958ae075c87fef393f835ed02d4fe8d5ee2059a0934c6c447ea3417dfbf0e8"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba7521e072c53e33c384e78615d0718e645cab3c366ecd3cc8cb732befd94967"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d02cbd75d283c287471b5b3738b3e05c9096150f93f2d2dfa10b3d700f2db9"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:efa1582a397da038e2f2576c9cd49b842f56fde37d84a6b0200ffebc08d82350"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f12912acee1f506f974f58de9fdc2e62eea5667377a7e9156de53241c05fdba8"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666d5d8b17becc3f53447bcb2b6b33ce6c2df78792495d1fa82b2924cd48701a"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26f71582c0d62445067ee338ddad99b655a8f4e4ed517a90dcbfbb7d19310474"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8a2ef08b27167bcff230ffbfeedd4c4fa6353563d6aaa015d725dd3632fc3de7"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:365e4fc1a2b95082c890f5e98489b894e6bf8c338c6ac89bb6523c2ca6e9f086"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:1996feb7a61609fa842e6b5e0c549983222ffdedaf29644cc67e479902846dfe"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:cf654702f144beaa093103841a2ea6910d617d0bb3fccb1d1fd63c54dde2cd49"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ec108bf25de674781d0a9a935030ba090c78d49def3d60f8724f3fc1e8e75024"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-win32.whl", hash = "sha256:031f8b367e5d92f7a1e27f7322012f3c321c3110137b43cc3bf678505583ef48"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:f98f36c6a1bb9a6c8bbec99ad87c8c0e364f34761739b5ea9adf7b48129ae8cf"}, + {file = "rapidfuzz-3.10.1-cp311-cp311-win_arm64.whl", hash = "sha256:f1da2028cb4e41be55ee797a82d6c1cf589442504244249dfeb32efc608edee7"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1340b56340896bede246f612b6ecf685f661a56aabef3d2512481bfe23ac5835"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2316515169b7b5a453f0ce3adbc46c42aa332cae9f2edb668e24d1fc92b2f2bb"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e06fe6a12241ec1b72c0566c6b28cda714d61965d86569595ad24793d1ab259"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d99c1cd9443b19164ec185a7d752f4b4db19c066c136f028991a480720472e23"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1d9aa156ed52d3446388ba4c2f335e312191d1ca9d1f5762ee983cf23e4ecf6"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:54bcf4efaaee8e015822be0c2c28214815f4f6b4f70d8362cfecbd58a71188ac"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0c955e32afdbfdf6e9ee663d24afb25210152d98c26d22d399712d29a9b976b"}, + {file = 
"rapidfuzz-3.10.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:191633722203f5b7717efcb73a14f76f3b124877d0608c070b827c5226d0b972"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:195baad28057ec9609e40385991004e470af9ef87401e24ebe72c064431524ab"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0fff4a6b87c07366662b62ae994ffbeadc472e72f725923f94b72a3db49f4671"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4ffed25f9fdc0b287f30a98467493d1e1ce5b583f6317f70ec0263b3c97dbba6"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d02cf8e5af89a9ac8f53c438ddff6d773f62c25c6619b29db96f4aae248177c0"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-win32.whl", hash = "sha256:f3bb81d4fe6a5d20650f8c0afcc8f6e1941f6fecdb434f11b874c42467baded0"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:aaf83e9170cb1338922ae42d320699dccbbdca8ffed07faeb0b9257822c26e24"}, + {file = "rapidfuzz-3.10.1-cp312-cp312-win_arm64.whl", hash = "sha256:c5da802a0d085ad81b0f62828fb55557996c497b2d0b551bbdfeafd6d447892f"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fc22d69a1c9cccd560a5c434c0371b2df0f47c309c635a01a913e03bbf183710"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38b0dac2c8e057562b8f0d8ae5b663d2d6a28c5ab624de5b73cef9abb6129a24"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fde3bbb14e92ce8fcb5c2edfff72e474d0080cadda1c97785bf4822f037a309"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9141fb0592e55f98fe9ac0f3ce883199b9c13e262e0bf40c5b18cdf926109d16"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:237bec5dd1bfc9b40bbd786cd27949ef0c0eb5fab5eb491904c6b5df59d39d3c"}, + {file = 
"rapidfuzz-3.10.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18123168cba156ab5794ea6de66db50f21bb3c66ae748d03316e71b27d907b95"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b75fe506c8e02769cc47f5ab21ce3e09b6211d3edaa8f8f27331cb6988779be"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9da82aa4b46973aaf9e03bb4c3d6977004648c8638febfc0f9d237e865761270"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c34c022d5ad564f1a5a57a4a89793bd70d7bad428150fb8ff2760b223407cdcf"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1e96c84d6c2a0ca94e15acb5399118fff669f4306beb98a6d8ec6f5dccab4412"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e8e154b84a311263e1aca86818c962e1fa9eefdd643d1d5d197fcd2738f88cb9"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:335fee93188f8cd585552bb8057228ce0111bd227fa81bfd40b7df6b75def8ab"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-win32.whl", hash = "sha256:6729b856166a9e95c278410f73683957ea6100c8a9d0a8dbe434c49663689255"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-win_amd64.whl", hash = "sha256:0e06d99ad1ad97cb2ef7f51ec6b1fedd74a3a700e4949353871cf331d07b382a"}, + {file = "rapidfuzz-3.10.1-cp313-cp313-win_arm64.whl", hash = "sha256:8d1b7082104d596a3eb012e0549b2634ed15015b569f48879701e9d8db959dbb"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:779027d3307e1a2b1dc0c03c34df87a470a368a1a0840a9d2908baf2d4067956"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:440b5608ab12650d0390128d6858bc839ae77ffe5edf0b33a1551f2fa9860651"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82cac41a411e07a6f3dc80dfbd33f6be70ea0abd72e99c59310819d09f07d945"}, + {file = 
"rapidfuzz-3.10.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:958473c9f0bca250590200fd520b75be0dbdbc4a7327dc87a55b6d7dc8d68552"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ef60dfa73749ef91cb6073be1a3e135f4846ec809cc115f3cbfc6fe283a5584"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7fbac18f2c19fc983838a60611e67e3262e36859994c26f2ee85bb268de2355"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a0d519ff39db887cd73f4e297922786d548f5c05d6b51f4e6754f452a7f4296"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bebb7bc6aeb91cc57e4881b222484c26759ca865794187217c9dcea6c33adae6"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fe07f8b9c3bb5c5ad1d2c66884253e03800f4189a60eb6acd6119ebaf3eb9894"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:bfa48a4a2d45a41457f0840c48e579db157a927f4e97acf6e20df8fc521c79de"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2cf44d01bfe8ee605b7eaeecbc2b9ca64fc55765f17b304b40ed8995f69d7716"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e6bbca9246d9eedaa1c84e04a7f555493ba324d52ae4d9f3d9ddd1b740dcd87"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-win32.whl", hash = "sha256:567f88180f2c1423b4fe3f3ad6e6310fc97b85bdba574801548597287fc07028"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:6b2cd7c29d6ecdf0b780deb587198f13213ac01c430ada6913452fd0c40190fc"}, + {file = "rapidfuzz-3.10.1-cp39-cp39-win_arm64.whl", hash = "sha256:9f912d459e46607ce276128f52bea21ebc3e9a5ccf4cccfef30dd5bddcf47be8"}, + {file = "rapidfuzz-3.10.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ac4452f182243cfab30ba4668ef2de101effaedc30f9faabb06a095a8c90fd16"}, + {file = 
"rapidfuzz-3.10.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:565c2bd4f7d23c32834652b27b51dd711814ab614b4e12add8476be4e20d1cf5"}, + {file = "rapidfuzz-3.10.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:187d9747149321607be4ccd6f9f366730078bed806178ec3eeb31d05545e9e8f"}, + {file = "rapidfuzz-3.10.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:616290fb9a8fa87e48cb0326d26f98d4e29f17c3b762c2d586f2b35c1fd2034b"}, + {file = "rapidfuzz-3.10.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:073a5b107e17ebd264198b78614c0206fa438cce749692af5bc5f8f484883f50"}, + {file = "rapidfuzz-3.10.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:39c4983e2e2ccb9732f3ac7d81617088822f4a12291d416b09b8a1eadebb3e29"}, + {file = "rapidfuzz-3.10.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ac7adee6bcf0c6fee495d877edad1540a7e0f5fc208da03ccb64734b43522d7a"}, + {file = "rapidfuzz-3.10.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:425f4ac80b22153d391ee3f94bc854668a0c6c129f05cf2eaf5ee74474ddb69e"}, + {file = "rapidfuzz-3.10.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65a2fa13e8a219f9b5dcb9e74abe3ced5838a7327e629f426d333dfc8c5a6e66"}, + {file = "rapidfuzz-3.10.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75561f3df9a906aaa23787e9992b228b1ab69007932dc42070f747103e177ba8"}, + {file = "rapidfuzz-3.10.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:edd062490537e97ca125bc6c7f2b7331c2b73d21dc304615afe61ad1691e15d5"}, + {file = "rapidfuzz-3.10.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfcc8feccf63245a22dfdd16e222f1a39771a44b870beb748117a0e09cbb4a62"}, + {file = "rapidfuzz-3.10.1.tar.gz", hash = "sha256:5a15546d847a915b3f42dc79ef9b0c78b998b4e2c53b252e7166284066585979"}, ] [package.extras] @@ -8680,13 +8685,13 @@ test = ["accelerate 
(>=0.24.1,<=0.27.0)", "apache-airflow (==2.9.3)", "apache-ai [[package]] name = "sagemaker-core" -version = "1.0.10" +version = "1.0.11" description = "An python package for sagemaker core functionalities" optional = false python-versions = ">=3.8" files = [ - {file = "sagemaker_core-1.0.10-py3-none-any.whl", hash = "sha256:0bdcf6a467db988919cc6b6d0077f74871ee24c24adf7f759f9cb98460e08953"}, - {file = "sagemaker_core-1.0.10.tar.gz", hash = "sha256:6d34a9b6dc5e17e8bfffd1d0650726865779c92b3b8f1b59fc15d42061a0dd29"}, + {file = "sagemaker_core-1.0.11-py3-none-any.whl", hash = "sha256:d8ee3db83759073aa8c9f2bd4899113088a7c2acf340597e76cf9934e384d915"}, + {file = "sagemaker_core-1.0.11.tar.gz", hash = "sha256:fb48a5dcb859a54de7461c71cf58562a3be259294dcd39c317020a9b018f5016"}, ] [package.dependencies] @@ -8735,6 +8740,11 @@ files = [ {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12"}, + {file = 
"scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f"}, {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, @@ -8860,23 +8870,23 @@ tornado = ["tornado (>=5)"] [[package]] name = "setuptools" -version = "75.2.0" +version = "75.3.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"}, - {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"}, + {file = "setuptools-75.3.0-py3-none-any.whl", hash = "sha256:f2504966861356aa38616760c0f66568e535562374995367b4e69c7143cf6bcd"}, + {file = "setuptools-75.3.0.tar.gz", hash = "sha256:fba5dd4d766e97be1b1681d98712680ae8f2f26d7881245f2ce9e40714f1a686"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", 
"wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.12.*)", "pytest-mypy"] [[package]] name = "sgmllib3k" @@ -9625,13 +9635,13 @@ six = "*" [[package]] name = "tqdm" -version = "4.66.5" +version = "4.66.6" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, - {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, + {file = 
"tqdm-4.66.6-py3-none-any.whl", hash = "sha256:223e8b5359c2efc4b30555531f09e9f2f3589bcd7fdd389271191031b49b7a63"}, + {file = "tqdm-4.66.6.tar.gz", hash = "sha256:4bdd694238bef1485ce839d67967ab50af8f9272aab687c0d7702a01da0be090"}, ] [package.dependencies] @@ -9885,13 +9895,13 @@ files = [ [[package]] name = "unstructured" -version = "0.16.1" +version = "0.16.3" description = "A library that prepares raw documents for downstream ML tasks." optional = false python-versions = "<3.13,>=3.9.0" files = [ - {file = "unstructured-0.16.1-py3-none-any.whl", hash = "sha256:7512281a2917809a563cbb186876b77d5a361e1f3089eca61e9219aecd1218f9"}, - {file = "unstructured-0.16.1.tar.gz", hash = "sha256:03608b5189a004412cd618ce2d083ff926c56dbbca41b41c92e08ffa9e2bac3a"}, + {file = "unstructured-0.16.3-py3-none-any.whl", hash = "sha256:e0e3b56531b44e62154d17cbfdae7fd7fa1d795b7cf510fb654c6714d4257655"}, + {file = "unstructured-0.16.3.tar.gz", hash = "sha256:f9528636773c910a53c8a34e32d4733ea54b79cbd507d0e956e299ab1da3003f"}, ] [package.dependencies] @@ -9922,19 +9932,19 @@ unstructured-client = "*" wrapt = "*" [package.extras] -all-docs = ["effdet", "google-cloud-vision", "markdown", "networkx", "onnx", "openpyxl", "pandas", "pdf2image", "pdfminer.six", "pi-heif", "pikepdf", "pypandoc", "pypdf", "python-docx (>=1.1.2)", "python-pptx (>=1.0.1)", "unstructured-inference (==0.8.0)", "unstructured.pytesseract (>=0.3.12)", "xlrd"] +all-docs = ["effdet", "google-cloud-vision", "markdown", "networkx", "onnx", "openpyxl", "pandas", "pdf2image", "pdfminer.six", "pi-heif", "pikepdf", "pypandoc", "pypdf", "python-docx (>=1.1.2)", "python-pptx (>=1.0.1)", "unstructured-inference (==0.8.1)", "unstructured.pytesseract (>=0.3.12)", "xlrd"] csv = ["pandas"] doc = ["python-docx (>=1.1.2)"] docx = ["python-docx (>=1.1.2)"] epub = ["pypandoc"] huggingface = ["langdetect", "sacremoses", "sentencepiece", "torch", "transformers"] -image = ["effdet", "google-cloud-vision", "onnx", "pdf2image", "pdfminer.six", 
"pi-heif", "pikepdf", "pypdf", "unstructured-inference (==0.8.0)", "unstructured.pytesseract (>=0.3.12)"] -local-inference = ["effdet", "google-cloud-vision", "markdown", "networkx", "onnx", "openpyxl", "pandas", "pdf2image", "pdfminer.six", "pi-heif", "pikepdf", "pypandoc", "pypdf", "python-docx (>=1.1.2)", "python-pptx (>=1.0.1)", "unstructured-inference (==0.8.0)", "unstructured.pytesseract (>=0.3.12)", "xlrd"] +image = ["effdet", "google-cloud-vision", "onnx", "pdf2image", "pdfminer.six", "pi-heif", "pikepdf", "pypdf", "unstructured-inference (==0.8.1)", "unstructured.pytesseract (>=0.3.12)"] +local-inference = ["effdet", "google-cloud-vision", "markdown", "networkx", "onnx", "openpyxl", "pandas", "pdf2image", "pdfminer.six", "pi-heif", "pikepdf", "pypandoc", "pypdf", "python-docx (>=1.1.2)", "python-pptx (>=1.0.1)", "unstructured-inference (==0.8.1)", "unstructured.pytesseract (>=0.3.12)", "xlrd"] md = ["markdown"] odt = ["pypandoc", "python-docx (>=1.1.2)"] org = ["pypandoc"] paddleocr = ["paddlepaddle (==3.0.0b1)", "unstructured.paddleocr (==2.8.1.0)"] -pdf = ["effdet", "google-cloud-vision", "onnx", "pdf2image", "pdfminer.six", "pi-heif", "pikepdf", "pypdf", "unstructured-inference (==0.8.0)", "unstructured.pytesseract (>=0.3.12)"] +pdf = ["effdet", "google-cloud-vision", "onnx", "pdf2image", "pdfminer.six", "pi-heif", "pikepdf", "pypdf", "unstructured-inference (==0.8.1)", "unstructured.pytesseract (>=0.3.12)"] ppt = ["python-pptx (>=1.0.1)"] pptx = ["python-pptx (>=1.0.1)"] rst = ["pypandoc"] @@ -9944,13 +9954,13 @@ xlsx = ["networkx", "openpyxl", "pandas", "xlrd"] [[package]] name = "unstructured-client" -version = "0.26.1" +version = "0.26.2" description = "Python Client SDK for Unstructured API" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "unstructured_client-0.26.1-py3-none-any.whl", hash = "sha256:b8b839d477122bab3f37242cbe44b39f7eb7b564b07b53500321f953710119b6"}, - {file = "unstructured_client-0.26.1.tar.gz", hash = 
"sha256:907cceb470529b45b0fddb2d0f1bbf4d6568f347c757ab68639a7bb620ec2484"}, + {file = "unstructured_client-0.26.2-py3-none-any.whl", hash = "sha256:0adb22b7d175814f333ee2425a279005f253220a55f459fd5830a6779b679780"}, + {file = "unstructured_client-0.26.2.tar.gz", hash = "sha256:02f7183ab16db6ec48ad1ac75c01b05967c87c561a89e96d9ffb836baed902d7"}, ] [package.dependencies] @@ -10445,13 +10455,13 @@ files = [ [[package]] name = "werkzeug" -version = "3.0.4" +version = "3.0.6" description = "The comprehensive WSGI web application library." optional = false python-versions = ">=3.8" files = [ - {file = "werkzeug-3.0.4-py3-none-any.whl", hash = "sha256:02c9eb92b7d6c06f31a782811505d2157837cea66aaede3e217c7c27c039476c"}, - {file = "werkzeug-3.0.4.tar.gz", hash = "sha256:34f2371506b250df4d4f84bfe7b0921e4762525762bbd936614909fe25cd7306"}, + {file = "werkzeug-3.0.6-py3-none-any.whl", hash = "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17"}, + {file = "werkzeug-3.0.6.tar.gz", hash = "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d"}, ] [package.dependencies] @@ -10745,13 +10755,13 @@ multidict = ">=4.0" [[package]] name = "yfinance" -version = "0.2.46" +version = "0.2.48" description = "Download market data from Yahoo! 
Finance API" optional = false python-versions = "*" files = [ - {file = "yfinance-0.2.46-py2.py3-none-any.whl", hash = "sha256:371860d532cae76605195678a540e29382bfd0607f8aa61695f753e714916ffc"}, - {file = "yfinance-0.2.46.tar.gz", hash = "sha256:a6e2a128915532a54b8f6614cfdb7a8c242d2386e05f95c89b15865b5d9c0352"}, + {file = "yfinance-0.2.48-py2.py3-none-any.whl", hash = "sha256:eda797145faa4536595eb629f869d3616e58ed7e71de36856b19f1abaef71a5b"}, + {file = "yfinance-0.2.48.tar.gz", hash = "sha256:1434cd8bf22f345fa27ef1ed82bfdd291c1bb5b6fe3067118a94e256aa90c4eb"}, ] [package.dependencies] @@ -10995,4 +11005,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "bb8385625eb61de086b7a7156745066b4fb171d9ca67afd1d092fa7e872f3abd" +content-hash = "f20bd678044926913dbbc24bd0cf22503a75817aa55f59457ff7822032139b77" diff --git a/api/pyproject.toml b/api/pyproject.toml index 4438cf61db..40485a8efa 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -34,6 +34,7 @@ select = [ "RUF101", # redirected-noqa "S506", # unsafe-yaml-load "SIM", # flake8-simplify rules + "TRY400", # error-instead-of-exception "UP", # pyupgrade rules "W191", # tab-indentation "W605", # invalid-escape-sequence @@ -117,7 +118,7 @@ beautifulsoup4 = "4.12.2" boto3 = "1.35.17" bs4 = "~0.0.1" cachetools = "~5.3.0" -celery = "~5.3.6" +celery = "~5.4.0" chardet = "~5.1.0" cohere = "~5.2.4" dashscope = { version = "~1.17.0", extras = ["tokenizer"] } diff --git a/api/schedule/create_tidb_serverless_task.py b/api/schedule/create_tidb_serverless_task.py index 42d6c04beb..a20b500308 100644 --- a/api/schedule/create_tidb_serverless_task.py +++ b/api/schedule/create_tidb_serverless_task.py @@ -12,6 +12,8 @@ from models.dataset import TidbAuthBinding @app.celery.task(queue="dataset") def create_tidb_serverless_task(): click.echo(click.style("Start create tidb serverless task.", fg="green")) + if not dify_config.CREATE_TIDB_SERVICE_JOB_ENABLED: + return 
tidb_serverless_number = dify_config.TIDB_SERVERLESS_NUMBER start_at = time.perf_counter() while True: diff --git a/api/services/account_service.py b/api/services/account_service.py index dceca06185..963a055948 100644 --- a/api/services/account_service.py +++ b/api/services/account_service.py @@ -821,7 +821,7 @@ class RegisterService: db.session.rollback() except Exception as e: db.session.rollback() - logging.error(f"Register failed: {e}") + logging.exception(f"Register failed: {e}") raise AccountRegisterError(f"Registration failed: {e}") from e return account diff --git a/api/services/conversation_service.py b/api/services/conversation_service.py index 7bfe59afa0..f9e41988c0 100644 --- a/api/services/conversation_service.py +++ b/api/services/conversation_service.py @@ -160,4 +160,5 @@ class ConversationService: conversation = cls.get_conversation(app_model, conversation_id, user) conversation.is_deleted = True + conversation.updated_at = datetime.now(timezone.utc).replace(tzinfo=None) db.session.commit() diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 50da547fd8..8562dad1d3 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -14,8 +14,6 @@ from configs import dify_config from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError from core.model_manager import ModelManager from core.model_runtime.entities.model_entities import ModelType -from core.rag.datasource.keyword.keyword_factory import Keyword -from core.rag.models.document import Document as RAGDocument from core.rag.retrieval.retrieval_methods import RetrievalMethod from events.dataset_event import dataset_was_deleted from events.document_event import document_was_deleted @@ -37,6 +35,7 @@ from models.dataset import ( ) from models.model import UploadFile from models.source import DataSourceOauthBinding +from services.entities.knowledge_entities.knowledge_entities import SegmentUpdateEntity from 
services.errors.account import NoPermissionError from services.errors.dataset import DatasetNameDuplicateError from services.errors.document import DocumentIndexingError @@ -1415,9 +1414,13 @@ class SegmentService: created_by=current_user.id, ) if document.doc_form == "qa_model": + segment_document.word_count += len(args["answer"]) segment_document.answer = args["answer"] db.session.add(segment_document) + # update document word count + document.word_count += segment_document.word_count + db.session.add(document) db.session.commit() # save vector index @@ -1436,6 +1439,7 @@ class SegmentService: @classmethod def multi_create_segment(cls, segments: list, document: Document, dataset: Dataset): lock_name = "multi_add_segment_lock_document_id_{}".format(document.id) + increment_word_count = 0 with redis_client.lock(lock_name, timeout=600): embedding_model = None if dataset.indexing_technique == "high_quality": @@ -1461,7 +1465,10 @@ class SegmentService: tokens = 0 if dataset.indexing_technique == "high_quality" and embedding_model: # calc embedding use tokens - tokens = embedding_model.get_text_embedding_num_tokens(texts=[content]) + if document.doc_form == "qa_model": + tokens = embedding_model.get_text_embedding_num_tokens(texts=[content + segment_item["answer"]]) + else: + tokens = embedding_model.get_text_embedding_num_tokens(texts=[content]) segment_document = DocumentSegment( tenant_id=current_user.current_tenant_id, dataset_id=document.dataset_id, @@ -1479,6 +1486,8 @@ class SegmentService: ) if document.doc_form == "qa_model": segment_document.answer = segment_item["answer"] + segment_document.word_count += len(segment_item["answer"]) + increment_word_count += segment_document.word_count db.session.add(segment_document) segment_data_list.append(segment_document) @@ -1487,7 +1496,9 @@ class SegmentService: keywords_list.append(segment_item["keywords"]) else: keywords_list.append(None) - + # update document word count + document.word_count += 
increment_word_count + db.session.add(document) try: # save vector index VectorService.create_segments_vector(keywords_list, pre_segment_data_list, dataset) @@ -1503,12 +1514,13 @@ class SegmentService: @classmethod def update_segment(cls, args: dict, segment: DocumentSegment, document: Document, dataset: Dataset): + segment_update_entity = SegmentUpdateEntity(**args) indexing_cache_key = "segment_{}_indexing".format(segment.id) cache_result = redis_client.get(indexing_cache_key) if cache_result is not None: raise ValueError("Segment is indexing, please try again later") - if "enabled" in args and args["enabled"] is not None: - action = args["enabled"] + if segment_update_entity.enabled is not None: + action = segment_update_entity.enabled if segment.enabled != action: if not action: segment.enabled = action @@ -1521,37 +1533,34 @@ class SegmentService: disable_segment_from_index_task.delay(segment.id) return segment if not segment.enabled: - if "enabled" in args and args["enabled"] is not None: - if not args["enabled"]: + if segment_update_entity.enabled is not None: + if not segment_update_entity.enabled: raise ValueError("Can't update disabled segment") else: raise ValueError("Can't update disabled segment") try: - content = args["content"] + word_count_change = segment.word_count + content = segment_update_entity.content if segment.content == content: + segment.word_count = len(content) if document.doc_form == "qa_model": - segment.answer = args["answer"] - if args.get("keywords"): - segment.keywords = args["keywords"] + segment.answer = segment_update_entity.answer + segment.word_count += len(segment_update_entity.answer) + word_count_change = segment.word_count - word_count_change + if segment_update_entity.keywords: + segment.keywords = segment_update_entity.keywords segment.enabled = True segment.disabled_at = None segment.disabled_by = None db.session.add(segment) db.session.commit() + # update document word count + if word_count_change != 0: + 
document.word_count = max(0, document.word_count + word_count_change) + db.session.add(document) # update segment index task - if "keywords" in args: - keyword = Keyword(dataset) - keyword.delete_by_ids([segment.index_node_id]) - document = RAGDocument( - page_content=segment.content, - metadata={ - "doc_id": segment.index_node_id, - "doc_hash": segment.index_node_hash, - "document_id": segment.document_id, - "dataset_id": segment.dataset_id, - }, - ) - keyword.add_texts([document], keywords_list=[args["keywords"]]) + if segment_update_entity.enabled: + VectorService.create_segments_vector([segment_update_entity.keywords], [segment], dataset) else: segment_hash = helper.generate_text_hash(content) tokens = 0 @@ -1565,7 +1574,10 @@ class SegmentService: ) # calc embedding use tokens - tokens = embedding_model.get_text_embedding_num_tokens(texts=[content]) + if document.doc_form == "qa_model": + tokens = embedding_model.get_text_embedding_num_tokens(texts=[content + segment.answer]) + else: + tokens = embedding_model.get_text_embedding_num_tokens(texts=[content]) segment.content = content segment.index_node_hash = segment_hash segment.word_count = len(content) @@ -1579,11 +1591,17 @@ class SegmentService: segment.disabled_at = None segment.disabled_by = None if document.doc_form == "qa_model": - segment.answer = args["answer"] + segment.answer = segment_update_entity.answer + segment.word_count += len(segment_update_entity.answer) + word_count_change = segment.word_count - word_count_change + # update document word count + if word_count_change != 0: + document.word_count = max(0, document.word_count + word_count_change) + db.session.add(document) db.session.add(segment) db.session.commit() # update segment vector index - VectorService.update_segment_vector(args["keywords"], segment, dataset) + VectorService.update_segment_vector(segment_update_entity.keywords, segment, dataset) except Exception as e: logging.exception("update segment index failed") @@ -1608,6 +1626,9 
@@ class SegmentService: redis_client.setex(indexing_cache_key, 600, 1) delete_segment_from_index_task.delay(segment.id, segment.index_node_id, dataset.id, document.id) db.session.delete(segment) + # update document word count + document.word_count -= segment.word_count + db.session.add(document) db.session.commit() diff --git a/api/services/entities/knowledge_entities/knowledge_entities.py b/api/services/entities/knowledge_entities/knowledge_entities.py new file mode 100644 index 0000000000..449b79f339 --- /dev/null +++ b/api/services/entities/knowledge_entities/knowledge_entities.py @@ -0,0 +1,10 @@ +from typing import Optional + +from pydantic import BaseModel + + +class SegmentUpdateEntity(BaseModel): + content: str + answer: Optional[str] = None + keywords: Optional[list[str]] = None + enabled: Optional[bool] = None diff --git a/api/services/tools/api_tools_manage_service.py b/api/services/tools/api_tools_manage_service.py index 257c6cf52b..b6b0143fac 100644 --- a/api/services/tools/api_tools_manage_service.py +++ b/api/services/tools/api_tools_manage_service.py @@ -113,8 +113,10 @@ class ApiToolManageService: if schema_type not in [member.value for member in ApiProviderSchemaType]: raise ValueError(f"invalid schema type {schema}") + provider_name = provider_name.strip() + # check if the provider exists - provider: ApiToolProvider = ( + provider = ( db.session.query(ApiToolProvider) .filter( ApiToolProvider.tenant_id == tenant_id, @@ -193,27 +195,27 @@ class ApiToolManageService: # try to parse schema, avoid SSRF attack ApiToolManageService.parser_api_schema(schema) except Exception as e: - logger.error(f"parse api schema error: {str(e)}") + logger.exception(f"parse api schema error: {str(e)}") raise ValueError("invalid schema, please check the url you provided") return {"schema": schema} @staticmethod - def list_api_tool_provider_tools(user_id: str, tenant_id: str, provider: str) -> list[UserTool]: + def list_api_tool_provider_tools(user_id: str, tenant_id: 
str, provider_name: str) -> list[UserTool]: """ list api tool provider tools """ - provider: ApiToolProvider = ( + provider = ( db.session.query(ApiToolProvider) .filter( ApiToolProvider.tenant_id == tenant_id, - ApiToolProvider.name == provider, + ApiToolProvider.name == provider_name, ) .first() ) if provider is None: - raise ValueError(f"you have not added provider {provider}") + raise ValueError(f"you have not added provider {provider_name}") controller = ToolTransformService.api_provider_to_controller(db_provider=provider) labels = ToolLabelManager.get_tool_labels(controller) @@ -246,8 +248,10 @@ class ApiToolManageService: if schema_type not in [member.value for member in ApiProviderSchemaType]: raise ValueError(f"invalid schema type {schema}") + provider_name = provider_name.strip() + # check if the provider exists - provider: ApiToolProvider = ( + provider = ( db.session.query(ApiToolProvider) .filter( ApiToolProvider.tenant_id == tenant_id, @@ -314,7 +318,7 @@ class ApiToolManageService: """ delete tool provider """ - provider: ApiToolProvider = ( + provider = ( db.session.query(ApiToolProvider) .filter( ApiToolProvider.tenant_id == tenant_id, @@ -364,7 +368,7 @@ class ApiToolManageService: if tool_bundle is None: raise ValueError(f"invalid tool name {tool_name}") - db_provider: ApiToolProvider = ( + db_provider = ( db.session.query(ApiToolProvider) .filter( ApiToolProvider.tenant_id == tenant_id, diff --git a/api/services/tools/tools_transform_service.py b/api/services/tools/tools_transform_service.py index 4af73d5063..e535ddb575 100644 --- a/api/services/tools/tools_transform_service.py +++ b/api/services/tools/tools_transform_service.py @@ -183,7 +183,7 @@ class ToolTransformService: try: username = db_provider.user.name except Exception as e: - logger.error(f"failed to get user name for api provider {db_provider.id}: {str(e)}") + logger.exception(f"failed to get user name for api provider {db_provider.id}: {str(e)}") # add provider into providers 
credentials = db_provider.credentials result = UserToolProvider( diff --git a/api/services/workflow/workflow_converter.py b/api/services/workflow/workflow_converter.py index 75c11afa94..90b5cc4836 100644 --- a/api/services/workflow/workflow_converter.py +++ b/api/services/workflow/workflow_converter.py @@ -13,7 +13,7 @@ from core.app.app_config.entities import ( from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfigManager from core.app.apps.chat.app_config_manager import ChatAppConfigManager from core.app.apps.completion.app_config_manager import CompletionAppConfigManager -from core.file.models import FileExtraConfig +from core.file.models import FileUploadConfig from core.helper import encrypter from core.model_runtime.entities.llm_entities import LLMMode from core.model_runtime.utils.encoders import jsonable_encoder @@ -381,7 +381,7 @@ class WorkflowConverter: graph: dict, model_config: ModelConfigEntity, prompt_template: PromptTemplateEntity, - file_upload: Optional[FileExtraConfig] = None, + file_upload: Optional[FileUploadConfig] = None, external_data_variable_node_mapping: dict[str, str] | None = None, ) -> dict: """ diff --git a/api/tasks/batch_create_segment_to_index_task.py b/api/tasks/batch_create_segment_to_index_task.py index de7f0ddec1..d1b41f2675 100644 --- a/api/tasks/batch_create_segment_to_index_task.py +++ b/api/tasks/batch_create_segment_to_index_task.py @@ -57,7 +57,7 @@ def batch_create_segment_to_index_task( model_type=ModelType.TEXT_EMBEDDING, model=dataset.embedding_model, ) - + word_count_change = 0 for segment in content: content = segment["content"] doc_id = str(uuid.uuid4()) @@ -86,8 +86,13 @@ def batch_create_segment_to_index_task( ) if dataset_document.doc_form == "qa_model": segment_document.answer = segment["answer"] + segment_document.word_count += len(segment["answer"]) + word_count_change += segment_document.word_count db.session.add(segment_document) document_segments.append(segment_document) + # update 
document word count + dataset_document.word_count += word_count_change + db.session.add(dataset_document) # add index to db indexing_runner = IndexingRunner() indexing_runner.batch_add_segments(document_segments, dataset) diff --git a/api/tasks/clean_dataset_task.py b/api/tasks/clean_dataset_task.py index 3624903801..4d45df4d2a 100644 --- a/api/tasks/clean_dataset_task.py +++ b/api/tasks/clean_dataset_task.py @@ -5,6 +5,7 @@ import click from celery import shared_task from core.rag.index_processor.index_processor_factory import IndexProcessorFactory +from core.tools.utils.web_reader_tool import get_image_upload_file_ids from extensions.ext_database import db from extensions.ext_storage import storage from models.dataset import ( @@ -67,6 +68,16 @@ def clean_dataset_task( db.session.delete(document) for segment in segments: + image_upload_file_ids = get_image_upload_file_ids(segment.content) + for upload_file_id in image_upload_file_ids: + image_file = db.session.query(UploadFile).filter(UploadFile.id == upload_file_id).first() + try: + storage.delete(image_file.key) + except Exception: + logging.exception( + "Delete image_files failed when storage deleted, \ + image_upload_file_is: {}".format(upload_file_id) + ) db.session.delete(segment) db.session.query(DatasetProcessRule).filter(DatasetProcessRule.dataset_id == dataset_id).delete() diff --git a/api/tasks/clean_document_task.py b/api/tasks/clean_document_task.py index ae2855aa2e..54c89450c9 100644 --- a/api/tasks/clean_document_task.py +++ b/api/tasks/clean_document_task.py @@ -6,6 +6,7 @@ import click from celery import shared_task from core.rag.index_processor.index_processor_factory import IndexProcessorFactory +from core.tools.utils.web_reader_tool import get_image_upload_file_ids from extensions.ext_database import db from extensions.ext_storage import storage from models.dataset import Dataset, DocumentSegment @@ -40,6 +41,16 @@ def clean_document_task(document_id: str, dataset_id: str, doc_form: str, 
file_i index_processor.clean(dataset, index_node_ids) for segment in segments: + image_upload_file_ids = get_image_upload_file_ids(segment.content) + for upload_file_id in image_upload_file_ids: + image_file = db.session.query(UploadFile).filter(UploadFile.id == upload_file_id).first() + try: + storage.delete(image_file.key) + except Exception: + logging.exception( + "Delete image_files failed when storage deleted, \ + image_upload_file_is: {}".format(upload_file_id) + ) db.session.delete(segment) db.session.commit() diff --git a/api/tasks/ops_trace_task.py b/api/tasks/ops_trace_task.py index 260069c6e2..34c62dc923 100644 --- a/api/tasks/ops_trace_task.py +++ b/api/tasks/ops_trace_task.py @@ -1,17 +1,20 @@ +import json import logging -import time from celery import shared_task from flask import current_app +from core.ops.entities.config_entity import OPS_FILE_PATH, OPS_TRACE_FAILED_KEY from core.ops.entities.trace_entity import trace_info_info_map from core.rag.models.document import Document +from extensions.ext_redis import redis_client +from extensions.ext_storage import storage from models.model import Message from models.workflow import WorkflowRun @shared_task(queue="ops_trace") -def process_trace_tasks(tasks_data): +def process_trace_tasks(file_info): """ Async process trace tasks :param tasks_data: List of dictionaries containing task data @@ -20,9 +23,12 @@ def process_trace_tasks(tasks_data): """ from core.ops.ops_trace_manager import OpsTraceManager - trace_info = tasks_data.get("trace_info") - app_id = tasks_data.get("app_id") - trace_info_type = tasks_data.get("trace_info_type") + app_id = file_info.get("app_id") + file_id = file_info.get("file_id") + file_path = f"{OPS_FILE_PATH}{app_id}/{file_id}.json" + file_data = json.loads(storage.load(file_path)) + trace_info = file_data.get("trace_info") + trace_info_type = file_data.get("trace_info_type") trace_instance = OpsTraceManager.get_ops_trace_instance(app_id) if trace_info.get("message_data"): @@ 
-39,6 +45,10 @@ def process_trace_tasks(tasks_data): if trace_type: trace_info = trace_type(**trace_info) trace_instance.trace(trace_info) - end_at = time.perf_counter() + logging.info(f"Processing trace tasks success, app_id: {app_id}") except Exception: - logging.exception("Processing trace tasks failed") + failed_key = f"{OPS_TRACE_FAILED_KEY}_{app_id}" + redis_client.incr(failed_key) + logging.info(f"Processing trace tasks failed, app_id: {app_id}") + finally: + storage.delete(file_path) diff --git a/api/tests/integration_tests/workflow/nodes/test_http.py b/api/tests/integration_tests/workflow/nodes/test_http.py index 0da6622658..9eea63f722 100644 --- a/api/tests/integration_tests/workflow/nodes/test_http.py +++ b/api/tests/integration_tests/workflow/nodes/test_http.py @@ -430,37 +430,3 @@ def test_multi_colons_parse(setup_http_mock): assert urlencode({"Redirect": "http://example2.com"}) in result.process_data.get("request", "") assert 'form-data; name="Redirect"\r\n\r\nhttp://example6.com' in result.process_data.get("request", "") # assert "http://example3.com" == resp.get("headers", {}).get("referer") - - -def test_image_file(monkeypatch): - from types import SimpleNamespace - - monkeypatch.setattr( - "core.tools.tool_file_manager.ToolFileManager.create_file_by_raw", - lambda *args, **kwargs: SimpleNamespace(id="1"), - ) - - node = init_http_node( - config={ - "id": "1", - "data": { - "title": "http", - "desc": "", - "method": "get", - "url": "https://cloud.dify.ai/logo/logo-site.png", - "authorization": { - "type": "no-auth", - "config": None, - }, - "params": "", - "headers": "", - "body": None, - }, - } - ) - - result = node._run() - assert result.process_data is not None - assert result.outputs is not None - resp = result.outputs - assert len(resp.get("files", [])) == 1 diff --git a/api/tests/unit_tests/core/app/app_config/features/file_upload/test_manager.py b/api/tests/unit_tests/core/app/app_config/features/file_upload/test_manager.py new file mode 
100644 index 0000000000..50a612ec5f --- /dev/null +++ b/api/tests/unit_tests/core/app/app_config/features/file_upload/test_manager.py @@ -0,0 +1,61 @@ +from core.app.app_config.features.file_upload.manager import FileUploadConfigManager +from core.file.models import FileTransferMethod, FileUploadConfig, ImageConfig +from core.model_runtime.entities.message_entities import ImagePromptMessageContent + + +def test_convert_with_vision(): + config = { + "file_upload": { + "enabled": True, + "number_limits": 5, + "allowed_file_upload_methods": [FileTransferMethod.REMOTE_URL], + "image": {"detail": "high"}, + } + } + result = FileUploadConfigManager.convert(config, is_vision=True) + expected = FileUploadConfig( + image_config=ImageConfig( + number_limits=5, + transfer_methods=[FileTransferMethod.REMOTE_URL], + detail=ImagePromptMessageContent.DETAIL.HIGH, + ) + ) + assert result == expected + + +def test_convert_without_vision(): + config = { + "file_upload": { + "enabled": True, + "number_limits": 5, + "allowed_file_upload_methods": [FileTransferMethod.REMOTE_URL], + } + } + result = FileUploadConfigManager.convert(config, is_vision=False) + expected = FileUploadConfig( + image_config=ImageConfig(number_limits=5, transfer_methods=[FileTransferMethod.REMOTE_URL]) + ) + assert result == expected + + +def test_validate_and_set_defaults(): + config = {} + result, keys = FileUploadConfigManager.validate_and_set_defaults(config) + assert "file_upload" in result + assert keys == ["file_upload"] + + +def test_validate_and_set_defaults_with_existing_config(): + config = { + "file_upload": { + "enabled": True, + "number_limits": 5, + "allowed_file_upload_methods": [FileTransferMethod.REMOTE_URL], + } + } + result, keys = FileUploadConfigManager.validate_and_set_defaults(config) + assert "file_upload" in result + assert keys == ["file_upload"] + assert result["file_upload"]["enabled"] is True + assert result["file_upload"]["number_limits"] == 5 + assert 
result["file_upload"]["allowed_file_upload_methods"] == [FileTransferMethod.REMOTE_URL] diff --git a/api/tests/unit_tests/core/app/segments/test_segment.py b/api/tests/unit_tests/core/app/segments/test_segment.py index 3b1715ab45..1b035d01a7 100644 --- a/api/tests/unit_tests/core/app/segments/test_segment.py +++ b/api/tests/unit_tests/core/app/segments/test_segment.py @@ -1,5 +1,5 @@ from core.helper import encrypter -from core.variables import SecretVariable, StringSegment +from core.variables import SecretVariable, StringVariable from core.workflow.entities.variable_pool import VariablePool from core.workflow.enums import SystemVariableKey @@ -54,4 +54,5 @@ def test_convert_variable_to_segment_group(): segments_group = variable_pool.convert_template(template) assert segments_group.text == "fake-user-id" assert segments_group.log == "fake-user-id" - assert segments_group.value == [StringSegment(value="fake-user-id")] + assert isinstance(segments_group.value[0], StringVariable) + assert segments_group.value[0].value == "fake-user-id" diff --git a/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py b/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py index ece2173090..7d19cff3e8 100644 --- a/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py +++ b/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py @@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch import pytest from core.app.app_config.entities import ModelConfigEntity -from core.file import File, FileExtraConfig, FileTransferMethod, FileType, ImageConfig +from core.file import File, FileTransferMethod, FileType, FileUploadConfig, ImageConfig from core.memory.token_buffer_memory import TokenBufferMemory from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, @@ -134,7 +134,6 @@ def test__get_chat_model_prompt_messages_with_files_no_memory(get_chat_model_arg type=FileType.IMAGE, 
transfer_method=FileTransferMethod.REMOTE_URL, remote_url="https://example.com/image1.jpg", - _extra_config=FileExtraConfig(image_config=ImageConfig(detail=ImagePromptMessageContent.DETAIL.HIGH)), ) ] diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py index 12c469a81a..7c19de6078 100644 --- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py @@ -196,3 +196,72 @@ def test_extract_selectors_from_template_with_newline(): ) assert executor.params == {"test": "line1\nline2"} + + +def test_executor_with_form_data(): + # Prepare the variable pool + variable_pool = VariablePool( + system_variables={}, + user_inputs={}, + ) + variable_pool.add(["pre_node_id", "text_field"], "Hello, World!") + variable_pool.add(["pre_node_id", "number_field"], 42) + + # Prepare the node data + node_data = HttpRequestNodeData( + title="Test Form Data", + method="post", + url="https://api.example.com/upload", + authorization=HttpRequestNodeAuthorization(type="no-auth"), + headers="Content-Type: multipart/form-data", + params="", + body=HttpRequestNodeBody( + type="form-data", + data=[ + BodyData( + key="text_field", + type="text", + value="{{#pre_node_id.text_field#}}", + ), + BodyData( + key="number_field", + type="text", + value="{{#pre_node_id.number_field#}}", + ), + ], + ), + ) + + # Initialize the Executor + executor = Executor( + node_data=node_data, + timeout=HttpRequestNodeTimeout(connect=10, read=30, write=30), + variable_pool=variable_pool, + ) + + # Check the executor's data + assert executor.method == "post" + assert executor.url == "https://api.example.com/upload" + assert "Content-Type" in executor.headers + assert "multipart/form-data" in executor.headers["Content-Type"] + assert executor.params == {} + assert executor.json is 
None + assert executor.files is None + assert executor.content is None + + # Check that the form data is correctly loaded in executor.data + assert isinstance(executor.data, dict) + assert "text_field" in executor.data + assert executor.data["text_field"] == "Hello, World!" + assert "number_field" in executor.data + assert executor.data["number_field"] == "42" + + # Check the raw request (to_log method) + raw_request = executor.to_log() + assert "POST /upload HTTP/1.1" in raw_request + assert "Host: api.example.com" in raw_request + assert "Content-Type: multipart/form-data" in raw_request + assert "text_field" in raw_request + assert "Hello, World!" in raw_request + assert "number_field" in raw_request + assert "42" in raw_request diff --git a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py index 0f5c8bf51b..d20dfc5b31 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py @@ -4,7 +4,14 @@ import pytest from core.file import File, FileTransferMethod, FileType from core.variables import ArrayFileSegment -from core.workflow.nodes.list_operator.entities import FilterBy, FilterCondition, Limit, ListOperatorNodeData, OrderBy +from core.workflow.nodes.list_operator.entities import ( + ExtractConfig, + FilterBy, + FilterCondition, + Limit, + ListOperatorNodeData, + OrderBy, +) from core.workflow.nodes.list_operator.exc import InvalidKeyError from core.workflow.nodes.list_operator.node import ListOperatorNode, _get_file_extract_string_func from models.workflow import WorkflowNodeExecutionStatus @@ -22,6 +29,7 @@ def list_operator_node(): ), "order_by": OrderBy(enabled=False, value="asc"), "limit": Limit(enabled=False, size=0), + "extract_by": ExtractConfig(enabled=False, serial="1"), "title": "Test Title", } node_data = ListOperatorNodeData(**config) diff --git a/docker-legacy/docker-compose.yaml 
b/docker-legacy/docker-compose.yaml index 88650194ec..9c2a1fe980 100644 --- a/docker-legacy/docker-compose.yaml +++ b/docker-legacy/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: # API service api: - image: langgenius/dify-api:0.11.0 + image: langgenius/dify-api:0.11.1 restart: always environment: # Startup mode, 'api' starts the API server. @@ -227,7 +227,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.11.0 + image: langgenius/dify-api:0.11.1 restart: always environment: CONSOLE_WEB_URL: '' @@ -384,6 +384,7 @@ services: NOTION_INTERNAL_SECRET: you-internal-secret # Indexing configuration INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: 1000 + CREATE_TIDB_SERVICE_JOB_ENABLED: false depends_on: - db - redis @@ -396,7 +397,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.11.0 + image: langgenius/dify-web:0.11.1 restart: always environment: # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is diff --git a/docker/.env.example b/docker/.env.example index 9a178dc44c..0b181d831a 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -54,6 +54,10 @@ LOG_FILE= LOG_FILE_MAX_SIZE=20 # Log file max backup count LOG_FILE_BACKUP_COUNT=5 +# Log dateformat +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +# Log Timezone +LOG_TZ=UTC # Debug mode, default is false. # It is recommended to turn on this configuration for local development @@ -583,12 +587,13 @@ CODE_GENERATION_MAX_TOKENS=1024 # Multi-modal Configuration # ------------------------------ -# The format of the image sent when the multi-modal model is input, +# The format of the image/video sent when the multi-modal model is input, # the default is base64, optional url. # The delay of the call in url mode will be lower than that in base64 mode. # It is generally recommended to use the more compatible base64 mode. 
-# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image. +# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video. MULTIMODAL_SEND_IMAGE_FORMAT=base64 +MULTIMODAL_SEND_VIDEO_FORMAT=base64 # Upload image file size limit, default 10M. UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 @@ -906,3 +911,6 @@ POSITION_PROVIDER_EXCLUDES= # CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP CSP_WHITELIST= + +# Enable or disable create tidb service job +CREATE_TIDB_SERVICE_JOB_ENABLED=false \ No newline at end of file diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index a7cb8576fd..d9ff965473 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -4,6 +4,10 @@ x-shared-env: &shared-api-worker-env LOG_FILE: ${LOG_FILE:-} LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} + # Log dateformat + LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} + # Log Timezone + LOG_TZ: ${LOG_TZ:-UTC} DEBUG: ${DEBUG:-false} FLASK_DEBUG: ${FLASK_DEBUG:-false} SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} @@ -214,6 +218,7 @@ x-shared-env: &shared-api-worker-env PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024} MULTIMODAL_SEND_IMAGE_FORMAT: ${MULTIMODAL_SEND_IMAGE_FORMAT:-base64} + MULTIMODAL_SEND_VIDEO_FORMAT: ${MULTIMODAL_SEND_VIDEO_FORMAT:-base64} UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} @@ -270,11 +275,12 @@ x-shared-env: &shared-api-worker-env OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} 
OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} services: # API service api: - image: langgenius/dify-api:0.11.0 + image: langgenius/dify-api:0.11.1 restart: always environment: # Use the shared environment variables. @@ -294,7 +300,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.11.0 + image: langgenius/dify-api:0.11.1 restart: always environment: # Use the shared environment variables. @@ -313,7 +319,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.11.0 + image: langgenius/dify-web:0.11.1 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/web/app/(commonLayout)/datasets/template/template.en.mdx b/web/app/(commonLayout)/datasets/template/template.en.mdx index 263230d049..02e23429ce 100644 --- a/web/app/(commonLayout)/datasets/template/template.en.mdx +++ b/web/app/(commonLayout)/datasets/template/template.en.mdx @@ -1115,7 +1115,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from title="Request" tag="POST" label="/datasets/{dataset_id}/retrieve" - targetCode={`curl --location --request GET '${props.apiBaseUrl}/datasets/{dataset_id}/retrieve' \\\n--header 'Authorization: Bearer {api_key}'\\\n--header 'Content-Type: application/json'\\\n--data-raw '{ + targetCode={`curl --location --request POST '${props.apiBaseUrl}/datasets/{dataset_id}/retrieve' \\\n--header 'Authorization: Bearer {api_key}'\\\n--header 'Content-Type: application/json'\\\n--data-raw '{ "query": "test", "retrieval_model": { "search_method": "keyword_search", diff --git a/web/app/(commonLayout)/datasets/template/template.zh.mdx b/web/app/(commonLayout)/datasets/template/template.zh.mdx index 9c25d1e7bb..e5d5f56120 100644 --- a/web/app/(commonLayout)/datasets/template/template.zh.mdx +++ b/web/app/(commonLayout)/datasets/template/template.zh.mdx @@ -1116,7 
+1116,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from title="Request" tag="POST" label="/datasets/{dataset_id}/retrieve" - targetCode={`curl --location --request GET '${props.apiBaseUrl}/datasets/{dataset_id}/retrieve' \\\n--header 'Authorization: Bearer {api_key}'\\\n--header 'Content-Type: application/json'\\\n--data-raw '{ + targetCode={`curl --location --request POST '${props.apiBaseUrl}/datasets/{dataset_id}/retrieve' \\\n--header 'Authorization: Bearer {api_key}'\\\n--header 'Content-Type: application/json'\\\n--data-raw '{ "query": "test", "retrieval_model": { "search_method": "keyword_search", diff --git a/web/app/components/app/configuration/dataset-config/index.tsx b/web/app/components/app/configuration/dataset-config/index.tsx index 0d9d575c1e..78b49f81d0 100644 --- a/web/app/components/app/configuration/dataset-config/index.tsx +++ b/web/app/components/app/configuration/dataset-config/index.tsx @@ -47,12 +47,16 @@ const DatasetConfig: FC = () => { const { currentModel: currentRerankModel, + currentProvider: currentRerankProvider, } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.rerank) const onRemove = (id: string) => { const filteredDataSets = dataSet.filter(item => item.id !== id) setDataSet(filteredDataSets) - const retrievalConfig = getMultipleRetrievalConfig(datasetConfigs as any, filteredDataSets, dataSet, !!currentRerankModel) + const retrievalConfig = getMultipleRetrievalConfig(datasetConfigs as any, filteredDataSets, dataSet, { + provider: currentRerankProvider?.provider, + model: currentRerankModel?.model, + }) setDatasetConfigs({ ...(datasetConfigs as any), ...retrievalConfig, diff --git a/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx b/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx index 5bd748382e..dcb2b1a3fd 100644 --- a/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx 
+++ b/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx @@ -172,7 +172,7 @@ const ConfigContent: FC = ({ return false return datasetConfigs.reranking_enable - }, [canManuallyToggleRerank, datasetConfigs.reranking_enable]) + }, [canManuallyToggleRerank, datasetConfigs.reranking_enable, isRerankDefaultModelValid]) const handleDisabledSwitchClick = useCallback(() => { if (!currentRerankModel && !showRerankModel) diff --git a/web/app/components/app/configuration/dataset-config/params-config/index.tsx b/web/app/components/app/configuration/dataset-config/params-config/index.tsx index 94920fbd39..7f7a4799d1 100644 --- a/web/app/components/app/configuration/dataset-config/params-config/index.tsx +++ b/web/app/components/app/configuration/dataset-config/params-config/index.tsx @@ -43,6 +43,7 @@ const ParamsConfig = ({ const { defaultModel: rerankDefaultModel, currentModel: isRerankDefaultModelValid, + currentProvider: rerankDefaultProvider, } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.rerank) const isValid = () => { @@ -91,7 +92,10 @@ const ParamsConfig = ({ reranking_mode: restConfigs.reranking_mode, weights: restConfigs.weights, reranking_enable: restConfigs.reranking_enable, - }, selectedDatasets, selectedDatasets, !!isRerankDefaultModelValid) + }, selectedDatasets, selectedDatasets, { + provider: rerankDefaultProvider?.provider, + model: isRerankDefaultModelValid?.model, + }) setTempDataSetConfigs({ ...retrievalConfig, diff --git a/web/app/components/app/configuration/index.tsx b/web/app/components/app/configuration/index.tsx index 639cb2fad1..b5b7e98d43 100644 --- a/web/app/components/app/configuration/index.tsx +++ b/web/app/components/app/configuration/index.tsx @@ -226,6 +226,7 @@ const Configuration: FC = () => { const [rerankSettingModalOpen, setRerankSettingModalOpen] = useState(false) const { currentModel: currentRerankModel, + currentProvider: currentRerankProvider, } = 
useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.rerank) const handleSelect = (data: DataSet[]) => { if (isEqual(data.map(item => item.id), dataSets.map(item => item.id))) { @@ -279,7 +280,10 @@ const Configuration: FC = () => { reranking_mode: restConfigs.reranking_mode, weights: restConfigs.weights, reranking_enable: restConfigs.reranking_enable, - }, newDatasets, dataSets, !!currentRerankModel) + }, newDatasets, dataSets, { + provider: currentRerankProvider?.provider, + model: currentRerankModel?.model, + }) setDatasetConfigs({ ...retrievalConfig, @@ -468,8 +472,8 @@ const Configuration: FC = () => { transfer_methods: modelConfig.file_upload?.image?.transfer_methods || ['local_file', 'remote_url'], }, enabled: !!(modelConfig.file_upload?.enabled || modelConfig.file_upload?.image?.enabled), - allowed_file_types: modelConfig.file_upload?.allowed_file_types || [SupportUploadFileTypes.image], - allowed_file_extensions: modelConfig.file_upload?.allowed_file_extensions || FILE_EXTS[SupportUploadFileTypes.image].map(ext => `.${ext}`), + allowed_file_types: modelConfig.file_upload?.allowed_file_types || [SupportUploadFileTypes.image, SupportUploadFileTypes.video], + allowed_file_extensions: modelConfig.file_upload?.allowed_file_extensions || [...FILE_EXTS[SupportUploadFileTypes.image], ...FILE_EXTS[SupportUploadFileTypes.video]].map(ext => `.${ext}`), allowed_file_upload_methods: modelConfig.file_upload?.allowed_file_upload_methods || modelConfig.file_upload?.image?.transfer_methods || ['local_file', 'remote_url'], number_limits: modelConfig.file_upload?.number_limits || modelConfig.file_upload?.image?.number_limits || 3, fileUploadConfig: fileUploadConfigResponse, @@ -620,7 +624,10 @@ const Configuration: FC = () => { syncToPublishedConfig(config) setPublishedConfig(config) - const retrievalConfig = getMultipleRetrievalConfig(modelConfig.dataset_configs, datasets, datasets, !!currentRerankModel) + const retrievalConfig = 
getMultipleRetrievalConfig(modelConfig.dataset_configs, datasets, datasets, { + provider: currentRerankProvider?.provider, + model: currentRerankModel?.model, + }) setDatasetConfigs({ retrieval_model: RETRIEVE_TYPE.multiWay, ...modelConfig.dataset_configs, diff --git a/web/app/components/app/overview/settings/index.tsx b/web/app/components/app/overview/settings/index.tsx index a8ab456f43..e7cc4148ef 100644 --- a/web/app/components/app/overview/settings/index.tsx +++ b/web/app/components/app/overview/settings/index.tsx @@ -261,6 +261,10 @@ const SettingsModal: FC = ({ onChange={onChange('chatColorTheme')} placeholder='E.g #A020F0' /> +
+

{t(`${prefixSettings}.chatColorThemeInverted`)}

+ setInputInfo({ ...inputInfo, chatColorThemeInverted: v })}> +
} {systemFeatures.enable_web_sso_switch_component &&

{t(`${prefixSettings}.sso.label`)}

diff --git a/web/app/components/base/chat/chat-with-history/hooks.tsx b/web/app/components/base/chat/chat-with-history/hooks.tsx index d4fa170e4c..a67cc3cd88 100644 --- a/web/app/components/base/chat/chat-with-history/hooks.tsx +++ b/web/app/components/base/chat/chat-with-history/hooks.tsx @@ -173,7 +173,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { const conversationInputs: Record = {} inputsForms.forEach((item: any) => { - conversationInputs[item.variable] = item.default || '' + conversationInputs[item.variable] = item.default || null }) handleNewConversationInputsChange(conversationInputs) }, [handleNewConversationInputsChange, inputsForms]) diff --git a/web/app/components/base/chat/chat/chat-input-area/index.tsx b/web/app/components/base/chat/chat/chat-input-area/index.tsx index 05aaaa6bc2..32d841148a 100644 --- a/web/app/components/base/chat/chat/chat-input-area/index.tsx +++ b/web/app/components/base/chat/chat/chat-input-area/index.tsx @@ -1,6 +1,5 @@ import { useCallback, - useRef, useState, } from 'react' import Textarea from 'rc-textarea' @@ -63,7 +62,6 @@ const ChatInputArea = ({ isMultipleLine, } = useTextAreaHeight() const [query, setQuery] = useState('') - const isUseInputMethod = useRef(false) const [showVoiceInput, setShowVoiceInput] = useState(false) const filesStore = useFileStore() const { @@ -95,20 +93,11 @@ const ChatInputArea = ({ } } - const handleKeyUp = (e: React.KeyboardEvent) => { - if (e.key === 'Enter') { - e.preventDefault() - // prevent send message when using input method enter - if (!e.shiftKey && !isUseInputMethod.current) - handleSend() - } - } - const handleKeyDown = (e: React.KeyboardEvent) => { - isUseInputMethod.current = e.nativeEvent.isComposing - if (e.key === 'Enter' && !e.shiftKey) { - setQuery(query.replace(/\n$/, '')) + if (e.key === 'Enter' && !e.shiftKey && !e.nativeEvent.isComposing) { e.preventDefault() + setQuery(query.replace(/\n$/, '')) + handleSend() } } @@ -165,7 +154,6 @@ const 
ChatInputArea = ({ setQuery(e.target.value) handleTextareaResize() }} - onKeyUp={handleKeyUp} onKeyDown={handleKeyDown} onPaste={handleClipboardPasteFile} onDragEnter={handleDragFileEnter} diff --git a/web/app/components/base/chat/embedded-chatbot/hooks.tsx b/web/app/components/base/chat/embedded-chatbot/hooks.tsx index 631d3b56bc..0a8bc0993f 100644 --- a/web/app/components/base/chat/embedded-chatbot/hooks.tsx +++ b/web/app/components/base/chat/embedded-chatbot/hooks.tsx @@ -159,7 +159,7 @@ export const useEmbeddedChatbot = () => { const conversationInputs: Record = {} inputsForms.forEach((item: any) => { - conversationInputs[item.variable] = item.default || '' + conversationInputs[item.variable] = item.default || null }) handleNewConversationInputsChange(conversationInputs) }, [handleNewConversationInputsChange, inputsForms]) diff --git a/web/app/components/base/markdown-blocks/form.tsx b/web/app/components/base/markdown-blocks/form.tsx index f87f2dcd91..7ce3e82b1d 100644 --- a/web/app/components/base/markdown-blocks/form.tsx +++ b/web/app/components/base/markdown-blocks/form.tsx @@ -1,3 +1,4 @@ +import React, { useEffect, useState } from 'react' import Button from '@/app/components/base/button' import Input from '@/app/components/base/input' import Textarea from '@/app/components/base/textarea' @@ -32,20 +33,31 @@ const MarkdownForm = ({ node }: any) => { // const { onSend } = useChatContext() - const getFormValues = (children: any) => { - const formValues: { [key: string]: any } = {} - children.forEach((child: any) => { - if (child.tagName === SUPPORTED_TAGS.INPUT) - formValues[child.properties.name] = child.properties.value - if (child.tagName === SUPPORTED_TAGS.TEXTAREA) - formValues[child.properties.name] = child.properties.value + const [formValues, setFormValues] = useState<{ [key: string]: any }>({}) + + useEffect(() => { + const initialValues: { [key: string]: any } = {} + node.children.forEach((child: any) => { + if ([SUPPORTED_TAGS.INPUT, 
SUPPORTED_TAGS.TEXTAREA].includes(child.tagName)) + initialValues[child.properties.name] = child.properties.value }) - return formValues + setFormValues(initialValues) + }, [node.children]) + + const getFormValues = (children: any) => { + const values: { [key: string]: any } = {} + children.forEach((child: any) => { + if ([SUPPORTED_TAGS.INPUT, SUPPORTED_TAGS.TEXTAREA].includes(child.tagName)) + values[child.properties.name] = formValues[child.properties.name] + }) + return values } + const onSubmit = (e: any) => { e.preventDefault() const format = node.properties.dataFormat || DATA_FORMAT.TEXT const result = getFormValues(node.children) + if (format === DATA_FORMAT.JSON) { onSend?.(JSON.stringify(result)) } @@ -77,25 +89,22 @@ const MarkdownForm = ({ node }: any) => { ) } - if (child.tagName === SUPPORTED_TAGS.INPUT) { - if (Object.values(SUPPORTED_TYPES).includes(child.properties.type)) { - return ( - { - e.preventDefault() - child.properties.value = e.target.value - }} - /> - ) - } - else { - return

Unsupported input type: {child.properties.type}

- } + if (child.tagName === SUPPORTED_TAGS.INPUT && Object.values(SUPPORTED_TYPES).includes(child.properties.type)) { + return ( + { + setFormValues(prevValues => ({ + ...prevValues, + [child.properties.name]: e.target.value, + })) + }} + /> + ) } if (child.tagName === SUPPORTED_TAGS.TEXTAREA) { return ( @@ -103,10 +112,12 @@ const MarkdownForm = ({ node }: any) => { key={index} name={child.properties.name} placeholder={child.properties.placeholder} - value={child.properties.value} + value={formValues[child.properties.name]} onChange={(e) => { - e.preventDefault() - child.properties.value = e.target.value + setFormValues(prevValues => ({ + ...prevValues, + [child.properties.name]: e.target.value, + })) }} /> ) diff --git a/web/app/components/develop/template/template_advanced_chat.en.mdx b/web/app/components/develop/template/template_advanced_chat.en.mdx index 6642c5cedc..c3c3f7c6f3 100644 --- a/web/app/components/develop/template/template_advanced_chat.en.mdx +++ b/web/app/components/develop/template/template_advanced_chat.en.mdx @@ -774,6 +774,10 @@ Chat applications support session persistence, allowing previous chat history to ### Request Body + Rename the session, the session name is used for display on clients that support multiple sessions. 
+ + ### Path + - `conversation_id` (string) Conversation ID @@ -796,10 +800,10 @@ Chat applications support session persistence, allowing previous chat history to - + ```bash {{ title: 'cURL' }} - curl -X POST '${props.appDetail.api_base_url}/conversations/:conversation_id/name' \ + curl -X POST '${props.appDetail.api_base_url}/conversations/{conversation_id}/name' \ --header 'Content-Type: application/json' \ --header 'Authorization: Bearer {api_key}' \ --data-raw '{ diff --git a/web/app/components/develop/template/template_advanced_chat.zh.mdx b/web/app/components/develop/template/template_advanced_chat.zh.mdx index 8e64d63ac5..f3ddd6933c 100755 --- a/web/app/components/develop/template/template_advanced_chat.zh.mdx +++ b/web/app/components/develop/template/template_advanced_chat.zh.mdx @@ -810,6 +810,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' 对会话进行重命名,会话名称用于显示在支持多会话的客户端上。 + ### Path + - `conversation_id` (string) 会话 ID + ### Request Body @@ -833,10 +836,10 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' - + ```bash {{ title: 'cURL' }} - curl -X POST '${props.appDetail.api_base_url}/conversations/:conversation_id/name' \ + curl -X POST '${props.appDetail.api_base_url}/conversations/{conversation_id}/name' \ --header 'Authorization: Bearer {api_key}' \ --header 'Content-Type: application/json' \ --data-raw '{ diff --git a/web/app/components/develop/template/template_chat.en.mdx b/web/app/components/develop/template/template_chat.en.mdx index a94016ca3a..f44f991b89 100644 --- a/web/app/components/develop/template/template_chat.en.mdx +++ b/web/app/components/develop/template/template_chat.en.mdx @@ -808,6 +808,10 @@ Chat applications support session persistence, allowing previous chat history to ### Request Body + Rename the session, the session name is used for display on clients that support multiple sessions. 
+ + ### Path + - `conversation_id` (string) Conversation ID @@ -830,10 +834,10 @@ Chat applications support session persistence, allowing previous chat history to - + ```bash {{ title: 'cURL' }} - curl -X POST '${props.appDetail.api_base_url}/conversations/:conversation_id/name' \ + curl -X POST '${props.appDetail.api_base_url}/conversations/{conversation_id}/name' \ --header 'Content-Type: application/json' \ --header 'Authorization: Bearer {api_key}' \ --data-raw '{ diff --git a/web/app/components/develop/template/template_chat.zh.mdx b/web/app/components/develop/template/template_chat.zh.mdx index 92b13b2c7d..c786d56980 100644 --- a/web/app/components/develop/template/template_chat.zh.mdx +++ b/web/app/components/develop/template/template_chat.zh.mdx @@ -824,6 +824,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' 对会话进行重命名,会话名称用于显示在支持多会话的客户端上。 + ### Path + - `conversation_id` (string) 会话 ID + ### Request Body @@ -847,10 +850,10 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' - + ```bash {{ title: 'cURL' }} - curl -X POST '${props.appDetail.api_base_url}/conversations/:conversation_id/name' \ + curl -X POST '${props.appDetail.api_base_url}/conversations/{conversation_id}/name' \ --header 'Authorization: Bearer {api_key}' \ --header 'Content-Type: application/json' \ --data-raw '{ diff --git a/web/app/components/share/text-generation/index.tsx b/web/app/components/share/text-generation/index.tsx index 0860560e7c..b853100b69 100644 --- a/web/app/components/share/text-generation/index.tsx +++ b/web/app/components/share/text-generation/index.tsx @@ -94,6 +94,7 @@ const TextGeneration: FC = ({ const [isCallBatchAPI, setIsCallBatchAPI] = useState(false) const isInBatchTab = currentTab === 'batch' const [inputs, setInputs] = useState>({}) + const inputsRef = useRef(inputs) const [appId, setAppId] = useState('') const [siteInfo, setSiteInfo] = useState(null) const [canReplaceLogo, setCanReplaceLogo] 
= useState(false) @@ -604,6 +605,7 @@ const TextGeneration: FC = ({ + inputsRef: React.MutableRefObject> onInputsChange: (inputs: Record) => void onSend: () => void visionConfig: VisionSettings @@ -27,6 +28,7 @@ export type IRunOnceProps = { const RunOnce: FC = ({ promptConfig, inputs, + inputsRef, onInputsChange, onSend, visionConfig, @@ -47,6 +49,11 @@ const RunOnce: FC = ({ onSend() } + const handleInputsChange = useCallback((newInputs: Record) => { + onInputsChange(newInputs) + inputsRef.current = newInputs + }, [onInputsChange, inputsRef]) + return (
@@ -60,7 +67,7 @@ const RunOnce: FC = ({ setTempCredential({ ...tempCredential, api_key_header: e.target.value })} - className='w-full h-10 px-3 text-sm font-normal bg-gray-100 rounded-lg grow' + className='w-full h-10 px-3 text-sm font-normal border border-transparent bg-gray-100 rounded-lg grow outline-none focus:bg-components-input-bg-active focus:border-components-input-border-active focus:shadow-xs' placeholder={t('tools.createTool.authMethod.types.apiKeyPlaceholder')!} />
@@ -129,7 +129,7 @@ const ConfigCredential: FC = ({ setTempCredential({ ...tempCredential, api_key_value: e.target.value })} - className='w-full h-10 px-3 text-sm font-normal bg-gray-100 rounded-lg grow' + className='w-full h-10 px-3 text-sm font-normal border border-transparent bg-gray-100 rounded-lg grow outline-none focus:bg-components-input-bg-active focus:border-components-input-border-active focus:shadow-xs' placeholder={t('tools.createTool.authMethod.types.apiValuePlaceholder')!} />
diff --git a/web/app/components/tools/edit-custom-collection-modal/get-schema.tsx b/web/app/components/tools/edit-custom-collection-modal/get-schema.tsx index 7b0244e3e3..2552c67568 100644 --- a/web/app/components/tools/edit-custom-collection-modal/get-schema.tsx +++ b/web/app/components/tools/edit-custom-collection-modal/get-schema.tsx @@ -70,7 +70,7 @@ const GetSchema: FC = ({
setImportUrl(e.target.value)} @@ -89,7 +89,7 @@ const GetSchema: FC = ({
)} -
+
{expandedIterations[index] &&
void + onShowIterationDetail?: (detail: NodeTracing[][], iterDurationMap: IterationDurationMap) => void notShowIterationNav?: boolean justShowIterationNavArrow?: boolean } @@ -90,7 +90,7 @@ const NodePanel: FC = ({ const handleOnShowIterationDetail = (e: React.MouseEvent) => { e.stopPropagation() e.nativeEvent.stopImmediatePropagation() - onShowIterationDetail?.(nodeInfo.details || []) + onShowIterationDetail?.(nodeInfo.details || [], nodeInfo?.iterDurationMap || nodeInfo.execution_metadata?.iteration_duration_map || {}) } return (
diff --git a/web/app/components/workflow/run/tracing-panel.tsx b/web/app/components/workflow/run/tracing-panel.tsx index 613c10198d..57b3a5cf5f 100644 --- a/web/app/components/workflow/run/tracing-panel.tsx +++ b/web/app/components/workflow/run/tracing-panel.tsx @@ -16,11 +16,11 @@ import NodePanel from './node' import { BlockEnum, } from '@/app/components/workflow/types' -import type { NodeTracing } from '@/types/workflow' +import type { IterationDurationMap, NodeTracing } from '@/types/workflow' type TracingPanelProps = { list: NodeTracing[] - onShowIterationDetail?: (detail: NodeTracing[][]) => void + onShowIterationDetail?: (detail: NodeTracing[][], iterDurationMap: IterationDurationMap) => void className?: string hideNodeInfo?: boolean hideNodeProcessDetail?: boolean diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts index 1c6639aba0..b3fece702a 100644 --- a/web/i18n/en-US/workflow.ts +++ b/web/i18n/en-US/workflow.ts @@ -369,6 +369,7 @@ const translation = { inputVars: 'Input Variables', api: 'API', apiPlaceholder: 'Enter URL, type ‘/’ insert variable', + extractListPlaceholder: 'Enter list item index, type ‘/’ insert variable', notStartWithHttp: 'API should start with http:// or https://', key: 'Key', type: 'Type', @@ -568,9 +569,9 @@ const translation = { MaxParallelismDesc: 'The maximum parallelism is used to control the number of tasks executed simultaneously in a single iteration.', errorResponseMethod: 'Error response method', ErrorMethod: { - operationTerminated: 'terminated', - continueOnError: 'continue-on-error', - removeAbnormalOutput: 'remove-abnormal-output', + operationTerminated: 'Terminated', + continueOnError: 'Continue on Error', + removeAbnormalOutput: 'Remove Abnormal Output', }, answerNodeWarningDesc: 'Parallel mode warning: Answer nodes, conversation variable assignments, and persistent read/write operations within iterations may cause exceptions.', }, @@ -605,6 +606,7 @@ const translation = { inputVar: 'Input Variable', 
filterCondition: 'Filter Condition', filterConditionKey: 'Filter Condition Key', + extractsCondition: 'Extract the N item', filterConditionComparisonOperator: 'Filter Condition Comparison Operator', filterConditionComparisonValue: 'Filter Condition value', selectVariableKeyPlaceholder: 'Select sub variable key', diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index 1229ba8c03..98f34672d3 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -369,6 +369,7 @@ const translation = { inputVars: '输入变量', api: 'API', apiPlaceholder: '输入 URL,输入变量时请键入‘/’', + extractListPlaceholder: '输入提取列表编号,输入变量时请键入‘/’', notStartWithHttp: 'API 应该以 http:// 或 https:// 开头', key: '键', type: '类型', @@ -608,6 +609,7 @@ const translation = { filterConditionComparisonOperator: '过滤条件比较操作符', filterConditionComparisonValue: '过滤条件比较值', selectVariableKeyPlaceholder: '选择子变量的 Key', + extractsCondition: '取第 N 项', limit: '取前 N 项', orderBy: '排序', asc: '升序', diff --git a/web/package.json b/web/package.json index de01eb4d48..d863ba13d3 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "dify-web", - "version": "0.11.0", + "version": "0.11.1", "private": true, "engines": { "node": ">=18.17.0" diff --git a/web/service/base.ts b/web/service/base.ts index fcf8d8bd7d..6cb732c55d 100644 --- a/web/service/base.ts +++ b/web/service/base.ts @@ -321,7 +321,9 @@ const baseFetch = ( } const urlPrefix = isPublicAPI ? PUBLIC_API_PREFIX : API_PREFIX - let urlWithPrefix = `${urlPrefix}${url.startsWith('/') ? url : `/${url}`}` + let urlWithPrefix = (url.startsWith('http://') || url.startsWith('https://')) + ? url + : `${urlPrefix}${url.startsWith('/') ? url : `/${url}`}` const { method, params, body } = options // handle query @@ -494,7 +496,9 @@ export const ssePost = ( getAbortController?.(abortController) const urlPrefix = isPublicAPI ? PUBLIC_API_PREFIX : API_PREFIX - const urlWithPrefix = `${urlPrefix}${url.startsWith('/') ? 
url : `/${url}`}` + const urlWithPrefix = (url.startsWith('http://') || url.startsWith('https://')) + ? url + : `${urlPrefix}${url.startsWith('/') ? url : `/${url}`}` const { body } = options if (body) diff --git a/web/service/refresh-token.ts b/web/service/refresh-token.ts index 8bd2215041..b193779629 100644 --- a/web/service/refresh-token.ts +++ b/web/service/refresh-token.ts @@ -1,11 +1,13 @@ import { apiPrefix } from '@/config' import { fetchWithRetry } from '@/utils' +const LOCAL_STORAGE_KEY = 'is_other_tab_refreshing' + let isRefreshing = false function waitUntilTokenRefreshed() { return new Promise((resolve, reject) => { function _check() { - const isRefreshingSign = localStorage.getItem('is_refreshing') + const isRefreshingSign = globalThis.localStorage.getItem(LOCAL_STORAGE_KEY) if ((isRefreshingSign && isRefreshingSign === '1') || isRefreshing) { setTimeout(() => { _check() @@ -22,13 +24,14 @@ function waitUntilTokenRefreshed() { // only one request can send async function getNewAccessToken(): Promise { try { - const isRefreshingSign = localStorage.getItem('is_refreshing') + const isRefreshingSign = globalThis.localStorage.getItem(LOCAL_STORAGE_KEY) if ((isRefreshingSign && isRefreshingSign === '1') || isRefreshing) { await waitUntilTokenRefreshed() } else { - globalThis.localStorage.setItem('is_refreshing', '1') isRefreshing = true + globalThis.localStorage.setItem(LOCAL_STORAGE_KEY, '1') + globalThis.addEventListener('beforeunload', releaseRefreshLock) const refresh_token = globalThis.localStorage.getItem('refresh_token') // Do not use baseFetch to refresh tokens. 
@@ -61,15 +64,21 @@ async function getNewAccessToken(): Promise { return Promise.reject(error) } finally { + releaseRefreshLock() + } +} + +function releaseRefreshLock() { + if (isRefreshing) { isRefreshing = false - globalThis.localStorage.removeItem('is_refreshing') + globalThis.localStorage.removeItem(LOCAL_STORAGE_KEY) + globalThis.removeEventListener('beforeunload', releaseRefreshLock) } } export async function refreshAccessTokenOrRelogin(timeout: number) { return Promise.race([new Promise((resolve, reject) => setTimeout(() => { - isRefreshing = false - globalThis.localStorage.removeItem('is_refreshing') + releaseRefreshLock() reject(new Error('request timeout')) }, timeout)), getNewAccessToken()]) } diff --git a/web/types/workflow.ts b/web/types/workflow.ts index 3c0675b605..34b08e878e 100644 --- a/web/types/workflow.ts +++ b/web/types/workflow.ts @@ -33,6 +33,7 @@ export type NodeTracing = { parent_parallel_id?: string parent_parallel_start_node_id?: string parallel_mode_run_id?: string + iteration_duration_map?: IterationDurationMap } metadata: { iterator_length: number @@ -44,6 +45,7 @@ export type NodeTracing = { name: string email: string } + iterDurationMap?: IterationDurationMap finished_at: number extras?: any expand?: boolean // for UI @@ -207,7 +209,10 @@ export type IterationNextResponse = { parallel_mode_run_id: string execution_metadata: { parallel_id?: string + iteration_index: number + parallel_mode_run_id?: string } + duration?: number } } @@ -323,3 +328,5 @@ export type ConversationVariableResponse = { total: number page: number } + +export type IterationDurationMap = Record diff --git a/web/yarn.lock b/web/yarn.lock index 6f54ec23d4..9fddfc1948 100644 --- a/web/yarn.lock +++ b/web/yarn.lock @@ -6450,9 +6450,9 @@ elkjs@^0.9.0: integrity sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ== elliptic@^6.5.3, elliptic@^6.5.5: - version "6.5.7" - resolved 
"https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.7.tgz#8ec4da2cb2939926a1b9a73619d768207e647c8b" - integrity sha512-ESVCtTwiA+XhY3wyh24QqRGBoP3rEdDUl3EDUUo9tft074fi19IrdpH7hLCMMP3CIj7jb3W96rn8lt/BqIlt5Q== + version "6.6.0" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.6.0.tgz#5919ec723286c1edf28685aa89261d4761afa210" + integrity sha512-dpwoQcLc/2WLQvJvLRHKZ+f9FgOdjnq11rurqwekGQygGPsYSK29OMMD2WalatiqQ+XGFDglTNixpPfI+lpaAA== dependencies: bn.js "^4.11.9" brorand "^1.1.0"