Merge branch 'feat/plugins' of https://github.com/langgenius/dify into feat/plugins

twwu 2024-11-08 11:32:34 +08:00
commit a710858d09
357 changed files with 8233 additions and 2254 deletions

View File

@ -1,3 +1,3 @@
#!/bin/bash #!/bin/bash
poetry install -C api cd api && poetry install

View File

@ -7,6 +7,7 @@ on:
paths: paths:
- api/** - api/**
- docker/** - docker/**
- .github/workflows/api-tests.yml
concurrency: concurrency:
group: api-tests-${{ github.head_ref || github.run_id }} group: api-tests-${{ github.head_ref || github.run_id }}
@ -27,16 +28,15 @@ jobs:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Install Poetry
uses: abatilo/actions-poetry@v3
- name: Set up Python ${{ matrix.python-version }} - name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5 uses: actions/setup-python@v5
with: with:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
cache-dependency-path: | cache: poetry
api/pyproject.toml cache-dependency-path: api/poetry.lock
api/poetry.lock
- name: Install Poetry
uses: abatilo/actions-poetry@v3
- name: Check Poetry lockfile - name: Check Poetry lockfile
run: | run: |
@ -67,7 +67,7 @@ jobs:
run: sh .github/workflows/expose_service_ports.sh run: sh .github/workflows/expose_service_ports.sh
- name: Set up Sandbox - name: Set up Sandbox
uses: hoverkraft-tech/compose-action@v2.0.0 uses: hoverkraft-tech/compose-action@v2.0.2
with: with:
compose-file: | compose-file: |
docker/docker-compose.middleware.yaml docker/docker-compose.middleware.yaml
@ -77,22 +77,3 @@ jobs:
- name: Run Workflow - name: Run Workflow
run: poetry run -C api bash dev/pytest/pytest_workflow.sh run: poetry run -C api bash dev/pytest/pytest_workflow.sh
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.yaml
services: |
weaviate
qdrant
couchbase-server
etcd
minio
milvus-standalone
pgvecto-rs
pgvector
chroma
elasticsearch
- name: Test Vector Stores
run: poetry run -C api bash dev/pytest/pytest_vdb.sh

View File

@ -49,7 +49,7 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@v2 uses: docker/login-action@v3
with: with:
username: ${{ env.DOCKERHUB_USER }} username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_TOKEN }} password: ${{ env.DOCKERHUB_TOKEN }}
@ -114,7 +114,7 @@ jobs:
merge-multiple: true merge-multiple: true
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@v2 uses: docker/login-action@v3
with: with:
username: ${{ env.DOCKERHUB_USER }} username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_TOKEN }} password: ${{ env.DOCKERHUB_TOKEN }}

View File

@ -43,7 +43,7 @@ jobs:
cp middleware.env.example middleware.env cp middleware.env.example middleware.env
- name: Set up Middlewares - name: Set up Middlewares
uses: hoverkraft-tech/compose-action@v2.0.0 uses: hoverkraft-tech/compose-action@v2.0.2
with: with:
compose-file: | compose-file: |
docker/docker-compose.middleware.yaml docker/docker-compose.middleware.yaml

View File

@ -24,16 +24,16 @@ jobs:
with: with:
files: api/** files: api/**
- name: Install Poetry
if: steps.changed-files.outputs.any_changed == 'true'
uses: abatilo/actions-poetry@v3
- name: Set up Python - name: Set up Python
uses: actions/setup-python@v5 uses: actions/setup-python@v5
if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
with: with:
python-version: '3.10' python-version: '3.10'
- name: Install Poetry
if: steps.changed-files.outputs.any_changed == 'true'
uses: abatilo/actions-poetry@v3
- name: Python dependencies - name: Python dependencies
if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
run: poetry install -C api --only lint run: poetry install -C api --only lint

75
.github/workflows/vdb-tests.yml vendored Normal file
View File

@ -0,0 +1,75 @@
name: Run VDB Tests
on:
pull_request:
branches:
- main
paths:
- api/core/rag/datasource/**
- docker/**
- .github/workflows/vdb-tests.yml
concurrency:
group: vdb-tests-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
test:
name: VDB Tests
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.10"
- "3.11"
- "3.12"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Poetry
uses: abatilo/actions-poetry@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: poetry
cache-dependency-path: api/poetry.lock
- name: Check Poetry lockfile
run: |
poetry check -C api --lock
poetry show -C api
- name: Install dependencies
run: poetry install -C api --with dev
- name: Set up dotenvs
run: |
cp docker/.env.example docker/.env
cp docker/middleware.env.example docker/middleware.env
- name: Expose Service Ports
run: sh .github/workflows/expose_service_ports.sh
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
uses: hoverkraft-tech/compose-action@v2.0.2
with:
compose-file: |
docker/docker-compose.yaml
services: |
weaviate
qdrant
couchbase-server
etcd
minio
milvus-standalone
pgvecto-rs
pgvector
chroma
elasticsearch
- name: Test Vector Stores
run: poetry run -C api bash dev/pytest/pytest_vdb.sh

1
.gitignore vendored
View File

@ -175,6 +175,7 @@ docker/volumes/pgvector/data/*
docker/volumes/pgvecto_rs/data/* docker/volumes/pgvecto_rs/data/*
docker/volumes/couchbase/* docker/volumes/couchbase/*
docker/volumes/oceanbase/* docker/volumes/oceanbase/*
!docker/volumes/oceanbase/init.d
docker/nginx/conf.d/default.conf docker/nginx/conf.d/default.conf
docker/nginx/ssl/* docker/nginx/ssl/*

View File

@ -81,7 +81,7 @@ Dify requires the following dependencies to build, make sure they're installed o
Dify is composed of a backend and a frontend. Navigate to the backend directory by `cd api/`, then follow the [Backend README](api/README.md) to install it. In a separate terminal, navigate to the frontend directory by `cd web/`, then follow the [Frontend README](web/README.md) to install. Dify is composed of a backend and a frontend. Navigate to the backend directory by `cd api/`, then follow the [Backend README](api/README.md) to install it. In a separate terminal, navigate to the frontend directory by `cd web/`, then follow the [Frontend README](web/README.md) to install.
Check the [installation FAQ](https://docs.dify.ai/learn-more/faq/self-host-faq) for a list of common issues and steps to troubleshoot. Check the [installation FAQ](https://docs.dify.ai/learn-more/faq/install-faq) for a list of common issues and steps to troubleshoot.
### 5. Visit dify in your browser ### 5. Visit dify in your browser

View File

@ -79,7 +79,7 @@ Dify yêu cầu các phụ thuộc sau để build, hãy đảm bảo chúng đ
Dify bao gồm một backend và một frontend. Đi đến thư mục backend bằng lệnh `cd api/`, sau đó làm theo hướng dẫn trong [README của Backend](api/README.md) để cài đặt. Trong một terminal khác, đi đến thư mục frontend bằng lệnh `cd web/`, sau đó làm theo hướng dẫn trong [README của Frontend](web/README.md) để cài đặt. Dify bao gồm một backend và một frontend. Đi đến thư mục backend bằng lệnh `cd api/`, sau đó làm theo hướng dẫn trong [README của Backend](api/README.md) để cài đặt. Trong một terminal khác, đi đến thư mục frontend bằng lệnh `cd web/`, sau đó làm theo hướng dẫn trong [README của Frontend](web/README.md) để cài đặt.
Kiểm tra [FAQ về cài đặt](https://docs.dify.ai/learn-more/faq/self-host-faq) để xem danh sách các vấn đề thường gặp và các bước khắc phục. Kiểm tra [FAQ về cài đặt](https://docs.dify.ai/learn-more/faq/install-faq) để xem danh sách các vấn đề thường gặp và các bước khắc phục.
### 5. Truy cập Dify trong trình duyệt của bạn ### 5. Truy cập Dify trong trình duyệt của bạn

130
README.md
View File

@ -46,9 +46,33 @@
</p> </p>
Dify is an open-source LLM app development platform. Its intuitive interface combines AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production. Here's a list of the core features: Dify is an open-source LLM app development platform. Its intuitive interface combines agentic AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production.
</br> </br>
## Quick start
> Before installing Dify, make sure your machine meets the following minimum system requirements:
>
>- CPU >= 2 Core
>- RAM >= 4 GiB
</br>
The easiest way to start the Dify server is through [docker compose](docker/docker-compose.yaml). Before running Dify with the following commands, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:
```bash
cd dify
cd docker
cp .env.example .env
docker compose up -d
```
After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process.
#### Seeking help
Please refer to our [FAQ](https://docs.dify.ai/getting-started/install-self-hosted/faqs) if you encounter problems setting up Dify. Reach out to [the community and us](#community--contact) if you are still having issues.
> If you'd like to contribute to Dify or do additional development, refer to our [guide to deploying from source code](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
## Key features
**1. Workflow**: **1. Workflow**:
Build and test powerful AI workflows on a visual canvas, leveraging all the following features and beyond. Build and test powerful AI workflows on a visual canvas, leveraging all the following features and beyond.
@ -79,73 +103,6 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
All of Dify's offerings come with corresponding APIs, so you could effortlessly integrate Dify into your own business logic. All of Dify's offerings come with corresponding APIs, so you could effortlessly integrate Dify into your own business logic.
## Feature comparison
<table style="width: 100%;">
<tr>
<th align="center">Feature</th>
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>
<th align="center">Flowise</th>
<th align="center">OpenAI Assistants API</th>
</tr>
<tr>
<td align="center">Programming Approach</td>
<td align="center">API + App-oriented</td>
<td align="center">Python Code</td>
<td align="center">App-oriented</td>
<td align="center">API-oriented</td>
</tr>
<tr>
<td align="center">Supported LLMs</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
<td align="center">OpenAI-only</td>
</tr>
<tr>
<td align="center">RAG Engine</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Agent</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Workflow</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Observability</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Enterprise Features (SSO/Access control)</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Local Deployment</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
</table>
## Using Dify ## Using Dify
- **Cloud </br>** - **Cloud </br>**
@ -167,28 +124,7 @@ Star Dify on GitHub and be instantly notified of new releases.
![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4) ![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4)
## Advanced Setup
## Quick start
> Before installing Dify, make sure your machine meets the following minimum system requirements:
>
>- CPU >= 2 Core
>- RAM >= 4 GiB
</br>
The easiest way to start the Dify server is to run our [docker-compose.yml](docker/docker-compose.yaml) file. Before running the installation command, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:
```bash
cd docker
cp .env.example .env
docker compose up -d
```
After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process.
> If you'd like to contribute to Dify or do additional development, refer to our [guide to deploying from source code](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
## Next steps
If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
@ -216,12 +152,6 @@ At the same time, please consider supporting Dify by sharing it on social media
> We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c). > We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c).
**Contributors**
<a href="https://github.com/langgenius/dify/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langgenius/dify" />
</a>
## Community & contact ## Community & contact
* [Github Discussion](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions. * [Github Discussion](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
@ -229,6 +159,12 @@ At the same time, please consider supporting Dify by sharing it on social media
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community. * [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [X(Twitter)](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community. * [X(Twitter)](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
**Contributors**
<a href="https://github.com/langgenius/dify/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langgenius/dify" />
</a>
## Star history ## Star history
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date) [![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)

View File

@ -120,7 +120,8 @@ SUPABASE_URL=your-server-url
WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,* WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,* CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
# Vector database configuration, support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash
# Vector database configuration, support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash, lindorm, oceanbase
VECTOR_STORE=weaviate VECTOR_STORE=weaviate
# Weaviate configuration # Weaviate configuration
@ -263,14 +264,20 @@ VIKINGDB_SCHEMA=http
VIKINGDB_CONNECTION_TIMEOUT=30 VIKINGDB_CONNECTION_TIMEOUT=30
VIKINGDB_SOCKET_TIMEOUT=30 VIKINGDB_SOCKET_TIMEOUT=30
# Lindorm configuration
LINDORM_URL=http://ld-*******************-proxy-search-pub.lindorm.aliyuncs.com:30070
LINDORM_USERNAME=admin
LINDORM_PASSWORD=admin
# OceanBase Vector configuration # OceanBase Vector configuration
OCEANBASE_VECTOR_HOST=127.0.0.1 OCEANBASE_VECTOR_HOST=127.0.0.1
OCEANBASE_VECTOR_PORT=2881 OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD= OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_DATABASE=test OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_MEMORY_LIMIT=6G OCEANBASE_MEMORY_LIMIT=6G
# Upload configuration # Upload configuration
UPLOAD_FILE_SIZE_LIMIT=15 UPLOAD_FILE_SIZE_LIMIT=15
UPLOAD_FILE_BATCH_LIMIT=5 UPLOAD_FILE_BATCH_LIMIT=5
@ -313,13 +320,21 @@ ETL_TYPE=dify
UNSTRUCTURED_API_URL= UNSTRUCTURED_API_URL=
UNSTRUCTURED_API_KEY= UNSTRUCTURED_API_KEY=
#ssrf
SSRF_PROXY_HTTP_URL= SSRF_PROXY_HTTP_URL=
SSRF_PROXY_HTTPS_URL= SSRF_PROXY_HTTPS_URL=
SSRF_DEFAULT_MAX_RETRIES=3 SSRF_DEFAULT_MAX_RETRIES=3
SSRF_DEFAULT_TIME_OUT=
SSRF_DEFAULT_CONNECT_TIME_OUT=
SSRF_DEFAULT_READ_TIME_OUT=
SSRF_DEFAULT_WRITE_TIME_OUT=
BATCH_UPLOAD_LIMIT=10 BATCH_UPLOAD_LIMIT=10
KEYWORD_DATA_SOURCE_TYPE=database KEYWORD_DATA_SOURCE_TYPE=database
# Workflow file upload limit
WORKFLOW_FILE_UPLOAD_LIMIT=10
# CODE EXECUTION CONFIGURATION # CODE EXECUTION CONFIGURATION
CODE_EXECUTION_ENDPOINT=http://127.0.0.1:8194 CODE_EXECUTION_ENDPOINT=http://127.0.0.1:8194
CODE_EXECUTION_API_KEY=dify-sandbox CODE_EXECUTION_API_KEY=dify-sandbox

View File

@ -55,7 +55,7 @@ RUN apt-get update \
&& echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \ && echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \
&& apt-get update \ && apt-get update \
# For Security # For Security
&& apt-get install -y --no-install-recommends zlib1g=1:1.3.dfsg+really1.3.1-1 expat=2.6.3-1 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-6 libsqlite3-0=3.46.1-1 \ && apt-get install -y --no-install-recommends expat=2.6.3-2 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-6 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
# install a chinese font to support the use of tools like matplotlib # install a chinese font to support the use of tools like matplotlib
&& apt-get install -y fonts-noto-cjk \ && apt-get install -y fonts-noto-cjk \
&& apt-get autoremove -y \ && apt-get autoremove -y \

View File

@ -76,13 +76,13 @@
1. Install dependencies for both the backend and the test environment 1. Install dependencies for both the backend and the test environment
```bash ```bash
poetry install --with dev poetry install -C api --with dev
``` ```
2. Run the tests locally with mocked system environment variables in `tool.pytest_env` section in `pyproject.toml` 2. Run the tests locally with mocked system environment variables in `tool.pytest_env` section in `pyproject.toml`
```bash ```bash
cd ../
poetry run -C api bash dev/pytest/pytest_all_tests.sh poetry run -C api bash dev/pytest/pytest_all_tests.sh
``` ```

View File

@ -10,7 +10,6 @@ from pydantic import (
PositiveInt, PositiveInt,
computed_field, computed_field,
) )
from pydantic_extra_types.timezone_name import TimeZoneName
from pydantic_settings import BaseSettings from pydantic_settings import BaseSettings
from configs.feature.hosted_service import HostedServiceConfig from configs.feature.hosted_service import HostedServiceConfig
@ -110,7 +109,7 @@ class CodeExecutionSandboxConfig(BaseSettings):
) )
CODE_MAX_PRECISION: PositiveInt = Field( CODE_MAX_PRECISION: PositiveInt = Field(
description="mMaximum number of decimal places for floating-point numbers in code execution", description="Maximum number of decimal places for floating-point numbers in code execution",
default=20, default=20,
) )
@ -217,6 +216,11 @@ class FileUploadConfig(BaseSettings):
default=20, default=20,
) )
WORKFLOW_FILE_UPLOAD_LIMIT: PositiveInt = Field(
description="Maximum number of files allowed in a workflow upload operation",
default=10,
)
class HttpConfig(BaseSettings): class HttpConfig(BaseSettings):
""" """
@ -282,6 +286,26 @@ class HttpConfig(BaseSettings):
default=None, default=None,
) )
SSRF_DEFAULT_TIME_OUT: PositiveFloat = Field(
description="The default timeout period used for network requests (SSRF)",
default=5,
)
SSRF_DEFAULT_CONNECT_TIME_OUT: PositiveFloat = Field(
description="The default connect timeout period used for network requests (SSRF)",
default=5,
)
SSRF_DEFAULT_READ_TIME_OUT: PositiveFloat = Field(
description="The default read timeout period used for network requests (SSRF)",
default=5,
)
SSRF_DEFAULT_WRITE_TIME_OUT: PositiveFloat = Field(
description="The default write timeout period used for network requests (SSRF)",
default=5,
)
RESPECT_XFORWARD_HEADERS_ENABLED: bool = Field( RESPECT_XFORWARD_HEADERS_ENABLED: bool = Field(
description="Enable or disable the X-Forwarded-For Proxy Fix middleware from Werkzeug" description="Enable or disable the X-Forwarded-For Proxy Fix middleware from Werkzeug"
" to respect X-* headers to redirect clients", " to respect X-* headers to redirect clients",
@ -340,9 +364,8 @@ class LoggingConfig(BaseSettings):
default=None, default=None,
) )
LOG_TZ: Optional[TimeZoneName] = Field( LOG_TZ: Optional[str] = Field(
description="Timezone for log timestamps. Allowed timezone values can be referred to IANA Time Zone Database," description="Timezone for log timestamps (e.g., 'America/New_York')",
" e.g., 'America/New_York')",
default=None, default=None,
) )
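A minimal, illustrative sketch (not part of this commit) of how the four SSRF_DEFAULT_*_TIME_OUT settings introduced above could be folded into a single httpx timeout; the `build_ssrf_timeout` helper name is hypothetical.

```python
# Illustrative only: combine the SSRF timeout settings above into an httpx.Timeout.
import httpx

from configs import dify_config


def build_ssrf_timeout() -> httpx.Timeout:
    # Overall default plus per-phase connect/read/write limits, all taken from config.
    return httpx.Timeout(
        timeout=dify_config.SSRF_DEFAULT_TIME_OUT,
        connect=dify_config.SSRF_DEFAULT_CONNECT_TIME_OUT,
        read=dify_config.SSRF_DEFAULT_READ_TIME_OUT,
        write=dify_config.SSRF_DEFAULT_WRITE_TIME_OUT,
    )
```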

View File

@ -16,9 +16,11 @@ from configs.middleware.storage.supabase_storage_config import SupabaseStorageCo
from configs.middleware.storage.tencent_cos_storage_config import TencentCloudCOSStorageConfig from configs.middleware.storage.tencent_cos_storage_config import TencentCloudCOSStorageConfig
from configs.middleware.storage.volcengine_tos_storage_config import VolcengineTOSStorageConfig from configs.middleware.storage.volcengine_tos_storage_config import VolcengineTOSStorageConfig
from configs.middleware.vdb.analyticdb_config import AnalyticdbConfig from configs.middleware.vdb.analyticdb_config import AnalyticdbConfig
from configs.middleware.vdb.baidu_vector_config import BaiduVectorDBConfig
from configs.middleware.vdb.chroma_config import ChromaConfig from configs.middleware.vdb.chroma_config import ChromaConfig
from configs.middleware.vdb.couchbase_config import CouchbaseConfig from configs.middleware.vdb.couchbase_config import CouchbaseConfig
from configs.middleware.vdb.elasticsearch_config import ElasticsearchConfig from configs.middleware.vdb.elasticsearch_config import ElasticsearchConfig
from configs.middleware.vdb.lindorm_config import LindormConfig
from configs.middleware.vdb.milvus_config import MilvusConfig from configs.middleware.vdb.milvus_config import MilvusConfig
from configs.middleware.vdb.myscale_config import MyScaleConfig from configs.middleware.vdb.myscale_config import MyScaleConfig
from configs.middleware.vdb.oceanbase_config import OceanBaseVectorConfig from configs.middleware.vdb.oceanbase_config import OceanBaseVectorConfig
@ -258,6 +260,8 @@ class MiddlewareConfig(
VikingDBConfig, VikingDBConfig,
UpstashConfig, UpstashConfig,
TidbOnQdrantConfig, TidbOnQdrantConfig,
LindormConfig,
OceanBaseVectorConfig, OceanBaseVectorConfig,
BaiduVectorDBConfig,
): ):
pass pass

View File

@ -0,0 +1,23 @@
from typing import Optional
from pydantic import Field
from pydantic_settings import BaseSettings
class LindormConfig(BaseSettings):
"""
Lindorm configs
"""
LINDORM_URL: Optional[str] = Field(
description="Lindorm url",
default=None,
)
LINDORM_USERNAME: Optional[str] = Field(
description="Lindorm user",
default=None,
)
LINDORM_PASSWORD: Optional[str] = Field(
description="Lindorm password",
default=None,
)
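Because LindormConfig subclasses pydantic-settings' BaseSettings, its fields are read from the environment, so the LINDORM_* entries added to .env.example earlier in this diff map onto it directly. A small illustrative check (values are placeholders):

```python
# Illustrative: LINDORM_* environment variables populate LindormConfig automatically.
import os

from configs.middleware.vdb.lindorm_config import LindormConfig

os.environ["LINDORM_URL"] = "http://localhost:30070"  # placeholder
os.environ["LINDORM_USERNAME"] = "admin"              # placeholder
os.environ["LINDORM_PASSWORD"] = "changeme"           # placeholder

config = LindormConfig()
print(config.LINDORM_URL, config.LINDORM_USERNAME)
```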

View File

@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field( CURRENT_VERSION: str = Field(
description="Dify version", description="Dify version",
default="0.10.2", default="0.11.0",
) )
COMMIT_SHA: str = Field( COMMIT_SHA: str = Field(

View File

@ -0,0 +1,6 @@
from werkzeug.exceptions import HTTPException
class FilenameNotExistsError(HTTPException):
code = 400
description = "The specified filename does not exist."

View File

@ -0,0 +1,24 @@
from flask_restful import fields
parameters__system_parameters = {
"image_file_size_limit": fields.Integer,
"video_file_size_limit": fields.Integer,
"audio_file_size_limit": fields.Integer,
"file_size_limit": fields.Integer,
"workflow_file_upload_limit": fields.Integer,
}
parameters_fields = {
"opening_statement": fields.String,
"suggested_questions": fields.Raw,
"suggested_questions_after_answer": fields.Raw,
"speech_to_text": fields.Raw,
"text_to_speech": fields.Raw,
"retriever_resource": fields.Raw,
"annotation_reply": fields.Raw,
"more_like_this": fields.Raw,
"user_input_form": fields.Raw,
"sensitive_word_avoidance": fields.Raw,
"file_upload": fields.Raw,
"system_parameters": fields.Nested(parameters__system_parameters),
}
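For reference, a quick illustrative example of how flask_restful's marshal applies these field maps; the payload values are made up.

```python
# Illustrative: serializing a dict with the parameters_fields map defined above.
from flask_restful import marshal

payload = {
    "opening_statement": "Hello!",
    "system_parameters": {"file_size_limit": 15, "workflow_file_upload_limit": 10},
}
print(marshal(payload, parameters_fields))
# -> OrderedDict containing every key declared in parameters_fields;
#    keys absent from the input fall back to the field defaults, and
#    system_parameters is rendered through the nested field map.
```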

View File

@ -0,0 +1,97 @@
import mimetypes
import os
import re
import urllib.parse
from collections.abc import Mapping
from typing import Any
from uuid import uuid4
import httpx
from pydantic import BaseModel
from configs import dify_config
class FileInfo(BaseModel):
filename: str
extension: str
mimetype: str
size: int
def guess_file_info_from_response(response: httpx.Response):
url = str(response.url)
# Try to extract filename from URL
parsed_url = urllib.parse.urlparse(url)
url_path = parsed_url.path
filename = os.path.basename(url_path)
# If filename couldn't be extracted, use Content-Disposition header
if not filename:
content_disposition = response.headers.get("Content-Disposition")
if content_disposition:
filename_match = re.search(r'filename="?(.+)"?', content_disposition)
if filename_match:
filename = filename_match.group(1)
# If still no filename, generate a unique one
if not filename:
unique_name = str(uuid4())
filename = f"{unique_name}"
# Guess MIME type from filename first, then URL
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype, _ = mimetypes.guess_type(url)
if mimetype is None:
# If guessing fails, use Content-Type from response headers
mimetype = response.headers.get("Content-Type", "application/octet-stream")
extension = os.path.splitext(filename)[1]
# Ensure filename has an extension
if not extension:
extension = mimetypes.guess_extension(mimetype) or ".bin"
filename = f"{filename}{extension}"
return FileInfo(
filename=filename,
extension=extension,
mimetype=mimetype,
size=int(response.headers.get("Content-Length", -1)),
)
def get_parameters_from_feature_dict(*, features_dict: Mapping[str, Any], user_input_form: list[dict[str, Any]]):
return {
"opening_statement": features_dict.get("opening_statement"),
"suggested_questions": features_dict.get("suggested_questions", []),
"suggested_questions_after_answer": features_dict.get("suggested_questions_after_answer", {"enabled": False}),
"speech_to_text": features_dict.get("speech_to_text", {"enabled": False}),
"text_to_speech": features_dict.get("text_to_speech", {"enabled": False}),
"retriever_resource": features_dict.get("retriever_resource", {"enabled": False}),
"annotation_reply": features_dict.get("annotation_reply", {"enabled": False}),
"more_like_this": features_dict.get("more_like_this", {"enabled": False}),
"user_input_form": user_input_form,
"sensitive_word_avoidance": features_dict.get(
"sensitive_word_avoidance", {"enabled": False, "type": "", "configs": []}
),
"file_upload": features_dict.get(
"file_upload",
{
"image": {
"enabled": False,
"number_limits": 3,
"detail": "high",
"transfer_methods": ["remote_url", "local_file"],
}
},
),
"system_parameters": {
"image_file_size_limit": dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT,
"video_file_size_limit": dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT,
"audio_file_size_limit": dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT,
"file_size_limit": dify_config.UPLOAD_FILE_SIZE_LIMIT,
"workflow_file_upload_limit": dify_config.WORKFLOW_FILE_UPLOAD_LIMIT,
},
}
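A brief usage sketch for the guess_file_info_from_response helper above (the URL is a placeholder): the filename falls back from the URL path to Content-Disposition to a generated UUID, and the MIME type from the filename to the URL to the Content-Type header.

```python
# Illustrative usage of guess_file_info_from_response defined above.
import httpx

response = httpx.get("https://example.com/files/report.pdf")  # placeholder URL
info = guess_file_info_from_response(response)
print(info.filename, info.extension, info.mimetype, info.size)
# e.g. "report.pdf", ".pdf", "application/pdf", 102400
# (size is -1 when the response has no Content-Length header)
```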

View File

@ -2,9 +2,21 @@ from flask import Blueprint
from libs.external_api import ExternalApi from libs.external_api import ExternalApi
from .files import FileApi, FilePreviewApi, FileSupportTypeApi
from .remote_files import RemoteFileInfoApi, RemoteFileUploadApi
bp = Blueprint("console", __name__, url_prefix="/console/api") bp = Blueprint("console", __name__, url_prefix="/console/api")
api = ExternalApi(bp) api = ExternalApi(bp)
# File
api.add_resource(FileApi, "/files/upload")
api.add_resource(FilePreviewApi, "/files/<uuid:file_id>/preview")
api.add_resource(FileSupportTypeApi, "/files/support-type")
# Remote files
api.add_resource(RemoteFileInfoApi, "/remote-files/<path:url>")
api.add_resource(RemoteFileUploadApi, "/remote-files/upload")
# Import other controllers # Import other controllers
from . import admin, apikey, extension, feature, ping, setup, version from . import admin, apikey, extension, feature, ping, setup, version
@ -43,7 +55,6 @@ from .datasets import (
datasets_document, datasets_document,
datasets_segments, datasets_segments,
external, external,
file,
hit_testing, hit_testing,
website, website,
) )

View File

@ -10,8 +10,7 @@ from models.dataset import Dataset
from models.model import ApiToken, App from models.model import ApiToken, App
from . import api from . import api
from .setup import setup_required from .wraps import account_initialization_required, setup_required
from .wraps import account_initialization_required
api_key_fields = { api_key_fields = {
"id": fields.String, "id": fields.String,

View File

@ -1,8 +1,7 @@
from flask_restful import Resource, reqparse from flask_restful import Resource, reqparse
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from libs.login import login_required from libs.login import login_required
from services.advanced_prompt_template_service import AdvancedPromptTemplateService from services.advanced_prompt_template_service import AdvancedPromptTemplateService

View File

@ -2,8 +2,7 @@ from flask_restful import Resource, reqparse
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from libs.helper import uuid_value from libs.helper import uuid_value
from libs.login import login_required from libs.login import login_required
from models.model import AppMode from models.model import AppMode

View File

@ -6,8 +6,11 @@ from werkzeug.exceptions import Forbidden
from controllers.console import api from controllers.console import api
from controllers.console.app.error import NoFileUploadedError from controllers.console.app.error import NoFileUploadedError
from controllers.console.datasets.error import TooManyFilesError from controllers.console.datasets.error import TooManyFilesError
from controllers.console.setup import setup_required from controllers.console.wraps import (
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check account_initialization_required,
cloud_edition_billing_resource_check,
setup_required,
)
from extensions.ext_redis import redis_client from extensions.ext_redis import redis_client
from fields.annotation_fields import ( from fields.annotation_fields import (
annotation_fields, annotation_fields,

View File

@ -6,8 +6,11 @@ from werkzeug.exceptions import BadRequest, Forbidden, abort
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import (
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check account_initialization_required,
cloud_edition_billing_resource_check,
setup_required,
)
from core.ops.ops_trace_manager import OpsTraceManager from core.ops.ops_trace_manager import OpsTraceManager
from fields.app_fields import ( from fields.app_fields import (
app_detail_fields, app_detail_fields,

View File

@ -18,8 +18,7 @@ from controllers.console.app.error import (
UnsupportedAudioTypeError, UnsupportedAudioTypeError,
) )
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError from core.model_runtime.errors.invoke import InvokeError
from libs.login import login_required from libs.login import login_required

View File

@ -15,8 +15,7 @@ from controllers.console.app.error import (
ProviderQuotaExceededError, ProviderQuotaExceededError,
) )
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError
from core.app.apps.base_app_queue_manager import AppQueueManager from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.app_invoke_entities import InvokeFrom

View File

@ -10,8 +10,7 @@ from werkzeug.exceptions import Forbidden, NotFound
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.app_invoke_entities import InvokeFrom
from extensions.ext_database import db from extensions.ext_database import db
from fields.conversation_fields import ( from fields.conversation_fields import (

View File

@ -4,8 +4,7 @@ from sqlalchemy.orm import Session
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db from extensions.ext_database import db
from fields.conversation_variable_fields import paginated_conversation_variable_fields from fields.conversation_variable_fields import paginated_conversation_variable_fields
from libs.login import login_required from libs.login import login_required

View File

@ -10,8 +10,7 @@ from controllers.console.app.error import (
ProviderNotInitializeError, ProviderNotInitializeError,
ProviderQuotaExceededError, ProviderQuotaExceededError,
) )
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.llm_generator.llm_generator import LLMGenerator from core.llm_generator.llm_generator import LLMGenerator
from core.model_runtime.errors.invoke import InvokeError from core.model_runtime.errors.invoke import InvokeError

View File

@ -14,8 +14,11 @@ from controllers.console.app.error import (
) )
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.explore.error import AppSuggestedQuestionsAfterAnswerDisabledError from controllers.console.explore.error import AppSuggestedQuestionsAfterAnswerDisabledError
from controllers.console.setup import setup_required from controllers.console.wraps import (
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check account_initialization_required,
cloud_edition_billing_resource_check,
setup_required,
)
from core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError from core.model_runtime.errors.invoke import InvokeError

View File

@ -6,8 +6,7 @@ from flask_restful import Resource
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.agent.entities import AgentToolEntity from core.agent.entities import AgentToolEntity
from core.tools.tool_manager import ToolManager from core.tools.tool_manager import ToolManager
from core.tools.utils.configuration import ToolParameterConfigurationManager from core.tools.utils.configuration import ToolParameterConfigurationManager

View File

@ -2,8 +2,7 @@ from flask_restful import Resource, reqparse
from controllers.console import api from controllers.console import api
from controllers.console.app.error import TracingConfigCheckError, TracingConfigIsExist, TracingConfigNotExist from controllers.console.app.error import TracingConfigCheckError, TracingConfigIsExist, TracingConfigNotExist
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from libs.login import login_required from libs.login import login_required
from services.ops_service import OpsService from services.ops_service import OpsService

View File

@ -7,8 +7,7 @@ from werkzeug.exceptions import Forbidden, NotFound
from constants.languages import supported_language from constants.languages import supported_language
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db from extensions.ext_database import db
from fields.app_fields import app_site_fields from fields.app_fields import app_site_fields
from libs.login import login_required from libs.login import login_required

View File

@ -8,8 +8,7 @@ from flask_restful import Resource, reqparse
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db from extensions.ext_database import db
from libs.helper import DatetimeString from libs.helper import DatetimeString
from libs.login import login_required from libs.login import login_required

View File

@ -9,8 +9,7 @@ import services
from controllers.console import api from controllers.console import api
from controllers.console.app.error import ConversationCompletedError, DraftWorkflowNotExist, DraftWorkflowNotSync from controllers.console.app.error import ConversationCompletedError, DraftWorkflowNotExist, DraftWorkflowNotSync
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.app.apps.base_app_queue_manager import AppQueueManager from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.app_invoke_entities import InvokeFrom
from factories import variable_factory from factories import variable_factory

View File

@ -3,8 +3,7 @@ from flask_restful.inputs import int_range
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from fields.workflow_app_log_fields import workflow_app_log_pagination_fields from fields.workflow_app_log_fields import workflow_app_log_pagination_fields
from libs.login import login_required from libs.login import login_required
from models import App from models import App

View File

@ -3,8 +3,7 @@ from flask_restful.inputs import int_range
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from fields.workflow_run_fields import ( from fields.workflow_run_fields import (
advanced_chat_workflow_run_pagination_fields, advanced_chat_workflow_run_pagination_fields,
workflow_run_detail_fields, workflow_run_detail_fields,

View File

@ -8,8 +8,7 @@ from flask_restful import Resource, reqparse
from controllers.console import api from controllers.console import api
from controllers.console.app.wraps import get_app_model from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db from extensions.ext_database import db
from libs.helper import DatetimeString from libs.helper import DatetimeString
from libs.login import login_required from libs.login import login_required

View File

@ -7,8 +7,7 @@ from controllers.console.auth.error import ApiKeyAuthFailedError
from libs.login import login_required from libs.login import login_required
from services.auth.api_key_auth_service import ApiKeyAuthService from services.auth.api_key_auth_service import ApiKeyAuthService
from ..setup import setup_required from ..wraps import account_initialization_required, setup_required
from ..wraps import account_initialization_required
class ApiKeyAuthDataSource(Resource): class ApiKeyAuthDataSource(Resource):

View File

@ -11,8 +11,7 @@ from controllers.console import api
from libs.login import login_required from libs.login import login_required
from libs.oauth_data_source import NotionOAuth from libs.oauth_data_source import NotionOAuth
from ..setup import setup_required from ..wraps import account_initialization_required, setup_required
from ..wraps import account_initialization_required
def get_oauth_providers(): def get_oauth_providers():

View File

@ -13,7 +13,7 @@ from controllers.console.auth.error import (
PasswordMismatchError, PasswordMismatchError,
) )
from controllers.console.error import EmailSendIpLimitError, NotAllowedRegister from controllers.console.error import EmailSendIpLimitError, NotAllowedRegister
from controllers.console.setup import setup_required from controllers.console.wraps import setup_required
from events.tenant_event import tenant_was_created from events.tenant_event import tenant_was_created
from extensions.ext_database import db from extensions.ext_database import db
from libs.helper import email, extract_remote_ip from libs.helper import email, extract_remote_ip

View File

@ -20,7 +20,7 @@ from controllers.console.error import (
NotAllowedCreateWorkspace, NotAllowedCreateWorkspace,
NotAllowedRegister, NotAllowedRegister,
) )
from controllers.console.setup import setup_required from controllers.console.wraps import setup_required
from events.tenant_event import tenant_was_created from events.tenant_event import tenant_was_created
from libs.helper import email, extract_remote_ip from libs.helper import email, extract_remote_ip
from libs.password import valid_password from libs.password import valid_password

View File

@ -2,8 +2,7 @@ from flask_login import current_user
from flask_restful import Resource, reqparse from flask_restful import Resource, reqparse
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, only_edition_cloud, setup_required
from controllers.console.wraps import account_initialization_required, only_edition_cloud
from libs.login import login_required from libs.login import login_required
from services.billing_service import BillingService from services.billing_service import BillingService

View File

@ -7,8 +7,7 @@ from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import NotFound from werkzeug.exceptions import NotFound
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.indexing_runner import IndexingRunner from core.indexing_runner import IndexingRunner
from core.rag.extractor.entity.extract_setting import ExtractSetting from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.extractor.notion_extractor import NotionExtractor from core.rag.extractor.notion_extractor import NotionExtractor

View File

@ -10,8 +10,7 @@ from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.model_entities import ModelType
@ -457,7 +456,7 @@ class DatasetIndexingEstimateApi(Resource):
) )
except LLMBadRequestError: except LLMBadRequestError:
raise ProviderNotInitializeError( raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider." "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
) )
except ProviderTokenNotInitError as ex: except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description) raise ProviderNotInitializeError(ex.description)
@ -621,6 +620,7 @@ class DatasetRetrievalSettingApi(Resource):
case ( case (
VectorType.MILVUS VectorType.MILVUS
| VectorType.RELYT | VectorType.RELYT
| VectorType.PGVECTOR
| VectorType.TIDB_VECTOR | VectorType.TIDB_VECTOR
| VectorType.CHROMA | VectorType.CHROMA
| VectorType.TENCENT | VectorType.TENCENT
@ -641,6 +641,7 @@ class DatasetRetrievalSettingApi(Resource):
| VectorType.ELASTICSEARCH | VectorType.ELASTICSEARCH
| VectorType.PGVECTOR | VectorType.PGVECTOR
| VectorType.TIDB_ON_QDRANT | VectorType.TIDB_ON_QDRANT
| VectorType.LINDORM
| VectorType.COUCHBASE | VectorType.COUCHBASE
): ):
return { return {
@ -683,6 +684,7 @@ class DatasetRetrievalSettingMockApi(Resource):
| VectorType.ELASTICSEARCH | VectorType.ELASTICSEARCH
| VectorType.COUCHBASE | VectorType.COUCHBASE
| VectorType.PGVECTOR | VectorType.PGVECTOR
| VectorType.LINDORM
): ):
return { return {
"retrieval_method": [ "retrieval_method": [

View File

@ -24,8 +24,11 @@ from controllers.console.datasets.error import (
InvalidActionError, InvalidActionError,
InvalidMetadataError, InvalidMetadataError,
) )
from controllers.console.setup import setup_required from controllers.console.wraps import (
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check account_initialization_required,
cloud_edition_billing_resource_check,
setup_required,
)
from core.errors.error import ( from core.errors.error import (
LLMBadRequestError, LLMBadRequestError,
ModelCurrentlyNotSupportError, ModelCurrentlyNotSupportError,

View File

@ -11,11 +11,11 @@ import services
from controllers.console import api from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import InvalidActionError, NoFileUploadedError, TooManyFilesError from controllers.console.datasets.error import InvalidActionError, NoFileUploadedError, TooManyFilesError
from controllers.console.setup import setup_required
from controllers.console.wraps import ( from controllers.console.wraps import (
account_initialization_required, account_initialization_required,
cloud_edition_billing_knowledge_limit_check, cloud_edition_billing_knowledge_limit_check,
cloud_edition_billing_resource_check, cloud_edition_billing_resource_check,
setup_required,
) )
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_manager import ModelManager from core.model_manager import ModelManager

View File

@ -6,8 +6,7 @@ from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
import services import services
from controllers.console import api from controllers.console import api
from controllers.console.datasets.error import DatasetNameDuplicateError from controllers.console.datasets.error import DatasetNameDuplicateError
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from fields.dataset_fields import dataset_detail_fields from fields.dataset_fields import dataset_detail_fields
from libs.login import login_required from libs.login import login_required
from services.dataset_service import DatasetService from services.dataset_service import DatasetService

View File

@ -2,8 +2,7 @@ from flask_restful import Resource
from controllers.console import api from controllers.console import api
from controllers.console.datasets.hit_testing_base import DatasetsHitTestingBase from controllers.console.datasets.hit_testing_base import DatasetsHitTestingBase
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from libs.login import login_required from libs.login import login_required

View File

@ -2,8 +2,7 @@ from flask_restful import Resource, reqparse
from controllers.console import api from controllers.console import api
from controllers.console.datasets.error import WebsiteCrawlError from controllers.console.datasets.error import WebsiteCrawlError
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from libs.login import login_required from libs.login import login_required
from services.website_service import WebsiteService from services.website_service import WebsiteService

View File

@ -62,3 +62,27 @@ class EmailSendIpLimitError(BaseHTTPException):
error_code = "email_send_ip_limit" error_code = "email_send_ip_limit"
description = "Too many emails have been sent from this IP address recently. Please try again later." description = "Too many emails have been sent from this IP address recently. Please try again later."
code = 429 code = 429
class FileTooLargeError(BaseHTTPException):
error_code = "file_too_large"
description = "File size exceeded. {message}"
code = 413
class UnsupportedFileTypeError(BaseHTTPException):
error_code = "unsupported_file_type"
description = "File type not allowed."
code = 415
class TooManyFilesError(BaseHTTPException):
error_code = "too_many_files"
description = "Only one file is allowed."
code = 400
class NoFileUploadedError(BaseHTTPException):
error_code = "no_file_uploaded"
description = "Please upload your file."
code = 400

View File

@ -1,6 +1,7 @@
from flask_restful import fields, marshal_with from flask_restful import marshal_with
from configs import dify_config from controllers.common import fields
from controllers.common import helpers as controller_helpers
from controllers.console import api from controllers.console import api
from controllers.console.app.error import AppUnavailableError from controllers.console.app.error import AppUnavailableError
from controllers.console.explore.wraps import InstalledAppResource from controllers.console.explore.wraps import InstalledAppResource
@ -11,43 +12,14 @@ from services.app_service import AppService
class AppParameterApi(InstalledAppResource): class AppParameterApi(InstalledAppResource):
"""Resource for app variables.""" """Resource for app variables."""
variable_fields = { @marshal_with(fields.parameters_fields)
"key": fields.String,
"name": fields.String,
"description": fields.String,
"type": fields.String,
"default": fields.String,
"max_length": fields.Integer,
"options": fields.List(fields.String),
}
system_parameters_fields = {
"image_file_size_limit": fields.Integer,
"video_file_size_limit": fields.Integer,
"audio_file_size_limit": fields.Integer,
"file_size_limit": fields.Integer,
}
parameters_fields = {
"opening_statement": fields.String,
"suggested_questions": fields.Raw,
"suggested_questions_after_answer": fields.Raw,
"speech_to_text": fields.Raw,
"text_to_speech": fields.Raw,
"retriever_resource": fields.Raw,
"annotation_reply": fields.Raw,
"more_like_this": fields.Raw,
"user_input_form": fields.Raw,
"sensitive_word_avoidance": fields.Raw,
"file_upload": fields.Raw,
"system_parameters": fields.Nested(system_parameters_fields),
}
@marshal_with(parameters_fields)
def get(self, installed_app: InstalledApp): def get(self, installed_app: InstalledApp):
"""Retrieve app parameters.""" """Retrieve app parameters."""
app_model = installed_app.app app_model = installed_app.app
if app_model is None:
raise AppUnavailableError()
if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}: if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
workflow = app_model.workflow workflow = app_model.workflow
if workflow is None: if workflow is None:
@ -57,43 +29,16 @@ class AppParameterApi(InstalledAppResource):
user_input_form = workflow.user_input_form(to_old_structure=True) user_input_form = workflow.user_input_form(to_old_structure=True)
else: else:
app_model_config = app_model.app_model_config app_model_config = app_model.app_model_config
if app_model_config is None:
raise AppUnavailableError()
features_dict = app_model_config.to_dict() features_dict = app_model_config.to_dict()
user_input_form = features_dict.get("user_input_form", []) user_input_form = features_dict.get("user_input_form", [])
return { return controller_helpers.get_parameters_from_feature_dict(
"opening_statement": features_dict.get("opening_statement"), features_dict=features_dict, user_input_form=user_input_form
"suggested_questions": features_dict.get("suggested_questions", []), )
"suggested_questions_after_answer": features_dict.get(
"suggested_questions_after_answer", {"enabled": False}
),
"speech_to_text": features_dict.get("speech_to_text", {"enabled": False}),
"text_to_speech": features_dict.get("text_to_speech", {"enabled": False}),
"retriever_resource": features_dict.get("retriever_resource", {"enabled": False}),
"annotation_reply": features_dict.get("annotation_reply", {"enabled": False}),
"more_like_this": features_dict.get("more_like_this", {"enabled": False}),
"user_input_form": user_input_form,
"sensitive_word_avoidance": features_dict.get(
"sensitive_word_avoidance", {"enabled": False, "type": "", "configs": []}
),
"file_upload": features_dict.get(
"file_upload",
{
"image": {
"enabled": False,
"number_limits": 3,
"detail": "high",
"transfer_methods": ["remote_url", "local_file"],
}
},
),
"system_parameters": {
"image_file_size_limit": dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT,
"video_file_size_limit": dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT,
"audio_file_size_limit": dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT,
"file_size_limit": dify_config.UPLOAD_FILE_SIZE_LIMIT,
},
}
class ExploreAppMetaApi(InstalledAppResource): class ExploreAppMetaApi(InstalledAppResource):

View File

@ -3,8 +3,7 @@ from flask_restful import Resource, marshal_with, reqparse
from constants import HIDDEN_VALUE from constants import HIDDEN_VALUE
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from fields.api_based_extension_fields import api_based_extension_fields from fields.api_based_extension_fields import api_based_extension_fields
from libs.login import login_required from libs.login import login_required
from models.api_based_extension import APIBasedExtension from models.api_based_extension import APIBasedExtension

View File

@ -5,8 +5,7 @@ from libs.login import login_required
from services.feature_service import FeatureService from services.feature_service import FeatureService
from . import api from . import api
from .setup import setup_required from .wraps import account_initialization_required, cloud_utm_record, setup_required
from .wraps import account_initialization_required, cloud_utm_record
class FeatureApi(Resource): class FeatureApi(Resource):

View File

@ -1,25 +1,26 @@
import urllib.parse
from flask import request from flask import request
from flask_login import current_user from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse from flask_restful import Resource, marshal_with
import services import services
from configs import dify_config from configs import dify_config
from constants import DOCUMENT_EXTENSIONS from constants import DOCUMENT_EXTENSIONS
from controllers.console import api from controllers.common.errors import FilenameNotExistsError
from controllers.console.datasets.error import ( from controllers.console.wraps import (
account_initialization_required,
cloud_edition_billing_resource_check,
setup_required,
)
from fields.file_fields import file_fields, upload_config_fields
from libs.login import login_required
from services.file_service import FileService
from .error import (
FileTooLargeError, FileTooLargeError,
NoFileUploadedError, NoFileUploadedError,
TooManyFilesError, TooManyFilesError,
UnsupportedFileTypeError, UnsupportedFileTypeError,
) )
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.helper import ssrf_proxy
from fields.file_fields import file_fields, remote_file_info_fields, upload_config_fields
from libs.login import login_required
from services.file_service import FileService
PREVIEW_WORDS_LIMIT = 3000 PREVIEW_WORDS_LIMIT = 3000
@ -36,6 +37,7 @@ class FileApi(Resource):
"image_file_size_limit": dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT, "image_file_size_limit": dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT,
"video_file_size_limit": dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT, "video_file_size_limit": dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT,
"audio_file_size_limit": dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT, "audio_file_size_limit": dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT,
"workflow_file_upload_limit": dify_config.WORKFLOW_FILE_UPLOAD_LIMIT,
}, 200 }, 200
@setup_required @setup_required
@ -44,21 +46,29 @@ class FileApi(Resource):
@marshal_with(file_fields) @marshal_with(file_fields)
@cloud_edition_billing_resource_check("documents") @cloud_edition_billing_resource_check("documents")
def post(self): def post(self):
# get file from request
file = request.files["file"] file = request.files["file"]
source = request.form.get("source")
parser = reqparse.RequestParser()
parser.add_argument("source", type=str, required=False, location="args")
source = parser.parse_args().get("source")
# check file
if "file" not in request.files: if "file" not in request.files:
raise NoFileUploadedError() raise NoFileUploadedError()
if len(request.files) > 1: if len(request.files) > 1:
raise TooManyFilesError() raise TooManyFilesError()
if not file.filename:
raise FilenameNotExistsError
if source not in ("datasets", None):
source = None
try: try:
upload_file = FileService.upload_file(file=file, user=current_user, source=source) upload_file = FileService.upload_file(
filename=file.filename,
content=file.read(),
mimetype=file.mimetype,
user=current_user,
source=source,
)
except services.errors.file.FileTooLargeError as file_too_large_error: except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description) raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError: except services.errors.file.UnsupportedFileTypeError:
@ -83,23 +93,3 @@ class FileSupportTypeApi(Resource):
@account_initialization_required @account_initialization_required
def get(self): def get(self):
return {"allowed_extensions": DOCUMENT_EXTENSIONS} return {"allowed_extensions": DOCUMENT_EXTENSIONS}
class RemoteFileInfoApi(Resource):
@marshal_with(remote_file_info_fields)
def get(self, url):
decoded_url = urllib.parse.unquote(url)
try:
response = ssrf_proxy.head(decoded_url)
return {
"file_type": response.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(response.headers.get("Content-Length", 0)),
}
except Exception as e:
return {"error": str(e)}, 400
api.add_resource(FileApi, "/files/upload")
api.add_resource(FilePreviewApi, "/files/<uuid:file_id>/preview")
api.add_resource(FileSupportTypeApi, "/files/support-type")
api.add_resource(RemoteFileInfoApi, "/remote-files/<path:url>")
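
For reference, a sketch of the refactored upload call that this controller (and the others in this change set) now uses: FileService.upload_file takes the filename, raw bytes and mimetype explicitly instead of a werkzeug FileStorage object. Imports mirror the ones in the diff; the snippet assumes it runs inside a Flask request context.

from flask import request
from flask_login import current_user

from services.file_service import FileService

file = request.files["file"]
upload_file = FileService.upload_file(
    filename=file.filename,
    content=file.read(),
    mimetype=file.mimetype,
    user=current_user,
    source="datasets",  # or None for generic uploads
)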

View File

@ -0,0 +1,81 @@
import urllib.parse
from typing import cast
import httpx
from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse
import services
from controllers.common import helpers
from core.file import helpers as file_helpers
from core.helper import ssrf_proxy
from fields.file_fields import file_fields_with_signed_url, remote_file_info_fields
from models.account import Account
from services.file_service import FileService
from .error import (
FileTooLargeError,
UnsupportedFileTypeError,
)
class RemoteFileInfoApi(Resource):
@marshal_with(remote_file_info_fields)
def get(self, url):
decoded_url = urllib.parse.unquote(url)
resp = ssrf_proxy.head(decoded_url)
if resp.status_code != httpx.codes.OK:
# fall back to the GET method if HEAD is rejected
resp = ssrf_proxy.get(decoded_url, timeout=3)
resp.raise_for_status()
return {
"file_type": resp.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(resp.headers.get("Content-Length", 0)),
}
class RemoteFileUploadApi(Resource):
@marshal_with(file_fields_with_signed_url)
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("url", type=str, required=True, help="URL is required")
args = parser.parse_args()
url = args["url"]
resp = ssrf_proxy.head(url=url)
if resp.status_code != httpx.codes.OK:
resp = ssrf_proxy.get(url=url, timeout=3)
resp.raise_for_status()
file_info = helpers.guess_file_info_from_response(resp)
if not FileService.is_file_size_within_limit(extension=file_info.extension, file_size=file_info.size):
raise FileTooLargeError
content = resp.content if resp.request.method == "GET" else ssrf_proxy.get(url).content
try:
user = cast(Account, current_user)
upload_file = FileService.upload_file(
filename=file_info.filename,
content=content,
mimetype=file_info.mimetype,
user=user,
source_url=url,
)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError()
return {
"id": upload_file.id,
"name": upload_file.name,
"size": upload_file.size,
"extension": upload_file.extension,
"url": file_helpers.get_signed_file_url(upload_file_id=upload_file.id),
"mime_type": upload_file.mime_type,
"created_by": upload_file.created_by,
"created_at": upload_file.created_at,
}, 201
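
The HEAD-then-GET probe above is the core of both remote-file endpoints. A reduced sketch of the same pattern with plain httpx (the controllers go through ssrf_proxy, which exposes a compatible interface with SSRF protection); probe_remote_file is an illustrative name.

import httpx

def probe_remote_file(url: str) -> tuple[str, int]:
    # Try a cheap HEAD first; fall back to a bounded GET when HEAD is rejected.
    resp = httpx.head(url, follow_redirects=True)
    if resp.status_code != httpx.codes.OK:
        resp = httpx.get(url, timeout=3, follow_redirects=True)
        resp.raise_for_status()
    return (
        resp.headers.get("Content-Type", "application/octet-stream"),
        int(resp.headers.get("Content-Length", 0)),
    )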

View File

@ -1,5 +1,3 @@
from functools import wraps
from flask import request from flask import request
from flask_restful import Resource, reqparse from flask_restful import Resource, reqparse
@ -10,7 +8,7 @@ from models.model import DifySetup
from services.account_service import RegisterService, TenantService from services.account_service import RegisterService, TenantService
from . import api from . import api
from .error import AlreadySetupError, NotInitValidateError, NotSetupError from .error import AlreadySetupError, NotInitValidateError
from .init_validate import get_init_validate_status from .init_validate import get_init_validate_status
from .wraps import only_edition_self_hosted from .wraps import only_edition_self_hosted
@ -52,26 +50,10 @@ class SetupApi(Resource):
return {"result": "success"}, 201 return {"result": "success"}, 201
def setup_required(view):
@wraps(view)
def decorated(*args, **kwargs):
# check setup
if not get_init_validate_status():
raise NotInitValidateError()
elif not get_setup_status():
raise NotSetupError()
return view(*args, **kwargs)
return decorated
def get_setup_status(): def get_setup_status():
if dify_config.EDITION == "SELF_HOSTED": if dify_config.EDITION == "SELF_HOSTED":
return DifySetup.query.first() return DifySetup.query.first()
else: return True
return True
api.add_resource(SetupApi, "/setup") api.add_resource(SetupApi, "/setup")

View File

@ -4,8 +4,7 @@ from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import Forbidden from werkzeug.exceptions import Forbidden
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from fields.tag_fields import tag_fields from fields.tag_fields import tag_fields
from libs.login import login_required from libs.login import login_required
from models.model import Tag from models.model import Tag

View File

@ -3,6 +3,7 @@ import logging
import requests import requests
from flask_restful import Resource, reqparse from flask_restful import Resource, reqparse
from packaging import version
from configs import dify_config from configs import dify_config
@ -47,43 +48,15 @@ class VersionApi(Resource):
def _has_new_version(*, latest_version: str, current_version: str) -> bool: def _has_new_version(*, latest_version: str, current_version: str) -> bool:
def parse_version(version: str) -> tuple: try:
# Split version into parts and pre-release suffix if any latest = version.parse(latest_version)
parts = version.split("-") current = version.parse(current_version)
version_parts = parts[0].split(".")
pre_release = parts[1] if len(parts) > 1 else None
# Validate version format # Compare versions
if len(version_parts) != 3: return latest > current
raise ValueError(f"Invalid version format: {version}") except version.InvalidVersion:
logging.warning(f"Invalid version format: latest={latest_version}, current={current_version}")
try:
# Convert version parts to integers
major, minor, patch = map(int, version_parts)
return (major, minor, patch, pre_release)
except ValueError:
raise ValueError(f"Invalid version format: {version}")
latest = parse_version(latest_version)
current = parse_version(current_version)
# Compare major, minor, and patch versions
for latest_part, current_part in zip(latest[:3], current[:3]):
if latest_part > current_part:
return True
elif latest_part < current_part:
return False
# If versions are equal, check pre-release suffixes
if latest[3] is None and current[3] is not None:
return True
elif latest[3] is not None and current[3] is None:
return False return False
elif latest[3] is not None and current[3] is not None:
# Simple string comparison for pre-release versions
return latest[3] > current[3]
return False
api.add_resource(VersionApi, "/version") api.add_resource(VersionApi, "/version")
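
A quick illustration of the semantics gained by delegating version comparison to packaging.version: pre-releases sort below the corresponding final release, and malformed strings raise InvalidVersion (with packaging >= 22), which the new _has_new_version catches and logs.

from packaging import version

assert version.parse("0.11.0") > version.parse("0.10.2")
assert version.parse("0.11.0") > version.parse("0.11.0-beta1")  # pre-release < release

try:
    version.parse("not-a-version")
except version.InvalidVersion:
    pass  # the API route logs a warning and reports no new version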

View File

@ -8,14 +8,13 @@ from flask_restful import Resource, fields, marshal_with, reqparse
from configs import dify_config from configs import dify_config
from constants.languages import supported_language from constants.languages import supported_language
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.workspace.error import ( from controllers.console.workspace.error import (
AccountAlreadyInitedError, AccountAlreadyInitedError,
CurrentPasswordIncorrectError, CurrentPasswordIncorrectError,
InvalidInvitationCodeError, InvalidInvitationCodeError,
RepeatPasswordNotMatchError, RepeatPasswordNotMatchError,
) )
from controllers.console.wraps import account_initialization_required from controllers.console.wraps import account_initialization_required, setup_required
from extensions.ext_database import db from extensions.ext_database import db
from fields.member_fields import account_fields from fields.member_fields import account_fields
from libs.helper import TimestampField, timezone from libs.helper import TimestampField, timezone

View File

@ -2,8 +2,7 @@ from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden from werkzeug.exceptions import Forbidden
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from libs.login import current_user, login_required from libs.login import current_user, login_required

View File

@ -4,8 +4,11 @@ from flask_restful import Resource, abort, marshal_with, reqparse
import services import services
from configs import dify_config from configs import dify_config
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import (
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check account_initialization_required,
cloud_edition_billing_resource_check,
setup_required,
)
from extensions.ext_database import db from extensions.ext_database import db
from fields.member_fields import account_with_role_list_fields from fields.member_fields import account_with_role_list_fields
from libs.login import login_required from libs.login import login_required

View File

@ -6,8 +6,7 @@ from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden from werkzeug.exceptions import Forbidden
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.utils.encoders import jsonable_encoder from core.model_runtime.utils.encoders import jsonable_encoder

View File

@ -5,8 +5,7 @@ from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden from werkzeug.exceptions import Forbidden
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.utils.encoders import jsonable_encoder from core.model_runtime.utils.encoders import jsonable_encoder

View File

@ -7,8 +7,7 @@ from werkzeug.exceptions import Forbidden
from configs import dify_config from configs import dify_config
from controllers.console import api from controllers.console import api
from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, setup_required
from controllers.console.wraps import account_initialization_required
from core.model_runtime.utils.encoders import jsonable_encoder from core.model_runtime.utils.encoders import jsonable_encoder
from libs.helper import alphanumeric, uuid_value from libs.helper import alphanumeric, uuid_value
from libs.login import login_required from libs.login import login_required

View File

@ -6,6 +6,7 @@ from flask_restful import Resource, fields, inputs, marshal, marshal_with, reqpa
from werkzeug.exceptions import Unauthorized from werkzeug.exceptions import Unauthorized
import services import services
from controllers.common.errors import FilenameNotExistsError
from controllers.console import api from controllers.console import api
from controllers.console.admin import admin_required from controllers.console.admin import admin_required
from controllers.console.datasets.error import ( from controllers.console.datasets.error import (
@ -15,8 +16,11 @@ from controllers.console.datasets.error import (
UnsupportedFileTypeError, UnsupportedFileTypeError,
) )
from controllers.console.error import AccountNotLinkTenantError from controllers.console.error import AccountNotLinkTenantError
from controllers.console.setup import setup_required from controllers.console.wraps import (
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check account_initialization_required,
cloud_edition_billing_resource_check,
setup_required,
)
from extensions.ext_database import db from extensions.ext_database import db
from libs.helper import TimestampField from libs.helper import TimestampField
from libs.login import login_required from libs.login import login_required
@ -193,12 +197,20 @@ class WebappLogoWorkspaceApi(Resource):
if len(request.files) > 1: if len(request.files) > 1:
raise TooManyFilesError() raise TooManyFilesError()
if not file.filename:
raise FilenameNotExistsError
extension = file.filename.split(".")[-1] extension = file.filename.split(".")[-1]
if extension.lower() not in {"svg", "png"}: if extension.lower() not in {"svg", "png"}:
raise UnsupportedFileTypeError() raise UnsupportedFileTypeError()
try: try:
upload_file = FileService.upload_file(file=file, user=current_user) upload_file = FileService.upload_file(
filename=file.filename,
content=file.read(),
mimetype=file.mimetype,
user=current_user,
)
except services.errors.file.FileTooLargeError as file_too_large_error: except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description) raise FileTooLargeError(file_too_large_error.description)

View File

@ -1,4 +1,5 @@
import json import json
import os
from functools import wraps from functools import wraps
from flask import abort, request from flask import abort, request
@ -6,9 +7,12 @@ from flask_login import current_user
from configs import dify_config from configs import dify_config
from controllers.console.workspace.error import AccountNotInitializedError from controllers.console.workspace.error import AccountNotInitializedError
from models.model import DifySetup
from services.feature_service import FeatureService from services.feature_service import FeatureService
from services.operation_service import OperationService from services.operation_service import OperationService
from .error import NotInitValidateError, NotSetupError
def account_initialization_required(view): def account_initialization_required(view):
@wraps(view) @wraps(view)
@ -124,3 +128,17 @@ def cloud_utm_record(view):
return view(*args, **kwargs) return view(*args, **kwargs)
return decorated return decorated
def setup_required(view):
@wraps(view)
def decorated(*args, **kwargs):
# check setup
if dify_config.EDITION == "SELF_HOSTED" and os.environ.get("INIT_PASSWORD") and not DifySetup.query.first():
raise NotInitValidateError()
elif dify_config.EDITION == "SELF_HOSTED" and not DifySetup.query.first():
raise NotSetupError()
return view(*args, **kwargs)
return decorated
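
After this move, controllers import setup_required from controllers.console.wraps and stack it with the other guards. A minimal illustrative resource (ExampleApi is not part of the diff):

from flask_restful import Resource

from controllers.console.wraps import account_initialization_required, setup_required
from libs.login import login_required

class ExampleApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        return {"result": "success"}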

View File

@ -1,6 +1,6 @@
from flask_restful import Resource, reqparse from flask_restful import Resource, reqparse
from controllers.console.setup import setup_required from controllers.console.wraps import setup_required
from controllers.inner_api import api from controllers.inner_api import api
from controllers.inner_api.wraps import inner_api_only from controllers.inner_api.wraps import inner_api_only
from events.tenant_event import tenant_was_created from events.tenant_event import tenant_was_created

View File

@ -1,6 +1,7 @@
from flask_restful import Resource, fields, marshal_with from flask_restful import Resource, marshal_with
from configs import dify_config from controllers.common import fields
from controllers.common import helpers as controller_helpers
from controllers.service_api import api from controllers.service_api import api
from controllers.service_api.app.error import AppUnavailableError from controllers.service_api.app.error import AppUnavailableError
from controllers.service_api.wraps import validate_app_token from controllers.service_api.wraps import validate_app_token
@ -11,40 +12,8 @@ from services.app_service import AppService
class AppParameterApi(Resource): class AppParameterApi(Resource):
"""Resource for app variables.""" """Resource for app variables."""
variable_fields = {
"key": fields.String,
"name": fields.String,
"description": fields.String,
"type": fields.String,
"default": fields.String,
"max_length": fields.Integer,
"options": fields.List(fields.String),
}
system_parameters_fields = {
"image_file_size_limit": fields.Integer,
"video_file_size_limit": fields.Integer,
"audio_file_size_limit": fields.Integer,
"file_size_limit": fields.Integer,
}
parameters_fields = {
"opening_statement": fields.String,
"suggested_questions": fields.Raw,
"suggested_questions_after_answer": fields.Raw,
"speech_to_text": fields.Raw,
"text_to_speech": fields.Raw,
"retriever_resource": fields.Raw,
"annotation_reply": fields.Raw,
"more_like_this": fields.Raw,
"user_input_form": fields.Raw,
"sensitive_word_avoidance": fields.Raw,
"file_upload": fields.Raw,
"system_parameters": fields.Nested(system_parameters_fields),
}
@validate_app_token @validate_app_token
@marshal_with(parameters_fields) @marshal_with(fields.parameters_fields)
def get(self, app_model: App): def get(self, app_model: App):
"""Retrieve app parameters.""" """Retrieve app parameters."""
if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}: if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
@ -56,43 +25,16 @@ class AppParameterApi(Resource):
user_input_form = workflow.user_input_form(to_old_structure=True) user_input_form = workflow.user_input_form(to_old_structure=True)
else: else:
app_model_config = app_model.app_model_config app_model_config = app_model.app_model_config
if app_model_config is None:
raise AppUnavailableError()
features_dict = app_model_config.to_dict() features_dict = app_model_config.to_dict()
user_input_form = features_dict.get("user_input_form", []) user_input_form = features_dict.get("user_input_form", [])
return { return controller_helpers.get_parameters_from_feature_dict(
"opening_statement": features_dict.get("opening_statement"), features_dict=features_dict, user_input_form=user_input_form
"suggested_questions": features_dict.get("suggested_questions", []), )
"suggested_questions_after_answer": features_dict.get(
"suggested_questions_after_answer", {"enabled": False}
),
"speech_to_text": features_dict.get("speech_to_text", {"enabled": False}),
"text_to_speech": features_dict.get("text_to_speech", {"enabled": False}),
"retriever_resource": features_dict.get("retriever_resource", {"enabled": False}),
"annotation_reply": features_dict.get("annotation_reply", {"enabled": False}),
"more_like_this": features_dict.get("more_like_this", {"enabled": False}),
"user_input_form": user_input_form,
"sensitive_word_avoidance": features_dict.get(
"sensitive_word_avoidance", {"enabled": False, "type": "", "configs": []}
),
"file_upload": features_dict.get(
"file_upload",
{
"image": {
"enabled": False,
"number_limits": 3,
"detail": "high",
"transfer_methods": ["remote_url", "local_file"],
}
},
),
"system_parameters": {
"image_file_size_limit": dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT,
"video_file_size_limit": dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT,
"audio_file_size_limit": dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT,
"file_size_limit": dify_config.UPLOAD_FILE_SIZE_LIMIT,
},
}
class AppMetaApi(Resource): class AppMetaApi(Resource):

View File

@ -2,6 +2,7 @@ from flask import request
from flask_restful import Resource, marshal_with from flask_restful import Resource, marshal_with
import services import services
from controllers.common.errors import FilenameNotExistsError
from controllers.service_api import api from controllers.service_api import api
from controllers.service_api.app.error import ( from controllers.service_api.app.error import (
FileTooLargeError, FileTooLargeError,
@ -31,8 +32,16 @@ class FileApi(Resource):
if len(request.files) > 1: if len(request.files) > 1:
raise TooManyFilesError() raise TooManyFilesError()
if not file.filename:
raise FilenameNotExistsError
try: try:
upload_file = FileService.upload_file(file, end_user) upload_file = FileService.upload_file(
filename=file.filename,
content=file.read(),
mimetype=file.mimetype,
user=end_user,
)
except services.errors.file.FileTooLargeError as file_too_large_error: except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description) raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError: except services.errors.file.UnsupportedFileTypeError:

View File

@ -6,6 +6,7 @@ from sqlalchemy import desc
from werkzeug.exceptions import NotFound from werkzeug.exceptions import NotFound
import services.dataset_service import services.dataset_service
from controllers.common.errors import FilenameNotExistsError
from controllers.service_api import api from controllers.service_api import api
from controllers.service_api.app.error import ProviderNotInitializeError from controllers.service_api.app.error import ProviderNotInitializeError
from controllers.service_api.dataset.error import ( from controllers.service_api.dataset.error import (
@ -55,7 +56,12 @@ class DocumentAddByTextApi(DatasetApiResource):
if not dataset.indexing_technique and not args["indexing_technique"]: if not dataset.indexing_technique and not args["indexing_technique"]:
raise ValueError("indexing_technique is required.") raise ValueError("indexing_technique is required.")
upload_file = FileService.upload_text(args.get("text"), args.get("name")) text = args.get("text")
name = args.get("name")
if text is None or name is None:
raise ValueError("Both 'text' and 'name' must be non-null values.")
upload_file = FileService.upload_text(text=str(text), text_name=str(name))
data_source = { data_source = {
"type": "upload_file", "type": "upload_file",
"info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}}, "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}},
@ -104,7 +110,11 @@ class DocumentUpdateByTextApi(DatasetApiResource):
raise ValueError("Dataset is not exist.") raise ValueError("Dataset is not exist.")
if args["text"]: if args["text"]:
upload_file = FileService.upload_text(args.get("text"), args.get("name")) text = args.get("text")
name = args.get("name")
if text is None or name is None:
raise ValueError("Both text and name must be strings.")
upload_file = FileService.upload_text(text=str(text), text_name=str(name))
data_source = { data_source = {
"type": "upload_file", "type": "upload_file",
"info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}}, "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}},
@ -163,7 +173,16 @@ class DocumentAddByFileApi(DatasetApiResource):
if len(request.files) > 1: if len(request.files) > 1:
raise TooManyFilesError() raise TooManyFilesError()
upload_file = FileService.upload_file(file, current_user) if not file.filename:
raise FilenameNotExistsError
upload_file = FileService.upload_file(
filename=file.filename,
content=file.read(),
mimetype=file.mimetype,
user=current_user,
source="datasets",
)
data_source = {"type": "upload_file", "info_list": {"file_info_list": {"file_ids": [upload_file.id]}}} data_source = {"type": "upload_file", "info_list": {"file_info_list": {"file_ids": [upload_file.id]}}}
args["data_source"] = data_source args["data_source"] = data_source
# validate args # validate args
@ -212,7 +231,16 @@ class DocumentUpdateByFileApi(DatasetApiResource):
if len(request.files) > 1: if len(request.files) > 1:
raise TooManyFilesError() raise TooManyFilesError()
upload_file = FileService.upload_file(file, current_user) if not file.filename:
raise FilenameNotExistsError
upload_file = FileService.upload_file(
filename=file.filename,
content=file.read(),
mimetype=file.mimetype,
user=current_user,
source="datasets",
)
data_source = {"type": "upload_file", "info_list": {"file_info_list": {"file_ids": [upload_file.id]}}} data_source = {"type": "upload_file", "info_list": {"file_info_list": {"file_ids": [upload_file.id]}}}
args["data_source"] = data_source args["data_source"] = data_source
# validate args # validate args
@ -331,10 +359,26 @@ class DocumentIndexingStatusApi(DatasetApiResource):
return data return data
api.add_resource(DocumentAddByTextApi, "/datasets/<uuid:dataset_id>/document/create_by_text") api.add_resource(
api.add_resource(DocumentAddByFileApi, "/datasets/<uuid:dataset_id>/document/create_by_file") DocumentAddByTextApi,
api.add_resource(DocumentUpdateByTextApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_text") "/datasets/<uuid:dataset_id>/document/create_by_text",
api.add_resource(DocumentUpdateByFileApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_file") "/datasets/<uuid:dataset_id>/document/create-by-text",
)
api.add_resource(
DocumentAddByFileApi,
"/datasets/<uuid:dataset_id>/document/create_by_file",
"/datasets/<uuid:dataset_id>/document/create-by-file",
)
api.add_resource(
DocumentUpdateByTextApi,
"/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_text",
"/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update-by-text",
)
api.add_resource(
DocumentUpdateByFileApi,
"/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_file",
"/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update-by-file",
)
api.add_resource(DocumentDeleteApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>") api.add_resource(DocumentDeleteApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>")
api.add_resource(DocumentListApi, "/datasets/<uuid:dataset_id>/documents") api.add_resource(DocumentListApi, "/datasets/<uuid:dataset_id>/documents")
api.add_resource(DocumentIndexingStatusApi, "/datasets/<uuid:dataset_id>/documents/<string:batch>/indexing-status") api.add_resource(DocumentIndexingStatusApi, "/datasets/<uuid:dataset_id>/documents/<string:batch>/indexing-status")
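
The doubled route strings above register hyphenated aliases alongside the legacy snake_case paths, so the create_by_text and create-by-text URLs both resolve to the same resource. A self-contained flask_restful sketch of the pattern (PingApi and the paths are illustrative):

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class PingApi(Resource):
    def get(self):
        return {"ok": True}

# One resource, two URLs: the old underscore path and the new hyphenated alias.
api.add_resource(PingApi, "/ping_check", "/ping-check")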

View File

@ -14,4 +14,4 @@ class HitTestingApi(DatasetApiResource, DatasetsHitTestingBase):
return self.perform_hit_testing(dataset, args) return self.perform_hit_testing(dataset, args)
api.add_resource(HitTestingApi, "/datasets/<uuid:dataset_id>/hit-testing") api.add_resource(HitTestingApi, "/datasets/<uuid:dataset_id>/hit-testing", "/datasets/<uuid:dataset_id>/retrieve")

View File

@ -2,8 +2,17 @@ from flask import Blueprint
from libs.external_api import ExternalApi from libs.external_api import ExternalApi
from .files import FileApi
from .remote_files import RemoteFileInfoApi, RemoteFileUploadApi
bp = Blueprint("web", __name__, url_prefix="/api") bp = Blueprint("web", __name__, url_prefix="/api")
api = ExternalApi(bp) api = ExternalApi(bp)
# Files
api.add_resource(FileApi, "/files/upload")
from . import app, audio, completion, conversation, feature, file, message, passport, saved_message, site, workflow # Remote files
api.add_resource(RemoteFileInfoApi, "/remote-files/<path:url>")
api.add_resource(RemoteFileUploadApi, "/remote-files/upload")
from . import app, audio, completion, conversation, feature, message, passport, saved_message, site, workflow

View File

@ -1,6 +1,7 @@
from flask_restful import fields, marshal_with from flask_restful import marshal_with
from configs import dify_config from controllers.common import fields
from controllers.common import helpers as controller_helpers
from controllers.web import api from controllers.web import api
from controllers.web.error import AppUnavailableError from controllers.web.error import AppUnavailableError
from controllers.web.wraps import WebApiResource from controllers.web.wraps import WebApiResource
@ -11,39 +12,7 @@ from services.app_service import AppService
class AppParameterApi(WebApiResource): class AppParameterApi(WebApiResource):
"""Resource for app variables.""" """Resource for app variables."""
variable_fields = { @marshal_with(fields.parameters_fields)
"key": fields.String,
"name": fields.String,
"description": fields.String,
"type": fields.String,
"default": fields.String,
"max_length": fields.Integer,
"options": fields.List(fields.String),
}
system_parameters_fields = {
"image_file_size_limit": fields.Integer,
"video_file_size_limit": fields.Integer,
"audio_file_size_limit": fields.Integer,
"file_size_limit": fields.Integer,
}
parameters_fields = {
"opening_statement": fields.String,
"suggested_questions": fields.Raw,
"suggested_questions_after_answer": fields.Raw,
"speech_to_text": fields.Raw,
"text_to_speech": fields.Raw,
"retriever_resource": fields.Raw,
"annotation_reply": fields.Raw,
"more_like_this": fields.Raw,
"user_input_form": fields.Raw,
"sensitive_word_avoidance": fields.Raw,
"file_upload": fields.Raw,
"system_parameters": fields.Nested(system_parameters_fields),
}
@marshal_with(parameters_fields)
def get(self, app_model: App, end_user): def get(self, app_model: App, end_user):
"""Retrieve app parameters.""" """Retrieve app parameters."""
if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}: if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
@ -55,43 +24,16 @@ class AppParameterApi(WebApiResource):
user_input_form = workflow.user_input_form(to_old_structure=True) user_input_form = workflow.user_input_form(to_old_structure=True)
else: else:
app_model_config = app_model.app_model_config app_model_config = app_model.app_model_config
if app_model_config is None:
raise AppUnavailableError()
features_dict = app_model_config.to_dict() features_dict = app_model_config.to_dict()
user_input_form = features_dict.get("user_input_form", []) user_input_form = features_dict.get("user_input_form", [])
return { return controller_helpers.get_parameters_from_feature_dict(
"opening_statement": features_dict.get("opening_statement"), features_dict=features_dict, user_input_form=user_input_form
"suggested_questions": features_dict.get("suggested_questions", []), )
"suggested_questions_after_answer": features_dict.get(
"suggested_questions_after_answer", {"enabled": False}
),
"speech_to_text": features_dict.get("speech_to_text", {"enabled": False}),
"text_to_speech": features_dict.get("text_to_speech", {"enabled": False}),
"retriever_resource": features_dict.get("retriever_resource", {"enabled": False}),
"annotation_reply": features_dict.get("annotation_reply", {"enabled": False}),
"more_like_this": features_dict.get("more_like_this", {"enabled": False}),
"user_input_form": user_input_form,
"sensitive_word_avoidance": features_dict.get(
"sensitive_word_avoidance", {"enabled": False, "type": "", "configs": []}
),
"file_upload": features_dict.get(
"file_upload",
{
"image": {
"enabled": False,
"number_limits": 3,
"detail": "high",
"transfer_methods": ["remote_url", "local_file"],
}
},
),
"system_parameters": {
"image_file_size_limit": dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT,
"video_file_size_limit": dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT,
"audio_file_size_limit": dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT,
"file_size_limit": dify_config.UPLOAD_FILE_SIZE_LIMIT,
},
}
class AppMeta(WebApiResource): class AppMeta(WebApiResource):

View File

@ -1,56 +0,0 @@
import urllib.parse
from flask import request
from flask_restful import marshal_with, reqparse
import services
from controllers.web import api
from controllers.web.error import FileTooLargeError, NoFileUploadedError, TooManyFilesError, UnsupportedFileTypeError
from controllers.web.wraps import WebApiResource
from core.helper import ssrf_proxy
from fields.file_fields import file_fields, remote_file_info_fields
from services.file_service import FileService
class FileApi(WebApiResource):
@marshal_with(file_fields)
def post(self, app_model, end_user):
# get file from request
file = request.files["file"]
parser = reqparse.RequestParser()
parser.add_argument("source", type=str, required=False, location="args")
source = parser.parse_args().get("source")
# check file
if "file" not in request.files:
raise NoFileUploadedError()
if len(request.files) > 1:
raise TooManyFilesError()
try:
upload_file = FileService.upload_file(file=file, user=end_user, source=source)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError()
return upload_file, 201
class RemoteFileInfoApi(WebApiResource):
@marshal_with(remote_file_info_fields)
def get(self, url):
decoded_url = urllib.parse.unquote(url)
try:
response = ssrf_proxy.head(decoded_url)
return {
"file_type": response.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(response.headers.get("Content-Length", -1)),
}
except Exception as e:
return {"error": str(e)}, 400
api.add_resource(FileApi, "/files/upload")
api.add_resource(RemoteFileInfoApi, "/remote-files/<path:url>")

View File

@ -0,0 +1,43 @@
from flask import request
from flask_restful import marshal_with
import services
from controllers.common.errors import FilenameNotExistsError
from controllers.web.error import FileTooLargeError, NoFileUploadedError, TooManyFilesError, UnsupportedFileTypeError
from controllers.web.wraps import WebApiResource
from fields.file_fields import file_fields
from services.file_service import FileService
class FileApi(WebApiResource):
@marshal_with(file_fields)
def post(self, app_model, end_user):
file = request.files["file"]
source = request.form.get("source")
if "file" not in request.files:
raise NoFileUploadedError()
if len(request.files) > 1:
raise TooManyFilesError()
if not file.filename:
raise FilenameNotExistsError
if source not in ("datasets", None):
source = None
try:
upload_file = FileService.upload_file(
filename=file.filename,
content=file.read(),
mimetype=file.mimetype,
user=end_user,
source=source,
)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError()
return upload_file, 201

View File

@ -0,0 +1,75 @@
import urllib.parse
import httpx
from flask_restful import marshal_with, reqparse
import services
from controllers.common import helpers
from controllers.web.wraps import WebApiResource
from core.file import helpers as file_helpers
from core.helper import ssrf_proxy
from fields.file_fields import file_fields_with_signed_url, remote_file_info_fields
from services.file_service import FileService
from .error import FileTooLargeError, UnsupportedFileTypeError
class RemoteFileInfoApi(WebApiResource):
@marshal_with(remote_file_info_fields)
def get(self, app_model, end_user, url):
decoded_url = urllib.parse.unquote(url)
resp = ssrf_proxy.head(decoded_url)
if resp.status_code != httpx.codes.OK:
# fall back to the GET method if HEAD is rejected
resp = ssrf_proxy.get(decoded_url, timeout=3)
resp.raise_for_status()
return {
"file_type": resp.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(resp.headers.get("Content-Length", -1)),
}
class RemoteFileUploadApi(WebApiResource):
@marshal_with(file_fields_with_signed_url)
def post(self, app_model, end_user):  # app_model and end_user are supplied by WebApiResource
parser = reqparse.RequestParser()
parser.add_argument("url", type=str, required=True, help="URL is required")
args = parser.parse_args()
url = args["url"]
resp = ssrf_proxy.head(url=url)
if resp.status_code != httpx.codes.OK:
resp = ssrf_proxy.get(url=url, timeout=3)
resp.raise_for_status()
file_info = helpers.guess_file_info_from_response(resp)
if not FileService.is_file_size_within_limit(extension=file_info.extension, file_size=file_info.size):
raise FileTooLargeError
content = resp.content if resp.request.method == "GET" else ssrf_proxy.get(url).content
try:
upload_file = FileService.upload_file(
filename=file_info.filename,
content=content,
mimetype=file_info.mimetype,
user=end_user,
source_url=url,
)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError
return {
"id": upload_file.id,
"name": upload_file.name,
"size": upload_file.size,
"extension": upload_file.extension,
"url": file_helpers.get_signed_file_url(upload_file_id=upload_file.id),
"mime_type": upload_file.mime_type,
"created_by": upload_file.created_by,
"created_at": upload_file.created_at,
}, 201

View File

@ -1,8 +1,7 @@
from collections.abc import Mapping from collections.abc import Mapping
from typing import Any from typing import Any
from core.file.models import FileExtraConfig from core.file import FileExtraConfig
from models import FileUploadConfig
class FileUploadConfigManager: class FileUploadConfigManager:
@ -43,6 +42,6 @@ class FileUploadConfigManager:
if not config.get("file_upload"): if not config.get("file_upload"):
config["file_upload"] = {} config["file_upload"] = {}
else: else:
FileUploadConfig.model_validate(config["file_upload"]) FileExtraConfig.model_validate(config["file_upload"])
return config, ["file_upload"] return config, ["file_upload"]

View File

@ -20,6 +20,7 @@ from core.app.entities.queue_entities import (
QueueIterationStartEvent, QueueIterationStartEvent,
QueueMessageReplaceEvent, QueueMessageReplaceEvent,
QueueNodeFailedEvent, QueueNodeFailedEvent,
QueueNodeInIterationFailedEvent,
QueueNodeStartedEvent, QueueNodeStartedEvent,
QueueNodeSucceededEvent, QueueNodeSucceededEvent,
QueueParallelBranchRunFailedEvent, QueueParallelBranchRunFailedEvent,
@ -314,7 +315,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
if response: if response:
yield response yield response
elif isinstance(event, QueueNodeFailedEvent): elif isinstance(event, QueueNodeFailedEvent | QueueNodeInIterationFailedEvent):
workflow_node_execution = self._handle_workflow_node_execution_failed(event) workflow_node_execution = self._handle_workflow_node_execution_failed(event)
response = self._workflow_node_finish_to_stream_response( response = self._workflow_node_finish_to_stream_response(
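
The failure branch now matches either event class through the PEP 604 union form of isinstance (Python 3.10+), which behaves exactly like the tuple form; a tiny standalone check with placeholder class names:

class NodeFailed: ...
class NodeInIterationFailed: ...

event = NodeInIterationFailed()
assert isinstance(event, NodeFailed | NodeInIterationFailed)
assert isinstance(event, (NodeFailed, NodeInIterationFailed))  # equivalent spelling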

View File

@ -22,7 +22,10 @@ class BaseAppGenerator:
user_inputs = user_inputs or {} user_inputs = user_inputs or {}
# Filter input variables from form configuration, handle required fields, default values, and option values # Filter input variables from form configuration, handle required fields, default values, and option values
variables = app_config.variables variables = app_config.variables
user_inputs = {var.variable: self._validate_input(inputs=user_inputs, var=var) for var in variables} user_inputs = {
var.variable: self._validate_inputs(value=user_inputs.get(var.variable), variable_entity=var)
for var in variables
}
user_inputs = {k: self._sanitize_value(v) for k, v in user_inputs.items()} user_inputs = {k: self._sanitize_value(v) for k, v in user_inputs.items()}
# Convert files in inputs to File # Convert files in inputs to File
entity_dictionary = {item.variable: item for item in app_config.variables} entity_dictionary = {item.variable: item for item in app_config.variables}
@ -74,50 +77,66 @@ class BaseAppGenerator:
return user_inputs return user_inputs
def _validate_input(self, *, inputs: Mapping[str, Any], var: "VariableEntity"): def _validate_inputs(
user_input_value = inputs.get(var.variable) self,
if not user_input_value: *,
if var.required: variable_entity: "VariableEntity",
raise ValueError(f"{var.variable} is required in input form") value: Any,
else: ):
return None if value is None:
if variable_entity.required:
raise ValueError(f"{variable_entity.variable} is required in input form")
return value
if var.type in { if variable_entity.type in {
VariableEntityType.TEXT_INPUT, VariableEntityType.TEXT_INPUT,
VariableEntityType.SELECT, VariableEntityType.SELECT,
VariableEntityType.PARAGRAPH, VariableEntityType.PARAGRAPH,
} and not isinstance(user_input_value, str): } and not isinstance(value, str):
raise ValueError(f"(type '{var.type}') {var.variable} in input form must be a string") raise ValueError(
if var.type == VariableEntityType.NUMBER and isinstance(user_input_value, str): f"(type '{variable_entity.type}') {variable_entity.variable} in input form must be a string"
)
if variable_entity.type == VariableEntityType.NUMBER and isinstance(value, str):
# may raise ValueError if user_input_value is not a valid number # may raise ValueError if user_input_value is not a valid number
try: try:
if "." in user_input_value: if "." in value:
return float(user_input_value) return float(value)
else: else:
return int(user_input_value) return int(value)
except ValueError: except ValueError:
raise ValueError(f"{var.variable} in input form must be a valid number") raise ValueError(f"{variable_entity.variable} in input form must be a valid number")
if var.type == VariableEntityType.SELECT:
options = var.options
if user_input_value not in options:
raise ValueError(f"{var.variable} in input form must be one of the following: {options}")
elif var.type in {VariableEntityType.TEXT_INPUT, VariableEntityType.PARAGRAPH}:
if var.max_length and len(user_input_value) > var.max_length:
raise ValueError(f"{var.variable} in input form must be less than {var.max_length} characters")
elif var.type == VariableEntityType.FILE:
if not isinstance(user_input_value, dict) and not isinstance(user_input_value, File):
raise ValueError(f"{var.variable} in input form must be a file")
elif var.type == VariableEntityType.FILE_LIST:
if not (
isinstance(user_input_value, list)
and (
all(isinstance(item, dict) for item in user_input_value)
or all(isinstance(item, File) for item in user_input_value)
)
):
raise ValueError(f"{var.variable} in input form must be a list of files")
return user_input_value match variable_entity.type:
case VariableEntityType.SELECT:
if value not in variable_entity.options:
raise ValueError(
f"{variable_entity.variable} in input form must be one of the following: "
f"{variable_entity.options}"
)
case VariableEntityType.TEXT_INPUT | VariableEntityType.PARAGRAPH:
if variable_entity.max_length and len(value) > variable_entity.max_length:
raise ValueError(
f"{variable_entity.variable} in input form must be less than {variable_entity.max_length} "
"characters"
)
case VariableEntityType.FILE:
if not isinstance(value, dict) and not isinstance(value, File):
raise ValueError(f"{variable_entity.variable} in input form must be a file")
case VariableEntityType.FILE_LIST:
# if number of files exceeds the limit, raise ValueError
if not (
isinstance(value, list)
and (all(isinstance(item, dict) for item in value) or all(isinstance(item, File) for item in value))
):
raise ValueError(f"{variable_entity.variable} in input form must be a list of files")
if variable_entity.max_length and len(value) > variable_entity.max_length:
raise ValueError(
f"{variable_entity.variable} in input form must be less than {variable_entity.max_length} files"
)
return value
def _sanitize_value(self, value: Any) -> Any: def _sanitize_value(self, value: Any) -> Any:
if isinstance(value, str): if isinstance(value, str):

View File

@ -16,6 +16,7 @@ from core.app.entities.queue_entities import (
QueueIterationNextEvent, QueueIterationNextEvent,
QueueIterationStartEvent, QueueIterationStartEvent,
QueueNodeFailedEvent, QueueNodeFailedEvent,
QueueNodeInIterationFailedEvent,
QueueNodeStartedEvent, QueueNodeStartedEvent,
QueueNodeSucceededEvent, QueueNodeSucceededEvent,
QueueParallelBranchRunFailedEvent, QueueParallelBranchRunFailedEvent,
@ -275,7 +276,7 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa
if response: if response:
yield response yield response
elif isinstance(event, QueueNodeFailedEvent): elif isinstance(event, QueueNodeFailedEvent | QueueNodeInIterationFailedEvent):
workflow_node_execution = self._handle_workflow_node_execution_failed(event) workflow_node_execution = self._handle_workflow_node_execution_failed(event)
response = self._workflow_node_finish_to_stream_response( response = self._workflow_node_finish_to_stream_response(

View File

@ -9,6 +9,7 @@ from core.app.entities.queue_entities import (
QueueIterationNextEvent, QueueIterationNextEvent,
QueueIterationStartEvent, QueueIterationStartEvent,
QueueNodeFailedEvent, QueueNodeFailedEvent,
QueueNodeInIterationFailedEvent,
QueueNodeStartedEvent, QueueNodeStartedEvent,
QueueNodeSucceededEvent, QueueNodeSucceededEvent,
QueueParallelBranchRunFailedEvent, QueueParallelBranchRunFailedEvent,
@ -30,6 +31,7 @@ from core.workflow.graph_engine.entities.event import (
IterationRunNextEvent, IterationRunNextEvent,
IterationRunStartedEvent, IterationRunStartedEvent,
IterationRunSucceededEvent, IterationRunSucceededEvent,
NodeInIterationFailedEvent,
NodeRunFailedEvent, NodeRunFailedEvent,
NodeRunRetrieverResourceEvent, NodeRunRetrieverResourceEvent,
NodeRunStartedEvent, NodeRunStartedEvent,
@ -193,6 +195,7 @@ class WorkflowBasedAppRunner(AppRunner):
node_run_index=event.route_node_state.index, node_run_index=event.route_node_state.index,
predecessor_node_id=event.predecessor_node_id, predecessor_node_id=event.predecessor_node_id,
in_iteration_id=event.in_iteration_id, in_iteration_id=event.in_iteration_id,
parallel_mode_run_id=event.parallel_mode_run_id,
) )
) )
elif isinstance(event, NodeRunSucceededEvent): elif isinstance(event, NodeRunSucceededEvent):
@ -246,9 +249,40 @@ class WorkflowBasedAppRunner(AppRunner):
error=event.route_node_state.node_run_result.error error=event.route_node_state.node_run_result.error
if event.route_node_state.node_run_result and event.route_node_state.node_run_result.error if event.route_node_state.node_run_result and event.route_node_state.node_run_result.error
else "Unknown error", else "Unknown error",
execution_metadata=event.route_node_state.node_run_result.metadata
if event.route_node_state.node_run_result
else {},
in_iteration_id=event.in_iteration_id, in_iteration_id=event.in_iteration_id,
) )
) )
elif isinstance(event, NodeInIterationFailedEvent):
self._publish_event(
QueueNodeInIterationFailedEvent(
node_execution_id=event.id,
node_id=event.node_id,
node_type=event.node_type,
node_data=event.node_data,
parallel_id=event.parallel_id,
parallel_start_node_id=event.parallel_start_node_id,
parent_parallel_id=event.parent_parallel_id,
parent_parallel_start_node_id=event.parent_parallel_start_node_id,
start_at=event.route_node_state.start_at,
inputs=event.route_node_state.node_run_result.inputs
if event.route_node_state.node_run_result
else {},
process_data=event.route_node_state.node_run_result.process_data
if event.route_node_state.node_run_result
else {},
outputs=event.route_node_state.node_run_result.outputs
if event.route_node_state.node_run_result
else {},
execution_metadata=event.route_node_state.node_run_result.metadata
if event.route_node_state.node_run_result
else {},
in_iteration_id=event.in_iteration_id,
error=event.error,
)
)
elif isinstance(event, NodeRunStreamChunkEvent): elif isinstance(event, NodeRunStreamChunkEvent):
self._publish_event( self._publish_event(
QueueTextChunkEvent( QueueTextChunkEvent(
@ -326,6 +360,7 @@ class WorkflowBasedAppRunner(AppRunner):
index=event.index, index=event.index,
node_run_index=workflow_entry.graph_engine.graph_runtime_state.node_run_steps, node_run_index=workflow_entry.graph_engine.graph_runtime_state.node_run_steps,
output=event.pre_iteration_output, output=event.pre_iteration_output,
parallel_mode_run_id=event.parallel_mode_run_id,
) )
) )
elif isinstance(event, (IterationRunSucceededEvent | IterationRunFailedEvent)): elif isinstance(event, (IterationRunSucceededEvent | IterationRunFailedEvent)):

View File

@ -107,7 +107,8 @@ class QueueIterationNextEvent(AppQueueEvent):
"""parent parallel id if node is in parallel""" """parent parallel id if node is in parallel"""
parent_parallel_start_node_id: Optional[str] = None parent_parallel_start_node_id: Optional[str] = None
"""parent parallel start node id if node is in parallel""" """parent parallel start node id if node is in parallel"""
parallel_mode_run_id: Optional[str] = None
"""iteratoin run in parallel mode run id"""
node_run_index: int node_run_index: int
output: Optional[Any] = None # output for the current iteration output: Optional[Any] = None # output for the current iteration
@ -273,6 +274,8 @@ class QueueNodeStartedEvent(AppQueueEvent):
in_iteration_id: Optional[str] = None in_iteration_id: Optional[str] = None
"""iteration id if node is in iteration""" """iteration id if node is in iteration"""
start_at: datetime start_at: datetime
parallel_mode_run_id: Optional[str] = None
"""iteratoin run in parallel mode run id"""
class QueueNodeSucceededEvent(AppQueueEvent): class QueueNodeSucceededEvent(AppQueueEvent):
@ -306,6 +309,37 @@ class QueueNodeSucceededEvent(AppQueueEvent):
error: Optional[str] = None error: Optional[str] = None
class QueueNodeInIterationFailedEvent(AppQueueEvent):
"""
QueueNodeInIterationFailedEvent entity
"""
event: QueueEvent = QueueEvent.NODE_FAILED
node_execution_id: str
node_id: str
node_type: NodeType
node_data: BaseNodeData
parallel_id: Optional[str] = None
"""parallel id if node is in parallel"""
parallel_start_node_id: Optional[str] = None
"""parallel start node id if node is in parallel"""
parent_parallel_id: Optional[str] = None
"""parent parallel id if node is in parallel"""
parent_parallel_start_node_id: Optional[str] = None
"""parent parallel start node id if node is in parallel"""
in_iteration_id: Optional[str] = None
"""iteration id if node is in iteration"""
start_at: datetime
inputs: Optional[dict[str, Any]] = None
process_data: Optional[dict[str, Any]] = None
outputs: Optional[dict[str, Any]] = None
execution_metadata: Optional[dict[NodeRunMetadataKey, Any]] = None
error: str
class QueueNodeFailedEvent(AppQueueEvent): class QueueNodeFailedEvent(AppQueueEvent):
""" """
QueueNodeFailedEvent entity QueueNodeFailedEvent entity
@ -332,6 +366,7 @@ class QueueNodeFailedEvent(AppQueueEvent):
inputs: Optional[dict[str, Any]] = None inputs: Optional[dict[str, Any]] = None
process_data: Optional[dict[str, Any]] = None process_data: Optional[dict[str, Any]] = None
outputs: Optional[dict[str, Any]] = None outputs: Optional[dict[str, Any]] = None
execution_metadata: Optional[dict[NodeRunMetadataKey, Any]] = None
error: str error: str

View File

@ -244,6 +244,7 @@ class NodeStartStreamResponse(StreamResponse):
parent_parallel_id: Optional[str] = None parent_parallel_id: Optional[str] = None
parent_parallel_start_node_id: Optional[str] = None parent_parallel_start_node_id: Optional[str] = None
iteration_id: Optional[str] = None iteration_id: Optional[str] = None
parallel_run_id: Optional[str] = None
event: StreamEvent = StreamEvent.NODE_STARTED event: StreamEvent = StreamEvent.NODE_STARTED
workflow_run_id: str workflow_run_id: str
@ -432,6 +433,7 @@ class IterationNodeNextStreamResponse(StreamResponse):
extras: dict = {} extras: dict = {}
parallel_id: Optional[str] = None parallel_id: Optional[str] = None
parallel_start_node_id: Optional[str] = None parallel_start_node_id: Optional[str] = None
parallel_mode_run_id: Optional[str] = None
event: StreamEvent = StreamEvent.ITERATION_NEXT event: StreamEvent = StreamEvent.ITERATION_NEXT
workflow_run_id: str workflow_run_id: str

View File

@ -12,6 +12,7 @@ from core.app.entities.queue_entities import (
QueueIterationNextEvent, QueueIterationNextEvent,
QueueIterationStartEvent, QueueIterationStartEvent,
QueueNodeFailedEvent, QueueNodeFailedEvent,
QueueNodeInIterationFailedEvent,
QueueNodeStartedEvent, QueueNodeStartedEvent,
QueueNodeSucceededEvent, QueueNodeSucceededEvent,
QueueParallelBranchRunFailedEvent, QueueParallelBranchRunFailedEvent,
@ -35,6 +36,7 @@ from core.model_runtime.utils.encoders import jsonable_encoder
from core.ops.entities.trace_entity import TraceTaskName from core.ops.entities.trace_entity import TraceTaskName
from core.ops.ops_trace_manager import TraceQueueManager, TraceTask from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.tools.tool_manager import ToolManager from core.tools.tool_manager import ToolManager
from core.workflow.entities.node_entities import NodeRunMetadataKey
from core.workflow.enums import SystemVariableKey from core.workflow.enums import SystemVariableKey
from core.workflow.nodes import NodeType from core.workflow.nodes import NodeType
from core.workflow.nodes.tool.entities import ToolNodeData from core.workflow.nodes.tool.entities import ToolNodeData
@ -251,6 +253,12 @@ class WorkflowCycleManage:
workflow_node_execution.status = WorkflowNodeExecutionStatus.RUNNING.value workflow_node_execution.status = WorkflowNodeExecutionStatus.RUNNING.value
workflow_node_execution.created_by_role = workflow_run.created_by_role workflow_node_execution.created_by_role = workflow_run.created_by_role
workflow_node_execution.created_by = workflow_run.created_by workflow_node_execution.created_by = workflow_run.created_by
workflow_node_execution.execution_metadata = json.dumps(
{
NodeRunMetadataKey.PARALLEL_MODE_RUN_ID: event.parallel_mode_run_id,
NodeRunMetadataKey.ITERATION_ID: event.in_iteration_id,
}
)
workflow_node_execution.created_at = datetime.now(timezone.utc).replace(tzinfo=None) workflow_node_execution.created_at = datetime.now(timezone.utc).replace(tzinfo=None)
session.add(workflow_node_execution) session.add(workflow_node_execution)
@ -305,7 +313,9 @@ class WorkflowCycleManage:
return workflow_node_execution return workflow_node_execution
def _handle_workflow_node_execution_failed(self, event: QueueNodeFailedEvent) -> WorkflowNodeExecution: def _handle_workflow_node_execution_failed(
self, event: QueueNodeFailedEvent | QueueNodeInIterationFailedEvent
) -> WorkflowNodeExecution:
""" """
Workflow node execution failed Workflow node execution failed
:param event: queue node failed event :param event: queue node failed event
@ -318,16 +328,19 @@ class WorkflowCycleManage:
outputs = WorkflowEntry.handle_special_values(event.outputs) outputs = WorkflowEntry.handle_special_values(event.outputs)
finished_at = datetime.now(timezone.utc).replace(tzinfo=None) finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
elapsed_time = (finished_at - event.start_at).total_seconds() elapsed_time = (finished_at - event.start_at).total_seconds()
execution_metadata = (
json.dumps(jsonable_encoder(event.execution_metadata)) if event.execution_metadata else None
)
db.session.query(WorkflowNodeExecution).filter(WorkflowNodeExecution.id == workflow_node_execution.id).update( db.session.query(WorkflowNodeExecution).filter(WorkflowNodeExecution.id == workflow_node_execution.id).update(
{ {
WorkflowNodeExecution.status: WorkflowNodeExecutionStatus.FAILED.value, WorkflowNodeExecution.status: WorkflowNodeExecutionStatus.FAILED.value,
WorkflowNodeExecution.error: event.error, WorkflowNodeExecution.error: event.error,
WorkflowNodeExecution.inputs: json.dumps(inputs) if inputs else None, WorkflowNodeExecution.inputs: json.dumps(inputs) if inputs else None,
WorkflowNodeExecution.process_data: json.dumps(process_data) if event.process_data else None, WorkflowNodeExecution.process_data: json.dumps(event.process_data) if event.process_data else None,
WorkflowNodeExecution.outputs: json.dumps(outputs) if outputs else None, WorkflowNodeExecution.outputs: json.dumps(outputs) if outputs else None,
WorkflowNodeExecution.finished_at: finished_at, WorkflowNodeExecution.finished_at: finished_at,
WorkflowNodeExecution.elapsed_time: elapsed_time, WorkflowNodeExecution.elapsed_time: elapsed_time,
WorkflowNodeExecution.execution_metadata: execution_metadata,
} }
) )
@ -342,6 +355,7 @@ class WorkflowCycleManage:
workflow_node_execution.outputs = json.dumps(outputs) if outputs else None workflow_node_execution.outputs = json.dumps(outputs) if outputs else None
workflow_node_execution.finished_at = finished_at workflow_node_execution.finished_at = finished_at
workflow_node_execution.elapsed_time = elapsed_time workflow_node_execution.elapsed_time = elapsed_time
workflow_node_execution.execution_metadata = execution_metadata
self._wip_workflow_node_executions.pop(workflow_node_execution.node_execution_id) self._wip_workflow_node_executions.pop(workflow_node_execution.node_execution_id)
@ -448,6 +462,7 @@ class WorkflowCycleManage:
parent_parallel_id=event.parent_parallel_id, parent_parallel_id=event.parent_parallel_id,
parent_parallel_start_node_id=event.parent_parallel_start_node_id, parent_parallel_start_node_id=event.parent_parallel_start_node_id,
iteration_id=event.in_iteration_id, iteration_id=event.in_iteration_id,
parallel_run_id=event.parallel_mode_run_id,
), ),
) )
@ -464,7 +479,7 @@ class WorkflowCycleManage:
def _workflow_node_finish_to_stream_response( def _workflow_node_finish_to_stream_response(
self, self,
event: QueueNodeSucceededEvent | QueueNodeFailedEvent, event: QueueNodeSucceededEvent | QueueNodeFailedEvent | QueueNodeInIterationFailedEvent,
task_id: str, task_id: str,
workflow_node_execution: WorkflowNodeExecution, workflow_node_execution: WorkflowNodeExecution,
) -> Optional[NodeFinishStreamResponse]: ) -> Optional[NodeFinishStreamResponse]:
@ -608,6 +623,7 @@ class WorkflowCycleManage:
extras={}, extras={},
parallel_id=event.parallel_id, parallel_id=event.parallel_id,
parallel_start_node_id=event.parallel_start_node_id, parallel_start_node_id=event.parallel_start_node_id,
parallel_mode_run_id=event.parallel_mode_run_id,
), ),
) )
@ -633,7 +649,9 @@ class WorkflowCycleManage:
created_at=int(time.time()), created_at=int(time.time()),
extras={}, extras={},
inputs=event.inputs or {}, inputs=event.inputs or {},
status=WorkflowNodeExecutionStatus.SUCCEEDED, status=WorkflowNodeExecutionStatus.SUCCEEDED
if event.error is None
else WorkflowNodeExecutionStatus.FAILED,
error=None, error=None,
elapsed_time=(datetime.now(timezone.utc).replace(tzinfo=None) - event.start_at).total_seconds(), elapsed_time=(datetime.now(timezone.utc).replace(tzinfo=None) - event.start_at).total_seconds(),
total_tokens=event.metadata.get("total_tokens", 0) if event.metadata else 0, total_tokens=event.metadata.get("total_tokens", 0) if event.metadata else 0,

View File

@ -12,6 +12,10 @@ SSRF_PROXY_ALL_URL = os.getenv("SSRF_PROXY_ALL_URL", "")
SSRF_PROXY_HTTP_URL = os.getenv("SSRF_PROXY_HTTP_URL", "") SSRF_PROXY_HTTP_URL = os.getenv("SSRF_PROXY_HTTP_URL", "")
SSRF_PROXY_HTTPS_URL = os.getenv("SSRF_PROXY_HTTPS_URL", "") SSRF_PROXY_HTTPS_URL = os.getenv("SSRF_PROXY_HTTPS_URL", "")
SSRF_DEFAULT_MAX_RETRIES = int(os.getenv("SSRF_DEFAULT_MAX_RETRIES", "3")) SSRF_DEFAULT_MAX_RETRIES = int(os.getenv("SSRF_DEFAULT_MAX_RETRIES", "3"))
SSRF_DEFAULT_TIME_OUT = float(os.getenv("SSRF_DEFAULT_TIME_OUT", "5"))
SSRF_DEFAULT_CONNECT_TIME_OUT = float(os.getenv("SSRF_DEFAULT_CONNECT_TIME_OUT", "5"))
SSRF_DEFAULT_READ_TIME_OUT = float(os.getenv("SSRF_DEFAULT_READ_TIME_OUT", "5"))
SSRF_DEFAULT_WRITE_TIME_OUT = float(os.getenv("SSRF_DEFAULT_WRITE_TIME_OUT", "5"))
proxy_mounts = ( proxy_mounts = (
{ {
@ -32,6 +36,14 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
if "follow_redirects" not in kwargs: if "follow_redirects" not in kwargs:
kwargs["follow_redirects"] = allow_redirects kwargs["follow_redirects"] = allow_redirects
if "timeout" not in kwargs:
kwargs["timeout"] = httpx.Timeout(
SSRF_DEFAULT_TIME_OUT,
connect=SSRF_DEFAULT_CONNECT_TIME_OUT,
read=SSRF_DEFAULT_READ_TIME_OUT,
write=SSRF_DEFAULT_WRITE_TIME_OUT,
)
retries = 0 retries = 0
while retries <= max_retries: while retries <= max_retries:
try: try:

View File

@ -17,6 +17,7 @@ from core.errors.error import ProviderTokenNotInitError
from core.llm_generator.llm_generator import LLMGenerator from core.llm_generator.llm_generator import LLMGenerator
from core.model_manager import ModelInstance, ModelManager from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.model_entities import ModelType
from core.rag.cleaner.clean_processor import CleanProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.docstore.dataset_docstore import DatasetDocumentStore from core.rag.docstore.dataset_docstore import DatasetDocumentStore
from core.rag.extractor.entity.extract_setting import ExtractSetting from core.rag.extractor.entity.extract_setting import ExtractSetting
@ -597,26 +598,9 @@ class IndexingRunner:
rules = DatasetProcessRule.AUTOMATIC_RULES rules = DatasetProcessRule.AUTOMATIC_RULES
else: else:
rules = json.loads(processing_rule.rules) if processing_rule.rules else {} rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
document_text = CleanProcessor.clean(text, {"rules": rules})
if "pre_processing_rules" in rules: return document_text
pre_processing_rules = rules["pre_processing_rules"]
for pre_processing_rule in pre_processing_rules:
if pre_processing_rule["id"] == "remove_extra_spaces" and pre_processing_rule["enabled"] is True:
# Remove extra spaces
pattern = r"\n{3,}"
text = re.sub(pattern, "\n\n", text)
pattern = r"[\t\f\r\x20\u00a0\u1680\u180e\u2000-\u200a\u202f\u205f\u3000]{2,}"
text = re.sub(pattern, " ", text)
elif pre_processing_rule["id"] == "remove_urls_emails" and pre_processing_rule["enabled"] is True:
# Remove email
pattern = r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)"
text = re.sub(pattern, "", text)
# Remove URL
pattern = r"https?://[^\s]+"
text = re.sub(pattern, "", text)
return text
@staticmethod @staticmethod
def format_split_text(text): def format_split_text(text):

View File

@ -1,3 +1,4 @@
- claude-3-5-haiku-20241022
- claude-3-5-sonnet-20241022 - claude-3-5-sonnet-20241022
- claude-3-5-sonnet-20240620 - claude-3-5-sonnet-20240620
- claude-3-haiku-20240307 - claude-3-haiku-20240307

View File

@ -0,0 +1,38 @@
model: claude-3-5-haiku-20241022
label:
en_US: claude-3-5-haiku-20241022
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 200000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_tokens
use_template: max_tokens
required: true
default: 8192
min: 1
max: 8192
- name: response_format
use_template: response_format
pricing:
input: '1.00'
output: '5.00'
unit: '0.000001'
currency: USD

View File

@ -37,6 +37,17 @@ def _get_max_tokens(default: int, min_val: int, max_val: int) -> ParameterRule:
return rule return rule
def _get_o1_max_tokens(default: int, min_val: int, max_val: int) -> ParameterRule:
rule = ParameterRule(
name="max_completion_tokens",
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.MAX_TOKENS],
)
rule.default = default
rule.min = min_val
rule.max = max_val
return rule
class AzureBaseModel(BaseModel): class AzureBaseModel(BaseModel):
base_model_name: str base_model_name: str
entity: AIModelEntity entity: AIModelEntity
@ -1098,14 +1109,6 @@ LLM_BASE_MODELS = [
ModelPropertyKey.CONTEXT_SIZE: 128000, ModelPropertyKey.CONTEXT_SIZE: 128000,
}, },
parameter_rules=[ parameter_rules=[
ParameterRule(
name="temperature",
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE],
),
ParameterRule(
name="top_p",
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P],
),
ParameterRule( ParameterRule(
name="response_format", name="response_format",
label=I18nObject(zh_Hans="回复格式", en_US="response_format"), label=I18nObject(zh_Hans="回复格式", en_US="response_format"),
@ -1116,7 +1119,7 @@ LLM_BASE_MODELS = [
required=False, required=False,
options=["text", "json_object"], options=["text", "json_object"],
), ),
_get_max_tokens(default=512, min_val=1, max_val=32768), _get_o1_max_tokens(default=512, min_val=1, max_val=32768),
], ],
pricing=PriceConfig( pricing=PriceConfig(
input=15.00, input=15.00,
@ -1143,14 +1146,6 @@ LLM_BASE_MODELS = [
ModelPropertyKey.CONTEXT_SIZE: 128000, ModelPropertyKey.CONTEXT_SIZE: 128000,
}, },
parameter_rules=[ parameter_rules=[
ParameterRule(
name="temperature",
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE],
),
ParameterRule(
name="top_p",
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P],
),
ParameterRule( ParameterRule(
name="response_format", name="response_format",
label=I18nObject(zh_Hans="回复格式", en_US="response_format"), label=I18nObject(zh_Hans="回复格式", en_US="response_format"),
@ -1161,7 +1156,7 @@ LLM_BASE_MODELS = [
required=False, required=False,
options=["text", "json_object"], options=["text", "json_object"],
), ),
_get_max_tokens(default=512, min_val=1, max_val=65536), _get_o1_max_tokens(default=512, min_val=1, max_val=65536),
], ],
pricing=PriceConfig( pricing=PriceConfig(
input=3.00, input=3.00,

View File

@ -0,0 +1,60 @@
model: anthropic.claude-3-5-haiku-20241022-v1:0
label:
en_US: Claude 3.5 Haiku
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
- name: max_tokens
use_template: max_tokens
required: true
type: int
default: 8192
min: 1
max: 8192
help:
zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
# docs: https://docs.anthropic.com/claude/docs/system-prompts
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
- name: response_format
use_template: response_format
pricing:
input: '0.001'
output: '0.005'
unit: '0.001'
currency: USD

View File

@ -0,0 +1,60 @@
model: us.anthropic.claude-3-5-haiku-20241022-v1:0
label:
en_US: Claude 3.5 Haiku(US.Cross Region Inference)
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
- name: max_tokens
use_template: max_tokens
required: true
type: int
default: 4096
min: 1
max: 4096
help:
zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
# docs: https://docs.anthropic.com/claude/docs/system-prompts
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
- name: response_format
use_template: response_format
pricing:
input: '0.001'
output: '0.005'
unit: '0.001'
currency: USD

View File

@ -1,6 +1,7 @@
import logging import logging
from core.model_runtime.entities.model_entities import ModelType import requests
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider from core.model_runtime.model_providers.__base.model_provider import ModelProvider
@ -16,8 +17,18 @@ class GiteeAIProvider(ModelProvider):
:param credentials: provider credentials, credentials form defined in `provider_credential_schema`. :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
""" """
try: try:
model_instance = self.get_model_instance(ModelType.LLM) api_key = credentials.get("api_key")
model_instance.validate_credentials(model="Qwen2-7B-Instruct", credentials=credentials) if not api_key:
raise CredentialsValidateFailedError("Credentials validation failed: api_key not given")
# send a get request to validate the credentials
headers = {"Authorization": f"Bearer {api_key}"}
response = requests.get("https://ai.gitee.com/api/base/account/me", headers=headers, timeout=(10, 300))
if response.status_code != 200:
raise CredentialsValidateFailedError(
f"Credentials validation failed with status code {response.status_code}"
)
except CredentialsValidateFailedError as ex: except CredentialsValidateFailedError as ex:
raise ex raise ex
except Exception as ex: except Exception as ex:

Binary file not shown.

After

Width:  |  Height:  |  Size: 277 KiB

View File

@ -0,0 +1,15 @@
<svg width="68" height="24" viewBox="0 0 68 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="Gemini">
<path id="Union" fill-rule="evenodd" clip-rule="evenodd" d="M50.6875 4.37014C48.3498 4.59292 46.5349 6.41319 46.3337 8.72764C46.1446 6.44662 44.2677 4.56074 41.9805 4.3737C44.2762 4.1997 46.152 2.28299 46.3373 0C46.4882 2.28911 48.405 4.20047 50.6875 4.37014ZM15.4567 9.41141L13.9579 10.9076C9.92941 6.64892 2.69298 9.97287 3.17317 15.8112C3.22394 23.108 14.5012 24.4317 15.3628 16.8809H9.52096L9.50061 14.9149H17.3595C18.8163 23.1364 8.44367 27.0292 3.19453 21.238C0.847044 18.7556 0.363651 14.7682 1.83717 11.7212C4.1129 6.62089 11.6505 5.29845 15.4567 9.41141ZM45.5915 23.5989H47.6945C47.6944 22.9155 47.6945 22.2307 47.6946 21.5452V21.5325C47.6948 19.8907 47.695 18.2453 47.6924 16.6072C47.6914 15.9407 47.6161 15.2823 47.4024 14.647C46.4188 11.2828 41.4255 11.4067 39.8332 14.214C38.5637 11.4171 34.4009 11.5236 32.8538 14.0084L32.8082 13.9976V12.4806L32.4233 12.4804H32.4224C31.8687 12.4801 31.3324 12.4798 30.7949 12.4811V23.5848L32.8977 23.5672C32.8981 22.9411 32.8979 22.3122 32.8977 21.6822V21.6812V21.6802V21.6791V21.6781V21.6771V21.676V21.676V21.6759V21.6758V21.6757V21.6757V21.6756C32.8973 20.204 32.8969 18.7261 32.904 17.2614C32.8889 15.3646 34.5674 13.5687 36.5358 14.124C37.7794 14.3298 38.1851 15.6148 38.1761 16.7257C38.1821 17.7019 38.18 18.6824 38.178 19.6633V19.6638C38.1752 20.9756 38.1724 22.2881 38.1891 23.5919L40.2846 23.5731C40.2929 22.7511 40.2881 21.9245 40.2832 21.0966C40.2753 19.7402 40.2674 18.3805 40.317 17.0328C40.4418 15.2122 42.0141 13.6186 43.9064 14.1168C45.2685 14.3231 45.6136 15.7748 45.5882 16.9545C45.5938 18.4959 45.5929 20.0492 45.5921 21.5968V21.5991V21.6014V21.6037V21.606V21.6083V21.6106C45.5917 22.2749 45.5913 22.9382 45.5915 23.5989ZM20.6167 18.4408C20.5625 21.9486 25.2121 23.6996 27.2993 20.0558L29.1566 20.9592C27.8157 23.7067 24.2337 24.7424 21.5381 23.4213C18.0052 21.7253 17.41 16.5007 20.0334 13.7517C21.4609 12.1752 23.7291 11.7901 25.7206 12.3653C28.3408 13.1257 29.4974 15.8937 29.326 18.4399C27.5547 18.4415 25.7971 18.4412 24.0364 18.4409C22.8993 18.4407 21.7609 18.4405 20.6167 18.4408ZM27.1041 16.6957C26.7048 13.1033 21.2867 13.2256 20.7494 16.6957H27.1041ZM53.543 23.5999H55.6206L55.6206 22.4361C55.6205 20.7877 55.6205 19.1443 55.6207 17.4939C55.6208 16.8853 55.7234 16.297 56.0063 15.7531C56.6115 14.3862 58.1745 13.7002 59.5927 14.1774C60.7512 14.4455 61.2852 15.6069 61.2762 16.7154C61.2774 18.3497 61.2771 19.9826 61.2769 21.6162V21.6166V21.617V21.6174V21.6179L61.2766 23.6007H63.3698C63.3913 22.0924 63.3869 20.584 63.3826 19.0755V19.0754V19.0753V19.0753V19.0752C63.3799 18.1682 63.3773 17.2612 63.3803 16.3541C63.3796 15.8622 63.3103 15.3765 63.1698 14.9052C62.3248 11.5142 57.3558 11.2385 55.5828 14.0038L55.5336 13.9905V12.4917H53.539C53.4898 12.7313 53.4934 23.4113 53.543 23.5999ZM49.6211 12.4944H51.7065V23.5994H49.6211V12.4944ZM65.1035 23.5991H67.1831C67.2367 23.2198 67.2133 12.6566 67.1634 12.4983H65.1035V23.5991ZM52.1504 8.67829C52.1709 10.4847 49.2418 10.7058 49.1816 8.65714C49.2189 6.5948 52.2437 6.81331 52.1504 8.67829ZM66.1387 10.1324C64.2712 10.1609 64.1316 7.19881 66.1559 7.17114C68.1709 7.19817 68.0215 10.2087 66.1387 10.1324Z" fill="url(#paint0_linear_14286_118464)"/>
</g>
<defs>
<linearGradient id="paint0_linear_14286_118464" x1="-2" y1="0.999998" x2="67.9999" y2="27.5002" gradientUnits="userSpaceOnUse">
<stop stop-color="#7798E0"/>
<stop offset="0.210002" stop-color="#086FFF"/>
<stop offset="0.345945" stop-color="#086FFF"/>
<stop offset="0.591777" stop-color="#479AFF"/>
<stop offset="0.895892" stop-color="#B7C4FA"/>
<stop offset="1" stop-color="#B5C5F9"/>
</linearGradient>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 3.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 57 KiB

View File

@ -0,0 +1,11 @@
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="24" height="24" rx="6" fill="url(#paint0_linear_7301_16076)"/>
<path d="M20 12.0116C15.7043 12.42 12.3692 15.757 11.9995 20C11.652 15.8183 8.20301 12.361 4 12.0181C8.21855 11.6991 11.6656 8.1853 12.006 4C12.2833 8.19653 15.8057 11.7005 20 12.0116Z" fill="white" fill-opacity="0.88"/>
<defs>
<linearGradient id="paint0_linear_7301_16076" x1="-9" y1="29.5" x2="19.4387" y2="1.43791" gradientUnits="userSpaceOnUse">
<stop offset="0.192878" stop-color="#1C7DFF"/>
<stop offset="0.520213" stop-color="#1C69FF"/>
<stop offset="1" stop-color="#F0DCD6"/>
</linearGradient>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 689 B

View File

@ -0,0 +1,10 @@
import logging
from core.model_runtime.model_providers.__base.model_provider import ModelProvider
logger = logging.getLogger(__name__)
class GPUStackProvider(ModelProvider):
def validate_provider_credentials(self, credentials: dict) -> None:
pass

Some files were not shown because too many files have changed in this diff Show More