Mirror of https://github.com/langgenius/dify.git, synced 2024-11-16 03:32:23 +08:00.

Merge branch 'fix/chore-fix' into dev/plugin-deploy

Commit: 61cf6e1c7b
.github/workflows/api-tests.yml (vendored) — 33 changes

@@ -7,6 +7,7 @@ on:
     paths:
       - api/**
       - docker/**
       - .github/workflows/api-tests.yml

 concurrency:
   group: api-tests-${{ github.head_ref || github.run_id }}

@@ -27,16 +28,15 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4

+      - name: Install Poetry
+        uses: abatilo/actions-poetry@v3
+
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
-          cache-dependency-path: |
-            api/pyproject.toml
-            api/poetry.lock
-
-      - name: Install Poetry
-        uses: abatilo/actions-poetry@v3
+          cache: poetry
+          cache-dependency-path: api/poetry.lock

       - name: Check Poetry lockfile
         run: |

@@ -67,7 +67,7 @@ jobs:
         run: sh .github/workflows/expose_service_ports.sh

       - name: Set up Sandbox
-        uses: hoverkraft-tech/compose-action@v2.0.0
+        uses: hoverkraft-tech/compose-action@v2.0.2
         with:
           compose-file: |
             docker/docker-compose.middleware.yaml

@@ -77,22 +77,3 @@ jobs:
       - name: Run Workflow
         run: poetry run -C api bash dev/pytest/pytest_workflow.sh
-
-      - name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
-        uses: hoverkraft-tech/compose-action@v2.0.0
-        with:
-          compose-file: |
-            docker/docker-compose.yaml
-          services: |
-            weaviate
-            qdrant
-            couchbase-server
-            etcd
-            minio
-            milvus-standalone
-            pgvecto-rs
-            pgvector
-            chroma
-            elasticsearch
-      - name: Test Vector Stores
-        run: poetry run -C api bash dev/pytest/pytest_vdb.sh
.github/workflows/db-migration-test.yml (vendored) — 2 changes

@@ -43,7 +43,7 @@ jobs:
           cp middleware.env.example middleware.env

       - name: Set up Middlewares
-        uses: hoverkraft-tech/compose-action@v2.0.0
+        uses: hoverkraft-tech/compose-action@v2.0.2
         with:
           compose-file: |
             docker/docker-compose.middleware.yaml
.github/workflows/style.yml (vendored) — 8 changes

@@ -24,16 +24,16 @@ jobs:
         with:
           files: api/**

+      - name: Install Poetry
+        if: steps.changed-files.outputs.any_changed == 'true'
+        uses: abatilo/actions-poetry@v3
+
       - name: Set up Python
         uses: actions/setup-python@v5
         if: steps.changed-files.outputs.any_changed == 'true'
         with:
           python-version: '3.10'

-      - name: Install Poetry
-        if: steps.changed-files.outputs.any_changed == 'true'
-        uses: abatilo/actions-poetry@v3
-
       - name: Python dependencies
         if: steps.changed-files.outputs.any_changed == 'true'
         run: poetry install -C api --only lint
.github/workflows/vdb-tests.yml (vendored, new file) — 75 lines

@@ -0,0 +1,75 @@
name: Run VDB Tests

on:
  pull_request:
    branches:
      - main
    paths:
      - api/core/rag/datasource/**
      - docker/**
      - .github/workflows/vdb-tests.yml

concurrency:
  group: vdb-tests-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    name: VDB Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.10"
          - "3.11"
          - "3.12"

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install Poetry
        uses: abatilo/actions-poetry@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: poetry
          cache-dependency-path: api/poetry.lock

      - name: Check Poetry lockfile
        run: |
          poetry check -C api --lock
          poetry show -C api

      - name: Install dependencies
        run: poetry install -C api --with dev

      - name: Set up dotenvs
        run: |
          cp docker/.env.example docker/.env
          cp docker/middleware.env.example docker/middleware.env

      - name: Expose Service Ports
        run: sh .github/workflows/expose_service_ports.sh

      - name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
        uses: hoverkraft-tech/compose-action@v2.0.2
        with:
          compose-file: |
            docker/docker-compose.yaml
          services: |
            weaviate
            qdrant
            couchbase-server
            etcd
            minio
            milvus-standalone
            pgvecto-rs
            pgvector
            chroma
            elasticsearch

      - name: Test Vector Stores
        run: poetry run -C api bash dev/pytest/pytest_vdb.sh
.gitignore (vendored) — 1 change

@@ -175,6 +175,7 @@ docker/volumes/pgvector/data/*
 docker/volumes/pgvecto_rs/data/*
 docker/volumes/couchbase/*
 docker/volumes/oceanbase/*
+!docker/volumes/oceanbase/init.d

 docker/nginx/conf.d/default.conf
 docker/nginx/ssl/*
README.md — 73 changes

@@ -46,45 +46,18 @@
 </p>

-## Table of Content
-0. [Quick-Start🚀](https://github.com/langgenius/dify?tab=readme-ov-file#quick-start)
-1. [Intro📖](https://github.com/langgenius/dify?tab=readme-ov-file#intro)
-2. [How to use🔧](https://github.com/langgenius/dify?tab=readme-ov-file#using-dify)
-3. [Stay Ahead🏃](https://github.com/langgenius/dify?tab=readme-ov-file#staying-ahead)
-4. [Next Steps🏹](https://github.com/langgenius/dify?tab=readme-ov-file#next-steps)
-5. [Contributing💪](https://github.com/langgenius/dify?tab=readme-ov-file#contributing)
-6. [Community and Contact🏠](https://github.com/langgenius/dify?tab=readme-ov-file#community--contact)
-7. [Star-History📈](https://github.com/langgenius/dify?tab=readme-ov-file#star-history)
-8. [Security🔒](https://github.com/langgenius/dify?tab=readme-ov-file#security-disclosure)
-9. [License🤝](https://github.com/langgenius/dify?tab=readme-ov-file#license)
-
-> Make sure you read through this README before you start utilizing Dify😊
+Dify is an open-source LLM app development platform. Its intuitive interface combines agentic AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production.

 ## Quick start
-The quickest way to deploy Dify locally is to run our [docker-compose.yml](https://github.com/langgenius/dify/blob/main/docker/docker-compose.yaml). Follow the instructions to start in 5 minutes.
-
 > Before installing Dify, make sure your machine meets the following minimum system requirements:
 >
 >- CPU >= 2 Core
 >- RAM >= 4 GiB
->- Docker and Docker Compose Installed

 </br>

-Run the following command in your terminal to clone the whole repo.
-```bash
-git clone https://github.com/langgenius/dify.git
-```
-After cloning,run the following command one by one.
+The easiest way to start the Dify server is through [docker compose](docker/docker-compose.yaml). Before running Dify with the following commands, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:

 ```bash
 cd dify
 cd docker

@@ -92,13 +65,14 @@ cp .env.example .env
 docker compose up -d
 ```

-After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process. You will be asked to setup an admin account.
-For more info of quick setup, check [here](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose)
+After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process.

-## Intro
-Dify is an open-source LLM app development platform. Its intuitive interface combines AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production. Here's a list of the core features:
+</br> </br>
+#### Seeking help
+Please refer to our [FAQ](https://docs.dify.ai/getting-started/install-self-hosted/faqs) if you encounter problems setting up Dify. Reach out to [the community and us](#community--contact) if you are still having issues.
+
+> If you'd like to contribute to Dify or do additional development, refer to our [guide to deploying from source code](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)

+## Key features
 **1. Workflow**:
  Build and test powerful AI workflows on a visual canvas, leveraging all the following features and beyond.

@@ -149,20 +123,8 @@ Star Dify on GitHub and be instantly notified of new releases.

 ![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4)

-## Next steps
-
-Go to [quick-start](https://github.com/langgenius/dify?tab=readme-ov-file#quick-start) to setup your Dify or setup by source code.
-
-#### If you......
-If you forget your admin account, you can refer to this [guide](https://docs.dify.ai/getting-started/install-self-hosted/faqs#id-4.-how-to-reset-the-password-of-the-admin-account) to reset the password.
-
-> Use docker compose up without "-d" to enable logs printing out in your terminal. This might be useful if you have encountered unknow problems when using Dify.
-
-If you encountered system error and would like to acquire help in Github issues, make sure you always paste logs of the error in the request to accerate the conversation. Go to [Community & contact](https://github.com/langgenius/dify?tab=readme-ov-file#community--contact) for more information.
-
-> Please read the [Dify Documentation](https://docs.dify.ai/) for detailed how-to-use guidance. Most of the potential problems are explained in the doc.
-
-> If you'd like to contribute to Dify or make additional development, refer to our [guide to deploying from source code](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
+## Advanced Setup
+
+If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).

@@ -190,19 +152,18 @@ At the same time, please consider supporting Dify by sharing it on social media

 > We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c).

-**Contributors**
-
-<a href="https://github.com/langgenius/dify/graphs/contributors">
-  <img src="https://contrib.rocks/image?repo=langgenius/dify" />
-</a>
-
 ## Community & contact

 * [Github Discussion](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
 * [GitHub Issues](https://github.com/langgenius/dify/issues). Best for: bugs you encounter using Dify.AI, and feature proposals. See our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
 * [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
 * [X(Twitter)](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
-* Make sure a log, if possible, is attached to an error reported to maximize solution efficiency.
+
+**Contributors**
+
+<a href="https://github.com/langgenius/dify/graphs/contributors">
+  <img src="https://contrib.rocks/image?repo=langgenius/dify" />
+</a>

 ## Star history
@@ -121,7 +121,7 @@ WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
 CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*


-# Vector database configuration, support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash, lindorm
+# Vector database configuration, support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash, lindorm, oceanbase
 VECTOR_STORE=weaviate

 # Weaviate configuration

@@ -273,7 +273,7 @@ LINDORM_PASSWORD=admin
 OCEANBASE_VECTOR_HOST=127.0.0.1
 OCEANBASE_VECTOR_PORT=2881
 OCEANBASE_VECTOR_USER=root@test
-OCEANBASE_VECTOR_PASSWORD=
+OCEANBASE_VECTOR_PASSWORD=difyai123456
 OCEANBASE_VECTOR_DATABASE=test
 OCEANBASE_MEMORY_LIMIT=6G

@@ -320,9 +320,14 @@ ETL_TYPE=dify
 UNSTRUCTURED_API_URL=
 UNSTRUCTURED_API_KEY=

 #ssrf
 SSRF_PROXY_HTTP_URL=
 SSRF_PROXY_HTTPS_URL=
+SSRF_DEFAULT_MAX_RETRIES=3
+SSRF_DEFAULT_TIME_OUT=
+SSRF_DEFAULT_CONNECT_TIME_OUT=
+SSRF_DEFAULT_READ_TIME_OUT=
+SSRF_DEFAULT_WRITE_TIME_OUT=

 BATCH_UPLOAD_LIMIT=10
 KEYWORD_DATA_SOURCE_TYPE=database
@@ -76,13 +76,13 @@
 1. Install dependencies for both the backend and the test environment

    ```bash
-   poetry install --with dev
+   poetry install -C api --with dev
    ```

 2. Run the tests locally with mocked system environment variables in `tool.pytest_env` section in `pyproject.toml`

    ```bash
-   cd ../
    poetry run -C api bash dev/pytest/pytest_all_tests.sh
    ```
@@ -109,7 +109,7 @@ class CodeExecutionSandboxConfig(BaseSettings):
     )

     CODE_MAX_PRECISION: PositiveInt = Field(
-        description="mMaximum number of decimal places for floating-point numbers in code execution",
+        description="Maximum number of decimal places for floating-point numbers in code execution",
         default=20,
     )
@@ -339,6 +339,26 @@ class HttpConfig(BaseSettings):
         default=None,
     )

+    SSRF_DEFAULT_TIME_OUT: PositiveFloat = Field(
+        description="The default timeout period used for network requests (SSRF)",
+        default=5,
+    )
+
+    SSRF_DEFAULT_CONNECT_TIME_OUT: PositiveFloat = Field(
+        description="The default connect timeout period used for network requests (SSRF)",
+        default=5,
+    )
+
+    SSRF_DEFAULT_READ_TIME_OUT: PositiveFloat = Field(
+        description="The default read timeout period used for network requests (SSRF)",
+        default=5,
+    )
+
+    SSRF_DEFAULT_WRITE_TIME_OUT: PositiveFloat = Field(
+        description="The default write timeout period used for network requests (SSRF)",
+        default=5,
+    )
+
     RESPECT_XFORWARD_HEADERS_ENABLED: bool = Field(
         description="Enable or disable the X-Forwarded-For Proxy Fix middleware from Werkzeug"
         " to respect X-* headers to redirect clients",
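The four new fields follow the usual pydantic-settings pattern: each class attribute is populated from the environment variable of the same name and falls back to its declared default. A minimal sketch of that pattern, assuming pydantic v2 with the separate pydantic-settings package (the class name below is illustrative, not Dify's actual config module):

```python
from pydantic import Field, PositiveFloat
from pydantic_settings import BaseSettings


class SsrfTimeoutSettings(BaseSettings):
    # Each field reads the environment variable of the same name.
    SSRF_DEFAULT_TIME_OUT: PositiveFloat = Field(
        description="The default timeout period used for network requests (SSRF)",
        default=5,
    )
    SSRF_DEFAULT_CONNECT_TIME_OUT: PositiveFloat = Field(
        description="The default connect timeout period used for network requests (SSRF)",
        default=5,
    )


# With SSRF_DEFAULT_TIME_OUT=10 exported in the environment this prints 10.0;
# otherwise it prints the declared default of 5.0.
print(SsrfTimeoutSettings().SSRF_DEFAULT_TIME_OUT)
```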
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):

     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="0.10.2",
+        default="0.11.0",
     )

     COMMIT_SHA: str = Field(
@@ -62,3 +62,27 @@ class EmailSendIpLimitError(BaseHTTPException):
     error_code = "email_send_ip_limit"
     description = "Too many emails have been sent from this IP address recently. Please try again later."
     code = 429
+
+
+class FileTooLargeError(BaseHTTPException):
+    error_code = "file_too_large"
+    description = "File size exceeded. {message}"
+    code = 413
+
+
+class UnsupportedFileTypeError(BaseHTTPException):
+    error_code = "unsupported_file_type"
+    description = "File type not allowed."
+    code = 415
+
+
+class TooManyFilesError(BaseHTTPException):
+    error_code = "too_many_files"
+    description = "Only one file is allowed."
+    code = 400
+
+
+class NoFileUploadedError(BaseHTTPException):
+    error_code = "no_file_uploaded"
+    description = "Please upload your file."
+    code = 400
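These controller errors are declarative: each subclass only overrides an error code, a human-readable description, and an HTTP status, and shared machinery turns a raised instance into a JSON error response. A toy analogue of the idea built on plain werkzeug (BaseHTTPException is Dify's own helper; the sketch below is not its real implementation):

```python
from werkzeug.exceptions import HTTPException


class FileTooLarge(HTTPException):
    # Declarative subclass: the framework maps it to a 413 response.
    code = 413
    description = "File size exceeded."


def upload(size: int, limit: int = 10_000_000) -> str:
    if size > limit:
        raise FileTooLarge()
    return "ok"


try:
    upload(size=50_000_000)
except HTTPException as e:
    print(e.code, e.description)  # 413 File size exceeded.
```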
@@ -15,7 +15,7 @@ from fields.file_fields import file_fields, upload_config_fields
 from libs.login import login_required
 from services.file_service import FileService

-from .errors import (
+from .error import (
     FileTooLargeError,
     NoFileUploadedError,
     TooManyFilesError,
Deleted file — @@ -1,25 +0,0 @@
from libs.exception import BaseHTTPException


class FileTooLargeError(BaseHTTPException):
    error_code = "file_too_large"
    description = "File size exceeded. {message}"
    code = 413


class UnsupportedFileTypeError(BaseHTTPException):
    error_code = "unsupported_file_type"
    description = "File type not allowed."
    code = 415


class TooManyFilesError(BaseHTTPException):
    error_code = "too_many_files"
    description = "Only one file is allowed."
    code = 400


class NoFileUploadedError(BaseHTTPException):
    error_code = "no_file_uploaded"
    description = "Please upload your file."
    code = 400
@@ -1,9 +1,11 @@
 import urllib.parse
 from typing import cast

+import httpx
 from flask_login import current_user
 from flask_restful import Resource, marshal_with, reqparse

+import services
 from controllers.common import helpers
 from core.file import helpers as file_helpers
 from core.helper import ssrf_proxy

@@ -11,19 +13,25 @@ from fields.file_fields import file_fields_with_signed_url, remote_file_info_fields
 from models.account import Account
 from services.file_service import FileService

+from .error import (
+    FileTooLargeError,
+    UnsupportedFileTypeError,
+)
+

 class RemoteFileInfoApi(Resource):
     @marshal_with(remote_file_info_fields)
     def get(self, url):
         decoded_url = urllib.parse.unquote(url)
-        try:
-            response = ssrf_proxy.head(decoded_url)
-            return {
-                "file_type": response.headers.get("Content-Type", "application/octet-stream"),
-                "file_length": int(response.headers.get("Content-Length", 0)),
-            }
-        except Exception as e:
-            return {"error": str(e)}, 400
+        resp = ssrf_proxy.head(decoded_url)
+        if resp.status_code != httpx.codes.OK:
+            # failed back to get method
+            resp = ssrf_proxy.get(decoded_url, timeout=3)
+            resp.raise_for_status()
+        return {
+            "file_type": resp.headers.get("Content-Type", "application/octet-stream"),
+            "file_length": int(resp.headers.get("Content-Length", 0)),
+        }


 class RemoteFileUploadApi(Resource):
@@ -35,17 +43,17 @@ class RemoteFileUploadApi(Resource):

         url = args["url"]

-        response = ssrf_proxy.head(url)
-        response.raise_for_status()
+        resp = ssrf_proxy.head(url=url)
+        if resp.status_code != httpx.codes.OK:
+            resp = ssrf_proxy.get(url=url, timeout=3)
+            resp.raise_for_status()

-        file_info = helpers.guess_file_info_from_response(response)
+        file_info = helpers.guess_file_info_from_response(resp)

         if not FileService.is_file_size_within_limit(extension=file_info.extension, file_size=file_info.size):
-            return {"error": "File size exceeded"}, 400
+            raise FileTooLargeError

-        response = ssrf_proxy.get(url)
-        response.raise_for_status()
-        content = response.content
+        content = resp.content if resp.request.method == "GET" else ssrf_proxy.get(url).content

         try:
             user = cast(Account, current_user)

@@ -56,8 +64,10 @@ class RemoteFileUploadApi(Resource):
                 user=user,
                 source_url=url,
             )
-        except Exception as e:
-            return {"error": str(e)}, 400
+        except services.errors.file.FileTooLargeError as file_too_large_error:
+            raise FileTooLargeError(file_too_large_error.description)
+        except services.errors.file.UnsupportedFileTypeError:
+            raise UnsupportedFileTypeError()

         return {
             "id": upload_file.id,
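Both upload endpoints now share the same probing strategy: try a cheap HEAD request first, fall back to a short-timeout GET only when the server rejects HEAD, and reuse that GET body later so the file is never downloaded twice. A standalone sketch of the pattern using plain httpx (the function name and URL are illustrative):

```python
import httpx


def probe_remote_file(url: str) -> httpx.Response:
    """Try HEAD first; fall back to GET for servers that reject HEAD."""
    resp = httpx.head(url, follow_redirects=True)
    if resp.status_code != httpx.codes.OK:
        # Some servers answer HEAD with 405/403; retry with a real GET.
        resp = httpx.get(url, timeout=3, follow_redirects=True)
        resp.raise_for_status()
    return resp


resp = probe_remote_file("https://example.com/report.pdf")
print(resp.headers.get("Content-Type", "application/octet-stream"))
# Reuse the body only if it was actually fetched with GET:
content = resp.content if resp.request.method == "GET" else httpx.get(resp.request.url).content
```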
@@ -41,7 +41,6 @@ class FileApi(Resource):
                 content=file.read(),
                 mimetype=file.mimetype,
                 user=end_user,
-                source="datasets",
             )
         except services.errors.file.FileTooLargeError as file_too_large_error:
             raise FileTooLargeError(file_too_large_error.description)
@@ -1,7 +1,9 @@
 import urllib.parse

+import httpx
 from flask_restful import marshal_with, reqparse

+import services
 from controllers.common import helpers
 from controllers.web.wraps import WebApiResource
 from core.file import helpers as file_helpers

@@ -9,19 +11,22 @@ from core.helper import ssrf_proxy
 from fields.file_fields import file_fields_with_signed_url, remote_file_info_fields
 from services.file_service import FileService

+from .error import FileTooLargeError, UnsupportedFileTypeError
+

 class RemoteFileInfoApi(WebApiResource):
     @marshal_with(remote_file_info_fields)
-    def get(self, url):
+    def get(self, app_model, end_user, url):
         decoded_url = urllib.parse.unquote(url)
-        try:
-            response = ssrf_proxy.head(decoded_url)
-            return {
-                "file_type": response.headers.get("Content-Type", "application/octet-stream"),
-                "file_length": int(response.headers.get("Content-Length", -1)),
-            }
-        except Exception as e:
-            return {"error": str(e)}, 400
+        resp = ssrf_proxy.head(decoded_url)
+        if resp.status_code != httpx.codes.OK:
+            # failed back to get method
+            resp = ssrf_proxy.get(decoded_url, timeout=3)
+            resp.raise_for_status()
+        return {
+            "file_type": resp.headers.get("Content-Type", "application/octet-stream"),
+            "file_length": int(resp.headers.get("Content-Length", -1)),
+        }


 class RemoteFileUploadApi(WebApiResource):

@@ -33,28 +38,30 @@ class RemoteFileUploadApi(WebApiResource):

         url = args["url"]

-        response = ssrf_proxy.head(url)
-        response.raise_for_status()
+        resp = ssrf_proxy.head(url=url)
+        if resp.status_code != httpx.codes.OK:
+            resp = ssrf_proxy.get(url=url, timeout=3)
+            resp.raise_for_status()

-        file_info = helpers.guess_file_info_from_response(response)
+        file_info = helpers.guess_file_info_from_response(resp)

         if not FileService.is_file_size_within_limit(extension=file_info.extension, file_size=file_info.size):
-            return {"error": "File size exceeded"}, 400
+            raise FileTooLargeError

-        response = ssrf_proxy.get(url)
-        response.raise_for_status()
-        content = response.content
+        content = resp.content if resp.request.method == "GET" else ssrf_proxy.get(url).content

         try:
             upload_file = FileService.upload_file(
                 filename=file_info.filename,
                 content=content,
                 mimetype=file_info.mimetype,
-                user=end_user,  # Use end_user instead of current_user
+                user=end_user,
                 source_url=url,
             )
-        except Exception as e:
-            return {"error": str(e)}, 400
+        except services.errors.file.FileTooLargeError as file_too_large_error:
+            raise FileTooLargeError(file_too_large_error.description)
+        except services.errors.file.UnsupportedFileTypeError:
+            raise UnsupportedFileTypeError

         return {
             "id": upload_file.id,
@@ -12,6 +12,10 @@ SSRF_PROXY_ALL_URL = os.getenv("SSRF_PROXY_ALL_URL", "")
 SSRF_PROXY_HTTP_URL = os.getenv("SSRF_PROXY_HTTP_URL", "")
 SSRF_PROXY_HTTPS_URL = os.getenv("SSRF_PROXY_HTTPS_URL", "")
 SSRF_DEFAULT_MAX_RETRIES = int(os.getenv("SSRF_DEFAULT_MAX_RETRIES", "3"))
+SSRF_DEFAULT_TIME_OUT = float(os.getenv("SSRF_DEFAULT_TIME_OUT", "5"))
+SSRF_DEFAULT_CONNECT_TIME_OUT = float(os.getenv("SSRF_DEFAULT_CONNECT_TIME_OUT", "5"))
+SSRF_DEFAULT_READ_TIME_OUT = float(os.getenv("SSRF_DEFAULT_READ_TIME_OUT", "5"))
+SSRF_DEFAULT_WRITE_TIME_OUT = float(os.getenv("SSRF_DEFAULT_WRITE_TIME_OUT", "5"))

 proxy_mounts = (
     {

@@ -32,6 +36,14 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
     if "follow_redirects" not in kwargs:
         kwargs["follow_redirects"] = allow_redirects

+    if "timeout" not in kwargs:
+        kwargs["timeout"] = httpx.Timeout(
+            SSRF_DEFAULT_TIME_OUT,
+            connect=SSRF_DEFAULT_CONNECT_TIME_OUT,
+            read=SSRF_DEFAULT_READ_TIME_OUT,
+            write=SSRF_DEFAULT_WRITE_TIME_OUT,
+        )
+
     retries = 0
     while retries <= max_retries:
         try:
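httpx distinguishes connect, read, and write phases rather than using one flat deadline, which is why the module builds a granular httpx.Timeout and injects it only when the caller has not supplied one. The same defaulting pattern in isolation (names are illustrative):

```python
import httpx

DEFAULT_TIMEOUT = httpx.Timeout(5.0, connect=5.0, read=5.0, write=5.0)


def request_with_defaults(method: str, url: str, **kwargs) -> httpx.Response:
    # Honor an explicit caller-supplied timeout; otherwise fall back to the
    # granular default, mirroring the `if "timeout" not in kwargs` check above.
    kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
    return httpx.request(method, url, **kwargs)


print(request_with_defaults("GET", "https://example.com").status_code)
```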
Deleted file — @@ -1,61 +0,0 @@
model: anthropic.claude-3-5-haiku-20241022-v1:0
label:
  en_US: Claude 3.5 Haiku
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 8192
    min: 1
    max: 8192
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  # docs: https://docs.anthropic.com/claude/docs/system-prompts
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.001'
  output: '0.005'
  unit: '0.001'
  currency: USD
Deleted file — @@ -1,60 +0,0 @@
model: anthropic.claude-3-5-sonnet-20241022-v2:0
label:
  en_US: Claude 3.5 Sonnet V2
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.003'
  output: '0.015'
  unit: '0.001'
  currency: USD
Deleted file — @@ -1,60 +0,0 @@
model: eu.anthropic.claude-3-5-sonnet-20241022-v2:0
label:
  en_US: Claude 3.5 Sonnet V2(EU.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.003'
  output: '0.015'
  unit: '0.001'
  currency: USD
Deleted file — @@ -1,61 +0,0 @@
model: us.anthropic.claude-3-5-haiku-20241022-v1:0
label:
  en_US: Claude 3.5 Haiku(US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  # docs: https://docs.anthropic.com/claude/docs/system-prompts
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.001'
  output: '0.005'
  unit: '0.001'
  currency: USD
Deleted file — @@ -1,60 +0,0 @@
model: us.anthropic.claude-3-5-sonnet-20241022-v2:0
label:
  en_US: Claude 3.5 Sonnet V2(US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.003'
  output: '0.015'
  unit: '0.001'
  currency: USD
api/core/workflow/nodes/iteration/exc.py (new file) — 22 lines

@@ -0,0 +1,22 @@
class IterationNodeError(ValueError):
    """Base class for iteration node errors."""


class IteratorVariableNotFoundError(IterationNodeError):
    """Raised when the iterator variable is not found."""


class InvalidIteratorValueError(IterationNodeError):
    """Raised when the iterator value is invalid."""


class StartNodeIdNotFoundError(IterationNodeError):
    """Raised when the start node ID is not found."""


class IterationGraphNotFoundError(IterationNodeError):
    """Raised when the iteration graph is not found."""


class IterationIndexNotFoundError(IterationNodeError):
    """Raised when the iteration index is not found."""
@@ -38,6 +38,15 @@ from core.workflow.nodes.event import NodeEvent, RunCompletedEvent
 from core.workflow.nodes.iteration.entities import ErrorHandleMode, IterationNodeData
 from models.workflow import WorkflowNodeExecutionStatus

+from .exc import (
+    InvalidIteratorValueError,
+    IterationGraphNotFoundError,
+    IterationIndexNotFoundError,
+    IterationNodeError,
+    IteratorVariableNotFoundError,
+    StartNodeIdNotFoundError,
+)
+
 if TYPE_CHECKING:
     from core.workflow.graph_engine.graph_engine import GraphEngine
 logger = logging.getLogger(__name__)

@@ -69,7 +78,7 @@ class IterationNode(BaseNode[IterationNodeData]):
         iterator_list_segment = self.graph_runtime_state.variable_pool.get(self.node_data.iterator_selector)

         if not iterator_list_segment:
-            raise ValueError(f"Iterator variable {self.node_data.iterator_selector} not found")
+            raise IteratorVariableNotFoundError(f"Iterator variable {self.node_data.iterator_selector} not found")

         if len(iterator_list_segment.value) == 0:
             yield RunCompletedEvent(

@@ -83,14 +92,14 @@ class IterationNode(BaseNode[IterationNodeData]):
         iterator_list_value = iterator_list_segment.to_object()

         if not isinstance(iterator_list_value, list):
-            raise ValueError(f"Invalid iterator value: {iterator_list_value}, please provide a list.")
+            raise InvalidIteratorValueError(f"Invalid iterator value: {iterator_list_value}, please provide a list.")

         inputs = {"iterator_selector": iterator_list_value}

         graph_config = self.graph_config

         if not self.node_data.start_node_id:
-            raise ValueError(f"field start_node_id in iteration {self.node_id} not found")
+            raise StartNodeIdNotFoundError(f"field start_node_id in iteration {self.node_id} not found")

         root_node_id = self.node_data.start_node_id

@@ -98,7 +107,7 @@ class IterationNode(BaseNode[IterationNodeData]):
         iteration_graph = Graph.init(graph_config=graph_config, root_node_id=root_node_id)

         if not iteration_graph:
-            raise ValueError("iteration graph not found")
+            raise IterationGraphNotFoundError("iteration graph not found")

         variable_pool = self.graph_runtime_state.variable_pool

@@ -222,9 +231,9 @@ class IterationNode(BaseNode[IterationNodeData]):
                     status=WorkflowNodeExecutionStatus.SUCCEEDED, outputs={"output": jsonable_encoder(outputs)}
                 )
             )
-        except Exception as e:
+        except IterationNodeError as e:
             # iteration run failed
-            logger.exception("Iteration run failed")
+            logger.warning("Iteration run failed")
             yield IterationRunFailedEvent(
                 iteration_id=self.id,
                 iteration_node_id=self.node_id,

@@ -272,7 +281,7 @@ class IterationNode(BaseNode[IterationNodeData]):
         iteration_graph = Graph.init(graph_config=graph_config, root_node_id=node_data.start_node_id)

         if not iteration_graph:
-            raise ValueError("iteration graph not found")
+            raise IterationGraphNotFoundError("iteration graph not found")

         for sub_node_id, sub_node_config in iteration_graph.node_id_config_mapping.items():
             if sub_node_config.get("data", {}).get("iteration_id") != node_id:

@@ -357,7 +366,7 @@ class IterationNode(BaseNode[IterationNodeData]):
         next_index = int(current_index) + 1

         if current_index is None:
-            raise ValueError(f"iteration {self.node_id} current index not found")
+            raise IterationIndexNotFoundError(f"iteration {self.node_id} current index not found")
         for event in rst:
             if isinstance(event, (BaseNodeEvent | BaseParallelBranchEvent)) and not event.in_iteration_id:
                 event.in_iteration_id = self.node_id

@@ -484,8 +493,8 @@ class IterationNode(BaseNode[IterationNodeData]):
                 pre_iteration_output=jsonable_encoder(current_iteration_output) if current_iteration_output else None,
             )

-        except Exception as e:
-            logger.exception(f"Iteration run failed:{str(e)}")
+        except IterationNodeError as e:
+            logger.warning(f"Iteration run failed:{str(e)}")
             yield IterationRunFailedEvent(
                 iteration_id=self.id,
                 iteration_node_id=self.node_id,
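The common thread of this refactor is a per-node exception hierarchy rooted in ValueError: the run loop can then catch expected domain failures narrowly and log a warning, while genuinely unexpected bugs keep their full tracebacks. A toy sketch of the pattern using only the standard library:

```python
class IterationNodeError(ValueError):
    """Base class for iteration node errors."""


class IteratorVariableNotFoundError(IterationNodeError):
    """Raised when the iterator variable is not found."""


def run_iteration(variables: dict) -> list:
    if "items" not in variables:
        raise IteratorVariableNotFoundError("Iterator variable 'items' not found")
    return [x * 2 for x in variables["items"]]


try:
    run_iteration({})
except IterationNodeError as e:
    # Expected domain failure: report it without a full traceback.
    print(f"Iteration run failed: {e}")
```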
api/core/workflow/nodes/knowledge_retrieval/exc.py (new file) — 18 lines

@@ -0,0 +1,18 @@
class KnowledgeRetrievalNodeError(ValueError):
    """Base class for KnowledgeRetrievalNode errors."""


class ModelNotExistError(KnowledgeRetrievalNodeError):
    """Raised when the model does not exist."""


class ModelCredentialsNotInitializedError(KnowledgeRetrievalNodeError):
    """Raised when the model credentials are not initialized."""


class ModelNotSupportedError(KnowledgeRetrievalNodeError):
    """Raised when the model is not supported."""


class ModelQuotaExceededError(KnowledgeRetrievalNodeError):
    """Raised when the model provider quota is exceeded."""
@@ -8,7 +8,6 @@ from core.app.app_config.entities import DatasetRetrieveConfigEntity
 from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
 from core.entities.agent_entities import PlanningStrategy
 from core.entities.model_entities import ModelStatus
-from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_manager import ModelInstance, ModelManager
 from core.model_runtime.entities.model_entities import ModelFeature, ModelType
 from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel

@@ -18,11 +17,19 @@ from core.variables import StringSegment
 from core.workflow.entities.node_entities import NodeRunResult
 from core.workflow.nodes.base import BaseNode
 from core.workflow.nodes.enums import NodeType
-from core.workflow.nodes.knowledge_retrieval.entities import KnowledgeRetrievalNodeData
 from extensions.ext_database import db
 from models.dataset import Dataset, Document, DocumentSegment
 from models.workflow import WorkflowNodeExecutionStatus

+from .entities import KnowledgeRetrievalNodeData
+from .exc import (
+    KnowledgeRetrievalNodeError,
+    ModelCredentialsNotInitializedError,
+    ModelNotExistError,
+    ModelNotSupportedError,
+    ModelQuotaExceededError,
+)
+
 logger = logging.getLogger(__name__)

 default_retrieval_model = {

@@ -61,8 +68,8 @@ class KnowledgeRetrievalNode(BaseNode[KnowledgeRetrievalNodeData]):
                 status=WorkflowNodeExecutionStatus.SUCCEEDED, inputs=variables, process_data=None, outputs=outputs
             )

-        except Exception as e:
-            logger.exception("Error when running knowledge retrieval node")
+        except KnowledgeRetrievalNodeError as e:
+            logger.warning("Error when running knowledge retrieval node")
             return NodeRunResult(status=WorkflowNodeExecutionStatus.FAILED, inputs=variables, error=str(e))

     def _fetch_dataset_retriever(self, node_data: KnowledgeRetrievalNodeData, query: str) -> list[dict[str, Any]]:

@@ -295,14 +302,14 @@ class KnowledgeRetrievalNode(BaseNode[KnowledgeRetrievalNodeData]):
         )

         if provider_model is None:
-            raise ValueError(f"Model {model_name} not exist.")
+            raise ModelNotExistError(f"Model {model_name} not exist.")

         if provider_model.status == ModelStatus.NO_CONFIGURE:
-            raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.")
+            raise ModelCredentialsNotInitializedError(f"Model {model_name} credentials is not initialized.")
         elif provider_model.status == ModelStatus.NO_PERMISSION:
-            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} currently not support.")
+            raise ModelNotSupportedError(f"Dify Hosted OpenAI {model_name} currently not support.")
         elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
-            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")
+            raise ModelQuotaExceededError(f"Model provider {provider_name} quota exceeded.")

         # model config
         completion_params = node_data.single_retrieval_config.model.completion_params

@@ -314,12 +321,12 @@ class KnowledgeRetrievalNode(BaseNode[KnowledgeRetrievalNodeData]):
         # get model mode
         model_mode = node_data.single_retrieval_config.model.mode
         if not model_mode:
-            raise ValueError("LLM mode is required.")
+            raise ModelNotExistError("LLM mode is required.")

         model_schema = model_type_instance.get_model_schema(model_name, model_credentials)

         if not model_schema:
-            raise ValueError(f"Model {model_name} not exist.")
+            raise ModelNotExistError(f"Model {model_name} not exist.")

         return model_instance, ModelConfigWithCredentialsEntity(
             provider=provider_name,
api/core/workflow/nodes/question_classifier/exc.py (new file) — 6 lines

@@ -0,0 +1,6 @@
class QuestionClassifierNodeError(ValueError):
    """Base class for QuestionClassifierNode errors."""


class InvalidModelTypeError(QuestionClassifierNodeError):
    """Raised when the model is not a Large Language Model."""
@@ -4,6 +4,7 @@ from collections.abc import Mapping, Sequence
 from typing import TYPE_CHECKING, Any, Optional, cast

 from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
+from core.llm_generator.output_parser.errors import OutputParserError
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
 from core.model_runtime.entities import LLMUsage, ModelPropertyKey, PromptMessageRole

@@ -24,6 +25,7 @@ from libs.json_in_md_parser import parse_and_check_json_markdown
 from models.workflow import WorkflowNodeExecutionStatus

 from .entities import QuestionClassifierNodeData
+from .exc import InvalidModelTypeError
 from .template_prompts import (
     QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1,
     QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2,

@@ -124,7 +126,7 @@ class QuestionClassifierNode(LLMNode):
             category_name = classes_map[category_id_result]
             category_id = category_id_result

-        except Exception:
+        except OutputParserError:
             logging.error(f"Failed to parse result text: {result_text}")
         try:
             process_data = {

@@ -309,4 +311,4 @@ class QuestionClassifierNode(LLMNode):
             )

         else:
-            raise ValueError(f"Model mode {model_mode} not support.")
+            raise InvalidModelTypeError(f"Model mode {model_mode} not support.")
api/core/workflow/nodes/tool/exc.py (new file) — 16 lines

@@ -0,0 +1,16 @@
class ToolNodeError(ValueError):
    """Base exception for tool node errors."""

    pass


class ToolParameterError(ToolNodeError):
    """Exception raised for errors in tool parameters."""

    pass


class ToolFileError(ToolNodeError):
    """Exception raised for errors related to tool files."""

    pass
@@ -6,7 +6,7 @@ from sqlalchemy import select
 from sqlalchemy.orm import Session

 from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler
-from core.file.models import File, FileTransferMethod, FileType
+from core.file import File, FileTransferMethod, FileType
 from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
 from core.tools.tool_engine import ToolEngine
 from core.tools.tool_manager import ToolManager

@@ -19,12 +19,18 @@ from core.workflow.enums import SystemVariableKey
 from core.workflow.nodes.base import BaseNode
 from core.workflow.nodes.enums import NodeType
 from core.workflow.nodes.event import RunCompletedEvent, RunStreamChunkEvent
-from core.workflow.nodes.tool.entities import ToolNodeData
 from core.workflow.utils.variable_template_parser import VariableTemplateParser
 from extensions.ext_database import db
 from models.tools import ToolFile
 from models.workflow import WorkflowNodeExecutionStatus

+from .entities import ToolNodeData
+from .exc import (
+    ToolFileError,
+    ToolNodeError,
+    ToolParameterError,
+)
+

 class ToolNode(BaseNode[ToolNodeData]):
     """

@@ -49,7 +55,7 @@ class ToolNode(BaseNode[ToolNodeData]):
             tool_runtime = ToolManager.get_workflow_tool_runtime(
                 self.tenant_id, self.app_id, self.node_id, self.node_data, self.invoke_from
             )
-        except Exception as e:
+        except ToolNodeError as e:
             yield RunCompletedEvent(
                 run_result=NodeRunResult(
                     status=WorkflowNodeExecutionStatus.FAILED,

@@ -58,7 +64,6 @@ class ToolNode(BaseNode[ToolNodeData]):
                     error=f"Failed to get tool runtime: {str(e)}",
                 )
             )
-
             return

         # get parameters
         tool_parameters = tool_runtime.get_merged_runtime_parameters() or []

@@ -85,7 +90,7 @@ class ToolNode(BaseNode[ToolNodeData]):
                 app_id=self.app_id,
                 # TODO: conversation id and message id
             )
-        except Exception as e:
+        except ToolNodeError as e:
             yield RunCompletedEvent(
                 run_result=NodeRunResult(
                     status=WorkflowNodeExecutionStatus.FAILED,

@@ -94,7 +99,6 @@ class ToolNode(BaseNode[ToolNodeData]):
                     error=f"Failed to invoke tool: {str(e)}",
                 )
             )
-
             return

         # convert tool messages
         yield from self._transform_message(message_stream, tool_info, parameters_for_log)

@@ -131,14 +135,13 @@ class ToolNode(BaseNode[ToolNodeData]):
             if tool_input.type == "variable":
                 variable = variable_pool.get(tool_input.value)
                 if variable is None:
-                    raise ValueError(f"variable {tool_input.value} not exists")
+                    raise ToolParameterError(f"Variable {tool_input.value} does not exist")
                 parameter_value = variable.value
             elif tool_input.type in {"mixed", "constant"}:
                 segment_group = variable_pool.convert_template(str(tool_input.value))
                 parameter_value = segment_group.log if for_log else segment_group.text
             else:
-                raise ValueError(f"unknown tool input type '{tool_input.type}'")
-
+                raise ToolParameterError(f"Unknown tool input type '{tool_input.type}'")
             result[parameter_name] = parameter_value

         return result

@@ -187,7 +190,7 @@ class ToolNode(BaseNode[ToolNodeData]):
                 stmt = select(ToolFile).where(ToolFile.id == tool_file_id)
                 tool_file = session.scalar(stmt)
                 if tool_file is None:
-                    raise ValueError(f"tool file {tool_file_id} not exists")
+                    raise ToolFileError(f"Tool file {tool_file_id} does not exist")

                 files.append(
                     File(

@@ -212,8 +215,7 @@ class ToolNode(BaseNode[ToolNodeData]):
                 stmt = select(ToolFile).where(ToolFile.id == tool_file_id)
                 tool_file = session.scalar(stmt)
                 if tool_file is None:
-                    raise ValueError(f"tool file {tool_file_id} not exists")
-
+                    raise ToolFileError(f"tool file {tool_file_id} not exists")
                 files.append(
                     File(
                         tenant_id=self.tenant_id,
@@ -9,6 +9,7 @@ def parse_json_markdown(json_string: str) -> dict:
     starts = ["```json", "```", "``", "`", "{"]
     ends = ["```", "``", "`", "}"]
     end_index = -1
+    start_index = 0
     for s in starts:
         start_index = json_string.find(s)
         if start_index != -1:

@@ -24,7 +25,6 @@ def parse_json_markdown(json_string: str) -> dict:
             break
     if start_index != -1 and end_index != -1 and start_index < end_index:
         extracted_content = json_string[start_index:end_index].strip()
-        print("content:", extracted_content, start_index, end_index)
         parsed = json.loads(extracted_content)
     else:
         raise Exception("Could not find JSON block in the output.")
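The added start_index = 0 keeps the variable bound even when no start marker matches, and the deleted print was leftover debug output. A simplified, self-contained sketch of the same extraction idea (not the exact library function):

```python
import json

FENCE = "`" * 3  # the ``` marker, built programmatically to keep this example clean


def extract_json_block(text: str) -> dict:
    """Pull the first JSON object out of a markdown-ish string."""
    start_index = 0  # bound even if no marker below matches
    for marker in (FENCE + "json", FENCE, "{"):
        start_index = text.find(marker)
        if start_index != -1:
            if marker != "{":
                start_index += len(marker)
            break
    end_index = text.rfind("}")
    if start_index == -1 or end_index == -1 or start_index > end_index:
        raise ValueError("Could not find JSON block in the output.")
    return json.loads(text[start_index : end_index + 1].strip())


print(extract_json_block(FENCE + 'json\n{"category": "refund"}\n' + FENCE))
```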
@@ -1,7 +1,7 @@
 import json
 import re
 import uuid
-from collections.abc import Mapping, Sequence
+from collections.abc import Mapping
 from datetime import datetime
 from enum import Enum
 from typing import TYPE_CHECKING, Optional

@@ -14,7 +14,6 @@ from typing import Any, Literal
 import sqlalchemy as sa
 from flask import request
 from flask_login import UserMixin
-from pydantic import BaseModel, Field
 from sqlalchemy import Float, Index, PrimaryKeyConstraint, func, text
 from sqlalchemy.orm import Mapped, mapped_column

@@ -31,14 +30,6 @@ from .account import Account, Tenant
 from .types import StringUUID


-class FileUploadConfig(BaseModel):
-    enabled: bool = Field(default=False)
-    allowed_file_types: Sequence[FileType] = Field(default_factory=list)
-    allowed_extensions: Sequence[str] = Field(default_factory=list)
-    allowed_upload_methods: Sequence[FileTransferMethod] = Field(default_factory=list)
-    number_limits: int = Field(default=0, gt=0, le=10)
-
-
 class DifySetup(Base):
     __tablename__ = "dify_setups"
     __table_args__ = (db.PrimaryKeyConstraint("version", name="dify_setup_pkey"),)
api/poetry.lock (generated, 1766 lines changed)
File diff suppressed because it is too large
@@ -168,7 +168,7 @@ readabilipy = "0.2.0"
 redis = { version = "~5.0.3", extras = ["hiredis"] }
 replicate = "~0.22.0"
 resend = "~0.7.0"
-sagemaker = "2.231.0"
+sagemaker = "~2.231.0"
 scikit-learn = "~1.5.1"
 sentry-sdk = { version = "~1.44.1", extras = ["flask"] }
 sqlalchemy = "~2.0.29"
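
The only change here loosens the SageMaker pin from the exact version 2.231.0 to Poetry's tilde constraint ~2.231.0, which admits patch releases, i.e. >=2.231.0,<2.232.0. A quick way to check what a tilde range accepts, using the packaging library (my example, not code from the repo):

from packaging.specifiers import SpecifierSet

# Poetry's "~2.231.0" expands to this PEP 440 range.
allowed = SpecifierSet(">=2.231.0,<2.232.0")

print("2.231.5" in allowed)  # True: patch bumps are accepted
print("2.232.0" in allowed)  # False: minor bumps are not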
@@ -27,11 +27,7 @@ from .exc import (

 logger = logging.getLogger(__name__)

-current_dsl_version = "0.1.2"
-dsl_to_dify_version_mapping: dict[str, str] = {
-    "0.1.2": "0.8.0",
-    "0.1.1": "0.6.0",  # dsl version -> from dify version
-}
+current_dsl_version = "0.1.3"


 class AppDslService:
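
With the DSL-to-Dify version mapping deleted, only current_dsl_version (now 0.1.3) appears to remain as the compatibility anchor for app import/export. The service presumably compares an imported file's declared DSL version against it; a hedged sketch of such a check follows, where every name except current_dsl_version is mine and the actual service logic may differ.

from packaging import version

current_dsl_version = "0.1.3"


def check_dsl_version(imported: str) -> str | None:
    """Return a warning when an imported DSL file may be incompatible."""
    imported_v = version.parse(imported)
    current_v = version.parse(current_dsl_version)
    if imported_v > current_v:
        return (
            f"DSL version {imported} is newer than {current_dsl_version}; "
            "the importer may not understand newer fields."
        )
    if (imported_v.major, imported_v.minor) != (current_v.major, current_v.minor):
        return f"DSL version {imported} may be incompatible with {current_dsl_version}."
    return None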
@@ -7,8 +7,6 @@ import httpx
 import validators

 from constants import HIDDEN_VALUE
-
-# from tasks.external_document_indexing_task import external_document_indexing_task
 from core.helper import ssrf_proxy
 from extensions.ext_database import db
 from models.dataset import (
@@ -2,7 +2,7 @@ version: '3'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.10.2
+    image: langgenius/dify-api:0.11.0
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.

@@ -227,7 +227,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.10.2
+    image: langgenius/dify-api:0.11.0
     restart: always
     environment:
       CONSOLE_WEB_URL: ''

@@ -396,7 +396,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.10.2
+    image: langgenius/dify-web:0.11.0
     restart: always
     environment:
       # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
@@ -374,7 +374,7 @@ SUPABASE_URL=your-server-url
 # ------------------------------

 # The type of vector store to use.
-# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `analyticdb`, `couchbase`, `vikingdb`.
+# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`.
 VECTOR_STORE=weaviate

 # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.

@@ -537,10 +537,10 @@ LINDORM_USERNAME=username
 LINDORM_PASSWORD=password

 # OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase`
-OCEANBASE_VECTOR_HOST=oceanbase-vector
+OCEANBASE_VECTOR_HOST=oceanbase
 OCEANBASE_VECTOR_PORT=2881
 OCEANBASE_VECTOR_USER=root@test
-OCEANBASE_VECTOR_PASSWORD=
+OCEANBASE_VECTOR_PASSWORD=difyai123456
 OCEANBASE_VECTOR_DATABASE=test
 OCEANBASE_MEMORY_LIMIT=6G
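
The renamed host default (oceanbase) matches the renamed compose service below, and the non-empty default password matches the container's bootstrap password. OceanBase speaks the MySQL wire protocol on its default SQL port 2881, so a minimal connectivity check with PyMySQL (my choice of client for illustration, not necessarily the driver Dify itself uses) could look like:

import os

import pymysql  # assumption: any MySQL-protocol client can reach OceanBase

conn = pymysql.connect(
    host=os.getenv("OCEANBASE_VECTOR_HOST", "oceanbase"),
    port=int(os.getenv("OCEANBASE_VECTOR_PORT", "2881")),
    user=os.getenv("OCEANBASE_VECTOR_USER", "root@test"),  # user@tenant form
    password=os.getenv("OCEANBASE_VECTOR_PASSWORD", "difyai123456"),
    database=os.getenv("OCEANBASE_VECTOR_DATABASE", "test"),
)
with conn.cursor() as cur:
    cur.execute("SELECT version()")
    print(cur.fetchone())
conn.close()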
@@ -266,14 +266,15 @@ x-shared-env: &shared-api-worker-env
   OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-http://oceanbase-vector}
   OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
   OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
-  OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-""}
+  OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
   OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
+  OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
   OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}

 services:
   # API service
   api:
-    image: langgenius/dify-api:0.10.2
+    image: langgenius/dify-api:0.11.0
     restart: always
     environment:
       # Use the shared environment variables.

@@ -293,7 +294,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.10.2
+    image: langgenius/dify-api:0.11.0
     restart: always
     environment:
       # Use the shared environment variables.

@@ -312,7 +313,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.10.2
+    image: langgenius/dify-web:0.11.0
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}

@@ -597,16 +598,21 @@ services:
       IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

   # OceanBase vector database
-  oceanbase-vector:
+  oceanbase:
     image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
     profiles:
-      - oceanbase-vector
+      - oceanbase
     restart: always
     volumes:
       - ./volumes/oceanbase/data:/root/ob
+      - ./volumes/oceanbase/conf:/root/.obd/cluster
+      - ./volumes/oceanbase/init.d:/root/boot/init.d
     environment:
       OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
       OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
+      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
+      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
+      OB_SERVER_IP: '127.0.0.1'

   # Oracle vector database
   oracle:
docker/volumes/oceanbase/init.d/vec_memory.sql (new file, 1 line)
@@ -0,0 +1 @@
+ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30;
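
The new one-line init script reserves 30% of tenant memory for vector indexes; judging by the ./volumes/oceanbase/init.d mount in the compose hunk above, the container presumably executes it on first boot. To verify the parameter took effect you could reuse the connection from the earlier PyMySQL sketch (SHOW PARAMETERS is OceanBase-specific syntax; treat this as an untested sketch):

with conn.cursor() as cur:
    cur.execute("SHOW PARAMETERS LIKE 'ob_vector_memory_limit_percentage'")
    for row in cur.fetchall():
        print(row)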
@@ -8,18 +8,22 @@ import classNames from 'classnames'

 import { ImagePlus } from '../icons/src/vender/line/images'
 import { useDraggableUploader } from './hooks'
+import { checkIsAnimatedImage } from './utils'
 import { ALLOW_FILE_EXTENSIONS } from '@/types/app'

 type UploaderProps = {
   className?: string
   onImageCropped?: (tempUrl: string, croppedAreaPixels: Area, fileName: string) => void
+  onUpload?: (file?: File) => void
 }

 const Uploader: FC<UploaderProps> = ({
   className,
   onImageCropped,
+  onUpload,
 }) => {
   const [inputImage, setInputImage] = useState<{ file: File; url: string }>()
+  const [isAnimatedImage, setIsAnimatedImage] = useState<boolean>(false)
   useEffect(() => {
     return () => {
       if (inputImage)

@@ -34,12 +38,19 @@ const Uploader: FC<UploaderProps> = ({
     if (!inputImage)
       return
     onImageCropped?.(inputImage.url, croppedAreaPixels, inputImage.file.name)
+    onUpload?.(undefined)
   }

   const handleLocalFileInput = (e: ChangeEvent<HTMLInputElement>) => {
     const file = e.target.files?.[0]
-    if (file)
+    if (file) {
       setInputImage({ file, url: URL.createObjectURL(file) })
+      checkIsAnimatedImage(file).then((isAnimatedImage) => {
+        setIsAnimatedImage(!!isAnimatedImage)
+        if (isAnimatedImage)
+          onUpload?.(file)
+      })
+    }
   }

   const {

@@ -52,6 +63,26 @@ const Uploader: FC<UploaderProps> = ({

   const inputRef = createRef<HTMLInputElement>()

+  const handleShowImage = () => {
+    if (isAnimatedImage) {
+      return (
+        <img src={inputImage?.url} alt='' />
+      )
+    }
+
+    return (
+      <Cropper
+        image={inputImage?.url}
+        crop={crop}
+        zoom={zoom}
+        aspect={1}
+        onCropChange={setCrop}
+        onCropComplete={onCropComplete}
+        onZoomChange={setZoom}
+      />
+    )
+  }
+
   return (
     <div className={classNames(className, 'w-full px-3 py-1.5')}>
       <div

@@ -79,15 +110,7 @@ const Uploader: FC<UploaderProps> = ({
             </div>
             <div className="text-xs pointer-events-none">Supports PNG, JPG, JPEG, WEBP and GIF</div>
           </>
-          : <Cropper
-            image={inputImage.url}
-            crop={crop}
-            zoom={zoom}
-            aspect={1}
-            onCropChange={setCrop}
-            onCropComplete={onCropComplete}
-            onZoomChange={setZoom}
-          />
+          : handleShowImage()
         }
       </div>
     </div>
@@ -74,6 +74,11 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
     setImageCropInfo({ tempUrl, croppedAreaPixels, fileName })
   }

+  const [uploadImageInfo, setUploadImageInfo] = useState<{ file?: File }>()
+  const handleUpload = async (file?: File) => {
+    setUploadImageInfo({ file })
+  }
+
   const handleSelect = async () => {
     if (activeTab === 'emoji') {
       if (emoji) {

@@ -85,9 +90,13 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
       }
     }
     else {
-      if (!imageCropInfo)
+      if (!imageCropInfo && !uploadImageInfo)
         return
       setUploading(true)
+      if (imageCropInfo.file) {
+        handleLocalFileUpload(imageCropInfo.file)
+        return
+      }
       const blob = await getCroppedImg(imageCropInfo.tempUrl, imageCropInfo.croppedAreaPixels, imageCropInfo.fileName)
       const file = new File([blob], imageCropInfo.fileName, { type: blob.type })
       handleLocalFileUpload(file)

@@ -121,7 +130,7 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
       <Divider className='m-0' />

       <EmojiPickerInner className={activeTab === 'emoji' ? 'block' : 'hidden'} onSelect={handleSelectEmoji} />
-      <Uploader className={activeTab === 'image' ? 'block' : 'hidden'} onImageCropped={handleImageCropped} />
+      <Uploader className={activeTab === 'image' ? 'block' : 'hidden'} onImageCropped={handleImageCropped} onUpload={handleUpload}/>

       <Divider className='m-0' />
       <div className='w-full flex items-center justify-center p-3 gap-2'>
@@ -115,3 +115,52 @@ export default async function getCroppedImg(
     }, mimeType)
   })
 }
+
+export function checkIsAnimatedImage(file) {
+  return new Promise((resolve, reject) => {
+    const fileReader = new FileReader()
+
+    fileReader.onload = function (e) {
+      const arr = new Uint8Array(e.target.result)
+
+      // Check file extension
+      const fileName = file.name.toLowerCase()
+      if (fileName.endsWith('.gif')) {
+        // If file is a GIF, assume it's animated
+        resolve(true)
+      }
+      // Check for WebP signature (RIFF and WEBP)
+      else if (isWebP(arr)) {
+        resolve(checkWebPAnimation(arr)) // Check if it's animated
+      }
+      else {
+        resolve(false) // Not a GIF or WebP
+      }
+    }
+
+    fileReader.onerror = function (err) {
+      reject(err) // Reject the promise on error
+    }
+
+    // Read the file as an array buffer
+    fileReader.readAsArrayBuffer(file)
+  })
+}
+
+// Function to check for WebP signature
+function isWebP(arr) {
+  return (
+    arr[0] === 0x52 && arr[1] === 0x49 && arr[2] === 0x46 && arr[3] === 0x46
+    && arr[8] === 0x57 && arr[9] === 0x45 && arr[10] === 0x42 && arr[11] === 0x50
+  ) // "RIFF" and "WEBP"
+}
+
+// Function to check if the WebP is animated (contains ANIM chunk)
+function checkWebPAnimation(arr) {
+  // Search for the ANIM chunk in WebP to determine if it's animated
+  for (let i = 12; i < arr.length - 4; i++) {
+    if (arr[i] === 0x41 && arr[i + 1] === 0x4E && arr[i + 2] === 0x49 && arr[i + 3] === 0x4D)
+      return true // Found animation
+  }
+  return false // No animation chunk found
+}
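
The new helper is pure byte-sniffing: a .gif extension is taken to mean animated, and WebP files are scanned for an ANIM chunk somewhere after the 12-byte RIFF/WEBP header. The same check is easy to replicate server-side; a Python rendering of the identical logic (my code, not part of the commit):

def is_animated_image(filename: str, data: bytes) -> bool:
    """Mirror the front-end check: GIF by extension, WebP by ANIM chunk."""
    if filename.lower().endswith(".gif"):
        return True  # like the JS version, assume every GIF is animated
    if data[:4] == b"RIFF" and data[8:12] == b"WEBP":
        return b"ANIM" in data[12:]  # animated WebP contains an ANIM chunk
    return False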
@@ -28,7 +28,7 @@ const Category: FC<ICategoryProps> = ({
   allCategoriesEn,
 }) => {
   const { t } = useTranslation()
-  const isAllCategories = !list.includes(value as AppCategory)
+  const isAllCategories = !list.includes(value as AppCategory) || value === allCategoriesEn

   const itemClassName = (isSelected: boolean) => cn(
     'flex items-center px-3 py-[7px] h-[32px] rounded-lg border-[0.5px] border-transparent text-gray-700 font-medium leading-[18px] cursor-pointer hover:bg-gray-200',

@@ -44,7 +44,7 @@ const Category: FC<ICategoryProps> = ({
         <ThumbsUp className='mr-1 w-3.5 h-3.5' />
         {t('explore.apps.allCategories')}
       </div>
-      {list.map(name => (
+      {list.filter(name => name !== allCategoriesEn).map(name => (
         <div
           key={name}
           className={itemClassName(name === value)}
@@ -169,7 +169,7 @@ const translation = {
     deleteConfirmTip: '확인하려면 등록된 이메일에서 다음 내용을 로 보내주세요 ',
     myAccount: '내 계정',
     studio: '디파이 스튜디오',
-    account: '계좌',
+    account: '계정',
   },
   members: {
     team: '팀',

(Korean locale fix: '계좌' means a bank account; '계정' is the correct word for a user account.)
@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.10.2",
+  "version": "0.11.0",
   "private": true,
   "engines": {
     "node": ">=18.17.0"