Mirror of https://github.com/langgenius/dify.git (synced 2024-11-16 03:32:23 +08:00)

Merge branch 'main' into feat/continue-on-error

Commit b5be84cc8e

.github/pull_request_template.md (vendored), 54 lines changed
@@ -1,34 +1,32 @@ .github/pull_request_template.md

# Checklist:

# Summary

Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change.

> [!Tip]
> Close issue syntax: `Fixes #<issue number>` or `Resolves #<issue number>`, see [documentation](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) for more details.

# Screenshots

<table>
  <tr>
    <td>Before: </td>
    <td>After: </td>
  </tr>
  <tr>
    <td>...</td>
    <td>...</td>
  </tr>
</table>

# Checklist

> [!IMPORTANT]
> Please review the checklist below before submitting your pull request.

- [ ] Please open an issue before creating a PR or link to an existing issue
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I ran `dev/reformat`(backend) and `cd web && npx lint-staged`(frontend) to appease the lint gods

# Description

Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. If it fixes a bug or resolves a feature request, be sure to link to that issue. Close issue syntax: `Fixes #<issue number>`, see [documentation](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) for more details.

Fixes

## Type of Change

- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update, included: [Dify Document](https://github.com/langgenius/dify-docs)
- [ ] Improvement, including but not limited to code refactoring, performance optimization, and UI/UX improvement
- [ ] Dependency upgrade

# Testing Instructions

Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration

- [ ] Test A
- [ ] Test B

- [x] I understand that this PR may be closed in case there was no previous discussion or issues. (This doesn't apply to typos!)
- [x] I've added a test for each change that was introduced, and I tried as much as possible to make a single atomic change.
- [x] I've updated the documentation accordingly.
- [x] I ran `dev/reformat`(backend) and `cd web && npx lint-staged`(frontend) to appease the lint gods
@@ -19,6 +19,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="chat on Discord"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="join Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="follow on X(Twitter)"></a>
@@ -15,6 +15,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="chat on Discord"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="join Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="follow on X(Twitter)"></a>
@@ -15,6 +15,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="chat en Discord"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="join Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="seguir en X(Twitter)"></a>
@@ -15,6 +15,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="chat sur Discord"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="join Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="suivre sur X(Twitter)"></a>
@@ -15,6 +15,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="Discordでチャット"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="X(Twitter)でフォロー"></a>
@@ -15,6 +15,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="chat on Discord"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="Follow Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="follow on X(Twitter)"></a>
@@ -19,6 +19,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="chat on Discord"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="Follow Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="follow on X(Twitter)"></a>

@@ -238,4 +241,4 @@ Para proteger sua privacidade, evite postar problemas de segurança no GitHub. E

## Licença

Este repositório está disponível sob a [Licença de Código Aberto Dify](LICENSE), que é essencialmente Apache 2.0 com algumas restrições adicionais.
README_SI.md (new file), 180 lines

@@ -0,0 +1,180 @@

![cover-v5-optimized](https://github.com/langgenius/dify/assets/13230914/f9e19af5-61ba-4119-b926-d10c4c06ebab)
|
||||
|
||||
<p align="center">
|
||||
📌 <a href="https://dify.ai/blog/introducing-dify-workflow-file-upload-a-demo-on-ai-podcast">Predstavljamo nalaganje datotek Dify Workflow: znova ustvarite Google NotebookLM Podcast</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
|
||||
<a href="https://docs.dify.ai/getting-started/install-self-hosted">Samostojno gostovanje</a> ·
|
||||
<a href="https://docs.dify.ai">Dokumentacija</a> ·
|
||||
<a href="https://udify.app/chat/22L1zSxg6yW1cWQg">Povpraševanje za podjetja</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://dify.ai" target="_blank">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Product-F04438"></a>
|
||||
<a href="https://dify.ai/pricing" target="_blank">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/free-pricing?logo=free&color=%20%23155EEF&label=pricing&labelColor=%20%23528bff"></a>
|
||||
<a href="https://discord.gg/FngNHpbcY7" target="_blank">
|
||||
<img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
|
||||
alt="chat on Discord"></a>
|
||||
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
|
||||
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
|
||||
alt="follow on X(Twitter)"></a>
|
||||
<a href="https://hub.docker.com/u/langgenius" target="_blank">
|
||||
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
|
||||
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
|
||||
<img alt="Commits last month" src="https://img.shields.io/github/commit-activity/m/langgenius/dify?labelColor=%20%2332b583&color=%20%2312b76a"></a>
|
||||
<a href="https://github.com/langgenius/dify/" target="_blank">
|
||||
<img alt="Issues closed" src="https://img.shields.io/github/issues-search?query=repo%3Alanggenius%2Fdify%20is%3Aclosed&label=issues%20closed&labelColor=%20%237d89b0&color=%20%235d6b98"></a>
|
||||
<a href="https://github.com/langgenius/dify/discussions/" target="_blank">
|
||||
<img alt="Discussion posts" src="https://img.shields.io/github/discussions/langgenius/dify?labelColor=%20%239b8afb&color=%20%237a5af8"></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
|
||||
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
|
||||
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
|
||||
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
|
||||
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
|
||||
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
|
||||
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
|
||||
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
|
||||
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
|
||||
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
|
||||
<a href="./README_SI.md"><img alt="README Slovenščina" src="https://img.shields.io/badge/Sloven%C5%A1%C4%8Dina-d9d9d9"></a>
|
||||
</p>
|
||||
|
||||
|
||||
Dify je odprtokodna platforma za razvoj aplikacij LLM. Njegov intuitivni vmesnik združuje agentski potek dela z umetno inteligenco, cevovod RAG, zmogljivosti agentov, upravljanje modelov, funkcije opazovanja in več, kar vam omogoča hiter prehod od prototipa do proizvodnje.
|
||||
|
||||
## Hitri začetek
|
||||
> Preden namestite Dify, se prepričajte, da vaša naprava izpolnjuje naslednje minimalne sistemske zahteve:
|
||||
>
|
||||
>- CPU >= 2 Core
|
||||
>- RAM >= 4 GiB
|
||||
|
||||
</br>
|
||||
|
||||
Najlažji način za zagon strežnika Dify je prek docker compose. Preden zaženete Dify z naslednjimi ukazi, se prepričajte, da sta Docker in Docker Compose nameščena na vašem računalniku:

```bash
cd dify
cd docker
cp .env.example .env
docker compose up -d
```

Po zagonu lahko dostopate do nadzorne plošče Dify v brskalniku na [http://localhost/install](http://localhost/install) in začnete postopek inicializacije.
|
||||
|
||||
#### Iskanje pomoči
|
||||
Prosimo, glejte naša pogosta vprašanja [FAQ](https://docs.dify.ai/getting-started/install-self-hosted/faqs), če naletite na težave pri nastavitvi Dify. Če imate še vedno težave, se obrnite na [skupnost ali nas](#community--contact).
|
||||
|
||||
> Če želite prispevati k Difyju ali narediti dodaten razvoj, glejte naš vodnik za [uvajanje iz izvorne kode](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
|
||||
|
||||
## Ključne značilnosti
|
||||
**1. Potek dela**:
|
||||
Zgradite in preizkusite zmogljive poteke dela AI na vizualnem platnu, pri čemer izkoristite vse naslednje funkcije in več.
|
||||
|
||||
|
||||
https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa
|
||||
|
||||
|
||||
|
||||
**2. Celovita podpora za modele**:
|
||||
Brezhibna integracija s stotinami lastniških/odprtokodnih LLM-jev ducatov ponudnikov sklepanja in samostojnih rešitev, ki pokrivajo GPT, Mistral, Llama3 in vse modele, združljive z API-jem OpenAI. Celoten seznam podprtih ponudnikov modelov najdete [tukaj](https://docs.dify.ai/getting-started/readme/model-providers).
|
||||
|
||||
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
|
||||
|
||||
|
||||
**3. Prompt IDE**:
|
||||
intuitivni vmesnik za ustvarjanje pozivov, primerjavo zmogljivosti modela in dodajanje dodatnih funkcij, kot je pretvorba besedila v govor, aplikaciji, ki temelji na klepetu.
|
||||
|
||||
**4. RAG Pipeline**:
Obsežne zmogljivosti RAG, ki pokrivajo vse od vnosa dokumenta do priklica, s podporo za ekstrakcijo besedila iz datotek PDF, PPT in drugih običajnih formatov dokumentov.
|
||||
|
||||
**5. Agent capabilities**:
|
||||
definirate lahko agente, ki temeljijo na klicanju funkcij LLM ali ReAct, in dodate vnaprej izdelana orodja ali orodja po meri za agenta. Dify ponuja več kot 50 vgrajenih orodij za agente AI, kot so Google Search, DALL·E, Stable Diffusion in WolframAlpha.
|
||||
|
||||
**6. LLMOps**:
|
||||
Spremljajte in analizirajte dnevnike aplikacij in učinkovitost skozi čas. Pozive, nabore podatkov in modele lahko nenehno izboljšujete na podlagi proizvodnih podatkov in opomb.
|
||||
|
||||
**7. Backend-as-a-Service**:
Vse ponudbe Difyja so opremljene z ustreznimi API-ji, tako da lahko Dify brez težav integrirate v svojo poslovno logiko.
|
||||
|
||||
|
||||
## Uporaba Dify
|
||||
|
||||
- **Cloud </br>**
|
||||
Gostimo storitev Dify Cloud za vsakogar, ki jo lahko preizkusite brez nastavitev. Zagotavlja vse zmožnosti različice za samostojno namestitev in vključuje 200 brezplačnih klicev GPT-4 v načrtu peskovnika.
|
||||
|
||||
- **Self-hosting Dify Community Edition</br>**
|
||||
Hitro zaženite Dify v svojem okolju s tem [začetnim vodnikom](#quick-start). Za dodatne reference in podrobnejša navodila uporabite našo [dokumentacijo](https://docs.dify.ai).
|
||||
|
||||
|
||||
- **Dify za podjetja/organizacije</br>**
|
||||
Ponujamo dodatne funkcije, osredotočene na podjetja. Zabeležite svoja vprašanja prek tega klepetalnega robota ali nam pošljite e-pošto, da se pogovorimo o potrebah podjetja. </br>
|
||||
> Za novoustanovljena podjetja in mala podjetja, ki uporabljajo AWS, si oglejte Dify Premium na AWS Marketplace in ga z enim klikom uvedite v svoj AWS VPC. To je cenovno ugodna ponudba AMI z možnostjo ustvarjanja aplikacij z logotipom in blagovno znamko po meri.
|
||||
|
||||
|
||||
## Staying ahead
|
||||
|
||||
Star Dify on GitHub and be instantly notified of new releases.
|
||||
|
||||
![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4)
|
||||
|
||||
|
||||
## Napredne nastavitve
|
||||
|
||||
Če morate prilagoditi konfiguracijo, si oglejte komentarje v naši datoteki .env.example in posodobite ustrezne vrednosti v svoji datoteki .env. Poleg tega boste morda morali prilagoditi tudi datoteko docker-compose.yaml, na primer spremeniti različice slike, preslikave vrat ali namestitve nosilca, glede na vaše specifično okolje in zahteve za uvajanje. Po kakršnih koli spremembah ponovno zaženite docker-compose up -d. Celoten seznam razpoložljivih spremenljivk okolja najdete tukaj.
|
||||
|
||||
Če želite konfigurirati visoko razpoložljivo nastavitev, so na voljo Helm Charts in datoteke YAML, ki jih prispeva skupnost, ki omogočajo uvedbo Difyja v Kubernetes.
|
||||
|
||||
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
|
||||
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
|
||||
- [YAML file by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
|
||||
|
||||
#### Uporaba Terraform za uvajanje
|
||||
|
||||
namestite Dify v Cloud Platform z enim klikom z uporabo [terraform](https://www.terraform.io/)
|
||||
|
||||
##### Azure Global
|
||||
- [Azure Terraform by @nikawang](https://github.com/nikawang/dify-azure-terraform)
|
||||
|
||||
##### Google Cloud
|
||||
- [Google Cloud Terraform by @sotazum](https://github.com/DeNA/dify-google-cloud-terraform)
|
||||
|
||||
## Prispevam
|
||||
|
||||
Za tiste, ki bi radi prispevali kodo, si oglejte naš vodnik za prispevke. Hkrati vas prosimo, da podprete Dify tako, da ga delite na družbenih medijih ter na dogodkih in konferencah.
|
||||
|
||||
|
||||
|
||||
> Iščemo sodelavce za pomoč pri prevajanju Difyja v jezike, ki niso mandarinščina ali angleščina. Če želite pomagati, si oglejte i18n README za več informacij in nam pustite komentar v kanalu global-users našega strežnika skupnosti Discord.
|
||||
|
||||
## Skupnost in stik
|
||||
|
||||
* [Github Discussion](https://github.com/langgenius/dify/discussions). Najboljše za: izmenjavo povratnih informacij in postavljanje vprašanj.
|
||||
* [GitHub Issues](https://github.com/langgenius/dify/issues). Najboljše za: hrošče, na katere naletite pri uporabi Dify.AI, in predloge funkcij. Oglejte si naš [vodnik za prispevke](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
|
||||
* [Discord](https://discord.gg/FngNHpbcY7). Najboljše za: deljenje vaših aplikacij in druženje s skupnostjo.
|
||||
* [X(Twitter)](https://twitter.com/dify_ai). Najboljše za: deljenje vaših aplikacij in druženje s skupnostjo.
|
||||
|
||||
**Contributors**
|
||||
|
||||
<a href="https://github.com/langgenius/dify/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langgenius/dify" />
|
||||
</a>
|
||||
|
||||
## Star history
|
||||
|
||||
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
|
||||
|
||||
|
||||
## Varnostno razkritje
|
||||
|
||||
Zaradi zaščite vaše zasebnosti se izogibajte objavljanju varnostnih vprašanj na GitHub. Namesto tega pošljite vprašanja na security@dify.ai in zagotovili vam bomo podrobnejši odgovor.
|
||||
|
||||
## Licenca
|
||||
|
||||
To skladišče je na voljo pod [odprtokodno licenco Dify](LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami.
|
|
@@ -15,6 +15,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="Discord'da sohbet et"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="Follow Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="X(Twitter)'da takip et"></a>
@@ -15,6 +15,9 @@

  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="chat trên Discord"></a>
  <a href="https://reddit.com/r/difyai" target="_blank">
    <img src="https://img.shields.io/reddit/subreddit-subscribers/difyai?style=plastic&logo=reddit&label=r%2Fdifyai&labelColor=white"
        alt="Follow Reddit"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="theo dõi trên X(Twitter)"></a>

@@ -235,4 +238,4 @@ Triển khai Dify lên nền tảng đám mây với một cú nhấp chuột b

## Giấy phép

Kho lưu trữ này có sẵn theo [Giấy phép Mã nguồn Mở Dify](LICENSE), về cơ bản là Apache 2.0 với một vài hạn chế bổ sung.
@@ -55,7 +55,7 @@ RUN apt-get update \
    && echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \
    && apt-get update \
    # For Security
-   && apt-get install -y --no-install-recommends expat=2.6.3-2 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-7 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
+   && apt-get install -y --no-install-recommends expat=2.6.4-1 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-7 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
    # install a chinese font to support the use of tools like matplotlib
    && apt-get install -y fonts-noto-cjk \
    && apt-get autoremove -y \
@@ -53,7 +53,6 @@ if dify_config.TESTING:
    @app.after_request
    def after_request(response):
        """Add Version headers to the response."""
-       response.set_cookie("remember_token", "", expires=0)
        response.headers.add("X-Version", dify_config.CURRENT_VERSION)
        response.headers.add("X-Env", dify_config.DEPLOY_ENV)
        return response
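For context, the remaining hook only decorates responses with version headers. A minimal, self-contained Flask sketch of the same pattern; the config values are hard-coded placeholders, not Dify's `dify_config`:

```python
from flask import Flask

app = Flask(__name__)

CURRENT_VERSION = "0.0.0-example"  # placeholder for dify_config.CURRENT_VERSION
DEPLOY_ENV = "DEVELOPMENT"         # placeholder for dify_config.DEPLOY_ENV


@app.after_request
def after_request(response):
    """Add Version headers to the response."""
    response.headers.add("X-Version", CURRENT_VERSION)
    response.headers.add("X-Env", DEPLOY_ENV)
    return response


@app.get("/ping")
def ping():
    return "pong"


with app.test_client() as client:
    resp = client.get("/ping")
    print(resp.headers.get("X-Version"), resp.headers.get("X-Env"))  # 0.0.0-example DEVELOPMENT
```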
|
@@ -17,6 +17,7 @@ language_timezone_mapping = {
     "hi-IN": "Asia/Kolkata",
     "tr-TR": "Europe/Istanbul",
     "fa-IR": "Asia/Tehran",
+    "sl-SI": "Europe/Ljubljana",
 }

 languages = list(language_timezone_mapping.keys())
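A small sketch of how such a language-to-timezone mapping is typically consumed, resolving a default timezone from a UI language code. The UTC fallback here is an assumption for illustration, not taken from the diff:

```python
language_timezone_mapping = {
    "hi-IN": "Asia/Kolkata",
    "tr-TR": "Europe/Istanbul",
    "fa-IR": "Asia/Tehran",
    "sl-SI": "Europe/Ljubljana",
}


def default_timezone_for(language: str, fallback: str = "UTC") -> str:
    # fall back to UTC when the language code is not in the mapping (assumption)
    return language_timezone_mapping.get(language, fallback)


print(default_timezone_for("sl-SI"))  # Europe/Ljubljana
print(default_timezone_for("xx-XX"))  # UTC
```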
|
@@ -113,7 +113,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
        try:
            client = AzureOpenAI(**self._to_credential_kwargs(credentials))

-           if model.startswith("o1"):
+           if "o1" in model:
                client.chat.completions.create(
                    messages=[{"role": "user", "content": "ping"}],
                    model=model,
@@ -311,7 +311,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
        prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)

        block_as_stream = False
-       if model.startswith("o1"):
+       if "o1" in model:
            if stream:
                block_as_stream = True
                stream = False
@@ -404,7 +404,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
            ]
        )

-       if model.startswith("o1"):
+       if "o1" in model:
            system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)])
            if system_message_count > 0:
                new_prompt_messages = []
@@ -653,7 +653,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
            tokens_per_message = 4
            # if there's a name, the role is omitted
            tokens_per_name = -1
-       elif model.startswith("gpt-35-turbo") or model.startswith("gpt-4") or model.startswith("o1"):
+       elif model.startswith("gpt-35-turbo") or model.startswith("gpt-4") or "o1" in model:
            tokens_per_message = 3
            tokens_per_name = 1
        else:
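The switch from `model.startswith("o1")` to `"o1" in model` broadens o1 detection to names where the marker is not a prefix, which matters because Azure deployment names are user-chosen. A minimal sketch contrasting the two predicates; the deployment names below are hypothetical, for illustration only:

```python
def is_o1_prefix(model: str) -> bool:
    # old check: only matches names that begin with "o1"
    return model.startswith("o1")


def is_o1_substring(model: str) -> bool:
    # new check: matches "o1" anywhere in the model/deployment name
    return "o1" in model


# hypothetical Azure deployment names, for illustration only
for name in ["o1-preview", "o1-mini", "azure-o1-mini-eastus", "gpt-4o"]:
    print(name, is_o1_prefix(name), is_o1_substring(name))
# "azure-o1-mini-eastus" is caught only by the substring check; note that any
# deployment name containing the literal substring "o1" would also match.
```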
@ -0,0 +1,95 @@
|
|||
model: Qwen2.5-72B-Instruct
|
||||
label:
|
||||
zh_Hans: Qwen2.5-72B-Instruct
|
||||
en_US: Qwen2.5-72B-Instruct
|
||||
model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
- tool-call
|
||||
- stream-tool-call
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
label:
|
||||
en_US: "Max Tokens"
|
||||
zh_Hans: "最大Token数"
|
||||
type: int
|
||||
default: 512
|
||||
min: 1
|
||||
required: true
|
||||
help:
|
||||
en_US: "The maximum number of tokens that can be generated by the model varies depending on the model."
|
||||
zh_Hans: "模型可生成的最大 token 个数,不同模型上限不同。"
|
||||
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
label:
|
||||
en_US: "Temperature"
|
||||
zh_Hans: "采样温度"
|
||||
type: float
|
||||
default: 0.7
|
||||
min: 0.0
|
||||
max: 1.0
|
||||
precision: 1
|
||||
required: true
|
||||
help:
|
||||
en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
|
||||
zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"
|
||||
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
label:
|
||||
en_US: "Top P"
|
||||
zh_Hans: "Top P"
|
||||
type: float
|
||||
default: 0.7
|
||||
min: 0.0
|
||||
max: 1.0
|
||||
precision: 1
|
||||
required: true
|
||||
help:
|
||||
en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
|
||||
zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"
|
||||
|
||||
- name: top_k
|
||||
use_template: top_k
|
||||
label:
|
||||
en_US: "Top K"
|
||||
zh_Hans: "Top K"
|
||||
type: int
|
||||
default: 50
|
||||
min: 0
|
||||
max: 100
|
||||
required: true
|
||||
help:
|
||||
en_US: "The value range is [0,100], which limits the model to only select from the top k words with the highest probability when choosing the next word at each step. The larger the value, the more diverse text generation will be."
|
||||
zh_Hans: "取值范围为 [0,100],限制模型在每一步选择下一个词时,只从概率最高的前 k 个词中选取。数值越大,文本生成越多样。"
|
||||
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
label:
|
||||
en_US: "Frequency Penalty"
|
||||
zh_Hans: "频率惩罚"
|
||||
type: float
|
||||
default: 0
|
||||
min: -1.0
|
||||
max: 1.0
|
||||
precision: 1
|
||||
required: false
|
||||
help:
|
||||
en_US: "Used to adjust the frequency of repeated content in automatically generated text. Positive numbers reduce repetition, while negative numbers increase repetition. After setting this parameter, if a word has already appeared in the text, the model will decrease the probability of choosing that word for subsequent generation."
|
||||
zh_Hans: "用于调整自动生成文本中重复内容的频率。正数减少重复,负数增加重复。设置此参数后,如果一个词在文本中已经出现过,模型在后续生成中选择该词的概率会降低。"
|
||||
|
||||
- name: user
|
||||
use_template: text
|
||||
label:
|
||||
en_US: "User"
|
||||
zh_Hans: "用户"
|
||||
type: string
|
||||
required: false
|
||||
help:
|
||||
en_US: "Used to track and differentiate conversation requests from different users."
|
||||
zh_Hans: "用于追踪和区分不同用户的对话请求。"
|
|
@@ -1,3 +1,4 @@
+- Qwen2.5-72B-Instruct
 - Qwen2-7B-Instruct
 - Qwen2-72B-Instruct
 - Yi-1.5-34B-Chat
@@ -6,6 +6,7 @@ from core.model_runtime.entities.message_entities import (
     PromptMessage,
     PromptMessageTool,
 )
+from core.model_runtime.entities.model_entities import ModelFeature
 from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel

@@ -28,14 +29,13 @@ class GiteeAILargeLanguageModel(OAIAPICompatLargeLanguageModel):
         user: Optional[str] = None,
     ) -> Union[LLMResult, Generator]:
         self._add_custom_parameters(credentials, model, model_parameters)
-        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream)
+        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)

     def validate_credentials(self, model: str, credentials: dict) -> None:
         self._add_custom_parameters(credentials, model, None)
         super().validate_credentials(model, credentials)

-    @staticmethod
-    def _add_custom_parameters(credentials: dict, model: str, model_parameters: dict) -> None:
+    def _add_custom_parameters(self, credentials: dict, model: str, model_parameters: dict) -> None:
         if model is None:
             model = "bge-large-zh-v1.5"
@@ -45,3 +45,7 @@ class GiteeAILargeLanguageModel(OAIAPICompatLargeLanguageModel):
             credentials["mode"] = LLMMode.COMPLETION.value
         else:
             credentials["mode"] = LLMMode.CHAT.value
+
+        schema = self.get_model_schema(model, credentials)
+        if ModelFeature.TOOL_CALL in schema.features or ModelFeature.MULTI_TOOL_CALL in schema.features:
+            credentials["function_calling_type"] = "tool_call"
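The added lines derive the `function_calling_type` credential from the model schema's declared features, so tool calling is only advertised for models that support it. A rough standalone sketch of that pattern; the enum and schema class below are simplified stand-ins, not the real Dify types:

```python
from dataclasses import dataclass, field
from enum import Enum


class ModelFeature(Enum):
    # simplified stand-in for core.model_runtime.entities.model_entities.ModelFeature
    TOOL_CALL = "tool-call"
    MULTI_TOOL_CALL = "multi-tool-call"
    VISION = "vision"


@dataclass
class ModelSchema:
    features: list[ModelFeature] = field(default_factory=list)


def apply_function_calling_flag(credentials: dict, schema: ModelSchema) -> dict:
    # only advertise tool calling when the model schema declares it
    if ModelFeature.TOOL_CALL in schema.features or ModelFeature.MULTI_TOOL_CALL in schema.features:
        credentials["function_calling_type"] = "tool_call"
    return credentials


creds = apply_function_calling_flag({}, ModelSchema(features=[ModelFeature.TOOL_CALL]))
print(creds)  # {'function_calling_type': 'tool_call'}
```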
|
@ -0,0 +1,46 @@
|
|||
model: abab7-chat-preview
|
||||
label:
|
||||
en_US: Abab7-chat-preview
|
||||
model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
- tool-call
|
||||
- stream-tool-call
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 245760
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
min: 0.01
|
||||
max: 1
|
||||
default: 0.1
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
min: 0.01
|
||||
max: 1
|
||||
default: 0.95
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
required: true
|
||||
default: 2048
|
||||
min: 1
|
||||
max: 245760
|
||||
- name: mask_sensitive_info
|
||||
type: boolean
|
||||
default: true
|
||||
label:
|
||||
zh_Hans: 隐私保护
|
||||
en_US: Moderate
|
||||
help:
|
||||
zh_Hans: 对输出中易涉及隐私问题的文本信息进行打码,目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码
|
||||
en_US: Mask the sensitive info of the generated content, such as email/domain/link/address/phone/id..
|
||||
- name: presence_penalty
|
||||
use_template: presence_penalty
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
pricing:
|
||||
input: '0.1'
|
||||
output: '0.1'
|
||||
unit: '0.001'
|
||||
currency: RMB
|
|
@@ -34,6 +34,7 @@ from core.model_runtime.model_providers.minimax.llm.types import MinimaxMessage

 class MinimaxLargeLanguageModel(LargeLanguageModel):
     model_apis = {
+        "abab7-chat-preview": MinimaxChatCompletionPro,
         "abab6.5s-chat": MinimaxChatCompletionPro,
         "abab6.5-chat": MinimaxChatCompletionPro,
         "abab6-chat": MinimaxChatCompletionPro,
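The `model_apis` dict maps each model name to the client class that implements its API, so adding `abab7-chat-preview` is just a new registry entry. A toy sketch of the dispatch pattern; the class body here is a placeholder, not the real Minimax client:

```python
class MinimaxChatCompletionPro:
    # placeholder for the real client class
    def generate(self, prompt: str) -> str:
        return f"pro-completion for: {prompt}"


model_apis = {
    "abab7-chat-preview": MinimaxChatCompletionPro,
    "abab6.5s-chat": MinimaxChatCompletionPro,
}


def client_for(model: str):
    # look up the API client class registered for this model name
    try:
        return model_apis[model]()
    except KeyError:
        raise ValueError(f"unsupported model: {model}") from None


print(client_for("abab7-chat-preview").generate("ping"))
```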
@ -0,0 +1,84 @@
|
|||
model: OpenGVLab/InternVL2-26B
|
||||
label:
|
||||
en_US: OpenGVLab/InternVL2-26B
|
||||
model_type: llm
|
||||
features:
|
||||
- vision
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
type: float
|
||||
default: 0.3
|
||||
min: 0.0
|
||||
max: 2.0
|
||||
help:
|
||||
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
|
||||
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
type: int
|
||||
default: 2000
|
||||
min: 1
|
||||
max: 2000
|
||||
help:
|
||||
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
|
||||
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
type: float
|
||||
default: 0.8
|
||||
min: 0.1
|
||||
max: 0.9
|
||||
help:
|
||||
zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
|
||||
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
|
||||
- name: top_k
|
||||
type: int
|
||||
min: 0
|
||||
max: 99
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
help:
|
||||
zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
|
||||
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
|
||||
- name: seed
|
||||
required: false
|
||||
type: int
|
||||
default: 1234
|
||||
label:
|
||||
zh_Hans: 随机种子
|
||||
en_US: Random seed
|
||||
help:
|
||||
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
|
||||
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
|
||||
- name: repetition_penalty
|
||||
required: false
|
||||
type: float
|
||||
default: 1.1
|
||||
label:
|
||||
zh_Hans: 重复惩罚
|
||||
en_US: Repetition penalty
|
||||
help:
|
||||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '21'
|
||||
output: '21'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
|
@ -0,0 +1,84 @@
|
|||
model: Pro/OpenGVLab/InternVL2-8B
|
||||
label:
|
||||
en_US: Pro/OpenGVLab/InternVL2-8B
|
||||
model_type: llm
|
||||
features:
|
||||
- vision
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
type: float
|
||||
default: 0.3
|
||||
min: 0.0
|
||||
max: 2.0
|
||||
help:
|
||||
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
|
||||
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
type: int
|
||||
default: 2000
|
||||
min: 1
|
||||
max: 2000
|
||||
help:
|
||||
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
|
||||
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
type: float
|
||||
default: 0.8
|
||||
min: 0.1
|
||||
max: 0.9
|
||||
help:
|
||||
zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
|
||||
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
|
||||
- name: top_k
|
||||
type: int
|
||||
min: 0
|
||||
max: 99
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
help:
|
||||
zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
|
||||
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
|
||||
- name: seed
|
||||
required: false
|
||||
type: int
|
||||
default: 1234
|
||||
label:
|
||||
zh_Hans: 随机种子
|
||||
en_US: Random seed
|
||||
help:
|
||||
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
|
||||
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
|
||||
- name: repetition_penalty
|
||||
required: false
|
||||
type: float
|
||||
default: 1.1
|
||||
label:
|
||||
zh_Hans: 重复惩罚
|
||||
en_US: Repetition penalty
|
||||
help:
|
||||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '21'
|
||||
output: '21'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
|
@@ -1,16 +1,18 @@
- Tencent/Hunyuan-A52B-Instruct
- Qwen/Qwen2.5-72B-Instruct
- Qwen/Qwen2.5-32B-Instruct
- Qwen/Qwen2.5-14B-Instruct
- Qwen/Qwen2.5-7B-Instruct
- Qwen/Qwen2.5-Coder-32B-Instruct
- Qwen/Qwen2.5-Coder-7B-Instruct
- Qwen/Qwen2.5-Math-72B-Instruct
- Qwen/Qwen2-72B-Instruct
- Qwen/Qwen2-57B-A14B-Instruct
- Qwen/Qwen2-7B-Instruct
- Qwen/Qwen2-VL-72B-Instruct
- Qwen/Qwen2-1.5B-Instruct
- Pro/Qwen/Qwen2-VL-7B-Instruct
- OpenGVLab/InternVL2-Llama3-76B
- OpenGVLab/InternVL2-26B
- Pro/OpenGVLab/InternVL2-8B
- deepseek-ai/DeepSeek-V2.5
- deepseek-ai/DeepSeek-V2-Chat
- deepseek-ai/DeepSeek-Coder-V2-Instruct
- THUDM/glm-4-9b-chat
- 01-ai/Yi-1.5-34B-Chat-16K
- 01-ai/Yi-1.5-9B-Chat-16K

@@ -20,9 +22,6 @@
- meta-llama/Meta-Llama-3.1-405B-Instruct
- meta-llama/Meta-Llama-3.1-70B-Instruct
- meta-llama/Meta-Llama-3.1-8B-Instruct
- meta-llama/Meta-Llama-3-70B-Instruct
- meta-llama/Meta-Llama-3-8B-Instruct
- google/gemma-2-27b-it
- google/gemma-2-9b-it
- mistralai/Mistral-7B-Instruct-v0.2
- mistralai/Mixtral-8x7B-Instruct-v0.1
- deepseek-ai/DeepSeek-V2-Chat
|
|
@@ -37,3 +37,4 @@ pricing:
   output: '1.33'
   unit: '0.000001'
   currency: RMB
+deprecated: true

@@ -37,3 +37,4 @@ pricing:
   output: '1.33'
   unit: '0.000001'
   currency: RMB
+deprecated: true
@@ -4,6 +4,8 @@ label:
 model_type: llm
 features:
   - agent-thought
+  - tool-call
+  - stream-tool-call
 model_properties:
   mode: chat
   context_size: 32768
@ -32,6 +34,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '1.33'
|
||||
output: '1.33'
|
||||
|
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '1.26'
|
||||
output: '1.26'
|
||||
|
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '0'
|
||||
output: '0'
|
||||
|
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '0'
|
||||
output: '0'
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
model: Tencent/Hunyuan-A52B-Instruct
|
||||
label:
|
||||
en_US: Tencent/Hunyuan-A52B-Instruct
|
||||
model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
type: float
|
||||
default: 0.3
|
||||
min: 0.0
|
||||
max: 2.0
|
||||
help:
|
||||
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
|
||||
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
type: int
|
||||
default: 2000
|
||||
min: 1
|
||||
max: 2000
|
||||
help:
|
||||
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
|
||||
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
type: float
|
||||
default: 0.8
|
||||
min: 0.1
|
||||
max: 0.9
|
||||
help:
|
||||
zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
|
||||
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
|
||||
- name: top_k
|
||||
type: int
|
||||
min: 0
|
||||
max: 99
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
help:
|
||||
zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
|
||||
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
|
||||
- name: seed
|
||||
required: false
|
||||
type: int
|
||||
default: 1234
|
||||
label:
|
||||
zh_Hans: 随机种子
|
||||
en_US: Random seed
|
||||
help:
|
||||
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
|
||||
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
|
||||
- name: repetition_penalty
|
||||
required: false
|
||||
type: float
|
||||
default: 1.1
|
||||
label:
|
||||
zh_Hans: 重复惩罚
|
||||
en_US: Repetition penalty
|
||||
help:
|
||||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '21'
|
||||
output: '21'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '1'
|
||||
output: '1'
|
||||
|
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '0'
|
||||
output: '0'
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
model: OpenGVLab/InternVL2-Llama3-76B
|
||||
label:
|
||||
en_US: OpenGVLab/InternVL2-Llama3-76B
|
||||
model_type: llm
|
||||
features:
|
||||
- vision
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 8192
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
type: float
|
||||
default: 0.3
|
||||
min: 0.0
|
||||
max: 2.0
|
||||
help:
|
||||
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
|
||||
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
type: int
|
||||
default: 2000
|
||||
min: 1
|
||||
max: 2000
|
||||
help:
|
||||
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
|
||||
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
type: float
|
||||
default: 0.8
|
||||
min: 0.1
|
||||
max: 0.9
|
||||
help:
|
||||
zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
|
||||
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
|
||||
- name: top_k
|
||||
type: int
|
||||
min: 0
|
||||
max: 99
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
help:
|
||||
zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
|
||||
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
|
||||
- name: seed
|
||||
required: false
|
||||
type: int
|
||||
default: 1234
|
||||
label:
|
||||
zh_Hans: 随机种子
|
||||
en_US: Random seed
|
||||
help:
|
||||
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
|
||||
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
|
||||
- name: repetition_penalty
|
||||
required: false
|
||||
type: float
|
||||
default: 1.1
|
||||
label:
|
||||
zh_Hans: 重复惩罚
|
||||
en_US: Repetition penalty
|
||||
help:
|
||||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '21'
|
||||
output: '21'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
|
@@ -29,6 +29,9 @@ class SiliconflowLargeLanguageModel(OAIAPICompatLargeLanguageModel):
         user: Optional[str] = None,
     ) -> Union[LLMResult, Generator]:
         self._add_custom_parameters(credentials)
+        # {"response_format": "json_object"} need convert to {"response_format": {"type": "json_object"}}
+        if "response_format" in model_parameters:
+            model_parameters["response_format"] = {"type": model_parameters.get("response_format")}
         return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream)

     def validate_credentials(self, model: str, credentials: dict) -> None:
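The added lines rewrap a bare `response_format` string into the object form that OpenAI-compatible endpoints expect. A minimal standalone sketch of the transformation; the parameter dict below is a hypothetical input, for illustration only:

```python
def normalize_response_format(model_parameters: dict) -> dict:
    """Convert a bare string like "json_object" into the object form
    {"type": "json_object"} expected by OpenAI-compatible endpoints."""
    if "response_format" in model_parameters:
        model_parameters["response_format"] = {"type": model_parameters["response_format"]}
    return model_parameters


params = {"temperature": 0.7, "response_format": "json_object"}  # hypothetical input
print(normalize_response_format(params))
# {'temperature': 0.7, 'response_format': {'type': 'json_object'}}
```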
@@ -37,3 +37,4 @@ pricing:
   output: '4.13'
   unit: '0.000001'
   currency: RMB
+deprecated: true

@@ -37,3 +37,4 @@ pricing:
   output: '0'
   unit: '0.000001'
   currency: RMB
+deprecated: true
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '21'
|
||||
output: '21'
|
||||
|
|
|
@ -6,7 +6,7 @@ features:
  - agent-thought
model_properties:
  mode: chat
-  context_size: 32768
+  context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature
||||
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '4.13'
|
||||
output: '4.13'
|
||||
|
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '0'
|
||||
output: '0'
|
||||
|
|
|
@ -37,3 +37,4 @@ pricing:
|
|||
output: '1.26'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
||||
deprecated: true
|
||||
|
|
|
@ -37,3 +37,4 @@ pricing:
|
|||
output: '4.13'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
||||
deprecated: true
|
||||
|
|
|
@ -37,3 +37,4 @@ pricing:
|
|||
output: '0'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
||||
deprecated: true
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
model: Qwen/Qwen2-VL-72B-Instruct
|
||||
label:
|
||||
en_US: Qwen/Qwen2-VL-72B-Instruct
|
||||
model_type: llm
|
||||
features:
|
||||
- vision
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
type: float
|
||||
default: 0.3
|
||||
min: 0.0
|
||||
max: 2.0
|
||||
help:
|
||||
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
|
||||
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
type: int
|
||||
default: 2000
|
||||
min: 1
|
||||
max: 2000
|
||||
help:
|
||||
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
|
||||
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
type: float
|
||||
default: 0.8
|
||||
min: 0.1
|
||||
max: 0.9
|
||||
help:
|
||||
zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
|
||||
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
|
||||
- name: top_k
|
||||
type: int
|
||||
min: 0
|
||||
max: 99
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
help:
|
||||
zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
|
||||
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
|
||||
- name: seed
|
||||
required: false
|
||||
type: int
|
||||
default: 1234
|
||||
label:
|
||||
zh_Hans: 随机种子
|
||||
en_US: Random seed
|
||||
help:
|
||||
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
|
||||
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
|
||||
- name: repetition_penalty
|
||||
required: false
|
||||
type: float
|
||||
default: 1.1
|
||||
label:
|
||||
zh_Hans: 重复惩罚
|
||||
en_US: Repetition penalty
|
||||
help:
|
||||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '21'
|
||||
output: '21'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
|
@ -0,0 +1,84 @@
|
|||
model: Pro/Qwen/Qwen2-VL-7B-Instruct
|
||||
label:
|
||||
en_US: Pro/Qwen/Qwen2-VL-7B-Instruct
|
||||
model_type: llm
|
||||
features:
|
||||
- vision
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
type: float
|
||||
default: 0.3
|
||||
min: 0.0
|
||||
max: 2.0
|
||||
help:
|
||||
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
|
||||
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
type: int
|
||||
default: 2000
|
||||
min: 1
|
||||
max: 2000
|
||||
help:
|
||||
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
|
||||
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
type: float
|
||||
default: 0.8
|
||||
min: 0.1
|
||||
max: 0.9
|
||||
help:
|
||||
zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
|
||||
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
|
||||
- name: top_k
|
||||
type: int
|
||||
min: 0
|
||||
max: 99
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
help:
|
||||
zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
|
||||
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
|
||||
- name: seed
|
||||
required: false
|
||||
type: int
|
||||
default: 1234
|
||||
label:
|
||||
zh_Hans: 随机种子
|
||||
en_US: Random seed
|
||||
help:
|
||||
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
|
||||
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
|
||||
- name: repetition_penalty
|
||||
required: false
|
||||
type: float
|
||||
default: 1.1
|
||||
label:
|
||||
zh_Hans: 重复惩罚
|
||||
en_US: Repetition penalty
|
||||
help:
|
||||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '21'
|
||||
output: '21'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '0.7'
|
||||
output: '0.7'
|
||||
|
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '1.26'
|
||||
output: '1.26'
|
||||
|
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '4.13'
|
||||
output: '4.13'
|
||||
|
|
|
@ -32,6 +32,18 @@ parameter_rules:
|
|||
required: false
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '0'
|
||||
output: '0'
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
model: Qwen/Qwen2.5-Coder-32B-Instruct
|
||||
label:
|
||||
en_US: Qwen/Qwen2.5-Coder-32B-Instruct
|
||||
model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
type: float
|
||||
default: 0.3
|
||||
min: 0.0
|
||||
max: 2.0
|
||||
help:
|
||||
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
|
||||
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
type: int
|
||||
default: 8192
|
||||
min: 1
|
||||
max: 8192
|
||||
help:
|
||||
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
|
||||
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
type: float
|
||||
default: 0.8
|
||||
min: 0.1
|
||||
max: 0.9
|
||||
help:
|
||||
zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
|
||||
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
|
||||
- name: top_k
|
||||
type: int
|
||||
min: 0
|
||||
max: 99
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
help:
|
||||
zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
|
||||
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
|
||||
- name: seed
|
||||
required: false
|
||||
type: int
|
||||
default: 1234
|
||||
label:
|
||||
zh_Hans: 随机种子
|
||||
en_US: Random seed
|
||||
help:
|
||||
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
|
||||
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
|
||||
- name: repetition_penalty
|
||||
required: false
|
||||
type: float
|
||||
default: 1.1
|
||||
label:
|
||||
zh_Hans: 重复惩罚
|
||||
en_US: Repetition penalty
|
||||
help:
|
||||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '1.26'
|
||||
output: '1.26'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
|
@ -66,7 +66,17 @@ parameter_rules:
|
|||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
use_template: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '0'
|
||||
output: '0'
|
||||
|
|
|
@ -66,7 +66,17 @@ parameter_rules:
|
|||
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
|
||||
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
|
||||
- name: response_format
|
||||
use_template: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
pricing:
|
||||
input: '4.13'
|
||||
output: '4.13'
|
||||
|
|
|
@ -0,0 +1,5 @@
model: FunAudioLLM/SenseVoiceSmall
model_type: speech2text
model_properties:
  file_upload_limit: 1
  supported_file_extensions: mp3,wav
|
@ -3,3 +3,4 @@ model_type: speech2text
model_properties:
  file_upload_limit: 1
  supported_file_extensions: mp3,wav
deprecated: true
|
|
@ -1,5 +1,6 @@
import json
import random
from collections import UserDict
from datetime import datetime


@ -10,9 +11,9 @@ class ChatRole:
    FUNCTION = "function"


-class _Dict(dict):
-    __setattr__ = dict.__setitem__
-    __getattr__ = dict.__getitem__
+class _Dict(UserDict):
+    __setattr__ = UserDict.__setitem__
+    __getattr__ = UserDict.__getitem__
+
+    def __missing__(self, key):
+        return None
|
|
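A small self-contained illustration of the __missing__ hook that the new UserDict-based _Dict relies on (the class name here is illustrative and only mirrors that part of the change):

from collections import UserDict


class DefaultNoneDict(UserDict):
    # Same idea as _Dict.__missing__ above: absent keys yield None instead of raising KeyError.
    def __missing__(self, key):
        return None


d = DefaultNoneDict({"role": "assistant"})
print(d["role"])           # -> "assistant"
print(d["function_call"])  # -> None, no KeyError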
|
@ -178,6 +178,7 @@ class ElasticSearchVector(BaseVector):
                Field.VECTOR.value: {  # Make sure the dimension is correct here
                    "type": "dense_vector",
                    "dims": dim,
                    "index": True,
                    "similarity": "cosine",
                },
                Field.METADATA_KEY.value: {
|
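For reference, a sketch of the mapping dict this produces for the vector field; the field names "vector" and "metadata" are stand-ins for Field.VECTOR.value and Field.METADATA_KEY.value, and creating the index with the Elasticsearch client is not shown:

dim = 1024  # example embedding dimension

mappings = {
    "properties": {
        "vector": {
            "type": "dense_vector",
            "dims": dim,
            "index": True,
            "similarity": "cosine",  # the setting added in this hunk
        },
        "metadata": {"type": "object"},
    }
}

print(mappings["properties"]["vector"]["similarity"])  # -> "cosine"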
|
|
@ -78,3 +78,4 @@
- regex
- trello
- vanna
- fal
|
|
api/core/tools/provider/builtin/audio/_assets/icon.svg (new file, 3 lines)
|
@ -0,0 +1,3 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" width="200" height="200" viewBox="0 0 200 200" fill="none">
|
||||
<path d="M167.358 102.395C167.358 117.174 157.246 129.18 144.61 131.027H137.861C125.225 129.18 115.113 117.174 115.113 102.395H100.792C100.792 123.637 115.118 142.106 133.653 145.801V164.276H147.139V145.801C165.674 142.106 180 124.558 180 102.4H167.358V102.395ZM154.717 62.677C154.717 53.4397 147.979 46.9765 140.396 46.9765C138.523 46.9446 136.663 47.3273 134.924 48.1024C133.185 48.8775 131.603 50.0294 130.27 51.4909C128.936 52.9524 127.878 54.6943 127.157 56.6148C126.436 58.5354 126.066 60.5962 126.07 62.677V78.3775H154.717V70.4478V62.677ZM126.07 102.395C126.07 111.632 132.813 118.095 140.396 118.095C142.269 118.127 144.13 117.744 145.868 116.969C147.607 116.194 149.189 115.042 150.523 113.581C151.856 112.119 152.914 110.377 153.635 108.457C154.356 106.536 154.726 104.475 154.722 102.395V86.694H126.07V102.395ZM92.1297 45.8938L70.4796 21.7595L69.4235 20.5865L59.604 20L68.3674 20.5865L67.3113 21.7654L64.1429 25.2961L63.6149 25.8826L64.1429 27.0614L66.2552 29.4133L77.8723 42.3631H54.1099C35.1 43.5361 20.3146 61.1896 20.3146 81.7874V83.5527H28.2354V81.7932C28.2354 65.8992 39.8525 52.3628 54.1099 51.1899H77.8723L66.2552 64.1338L64.671 65.8992L64.1429 67.0722L63.6149 67.6645L64.1429 68.251L68.3674 72.9606L68.8954 73.5471L69.4235 72.9606L74.1759 67.6645L92.1297 47.6591L92.6578 47.0727L92.1297 45.8938ZM20 95.8496V118.213H30.033V107.034H50.099V168.821H40.066V180H70.165V168.821H60.132V107.034H80.198V118.213H90.231V95.8496H20Z" fill="#FF0099"/>
|
||||
</svg>
|
api/core/tools/provider/builtin/audio/audio.py (new file, 6 lines)
|
@ -0,0 +1,6 @@
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController


class AudioToolProvider(BuiltinToolProviderController):
    def _validate_credentials(self, credentials: dict) -> None:
        pass
|
api/core/tools/provider/builtin/audio/audio.yaml (new file, 11 lines)
|
@ -0,0 +1,11 @@
|
|||
identity:
|
||||
author: hjlarry
|
||||
name: audio
|
||||
label:
|
||||
en_US: Audio
|
||||
description:
|
||||
en_US: A tool for TTS and ASR.
|
||||
zh_Hans: 一个用于文本转语音和语音转文本的工具。
|
||||
icon: icon.svg
|
||||
tags:
|
||||
- utilities
|
api/core/tools/provider/builtin/audio/tools/asr.py (new file, 69 lines)
|
@ -0,0 +1,69 @@
|
|||
import io
|
||||
from typing import Any
|
||||
|
||||
from core.file.enums import FileType
|
||||
from core.file.file_manager import download
|
||||
from core.model_manager import ModelManager
|
||||
from core.model_runtime.entities.model_entities import ModelType
|
||||
from core.tools.entities.common_entities import I18nObject
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter, ToolParameterOption
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
from services.model_provider_service import ModelProviderService
|
||||
|
||||
|
||||
class ASRTool(BuiltinTool):
|
||||
def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> list[ToolInvokeMessage]:
|
||||
file = tool_parameters.get("audio_file")
|
||||
if file.type != FileType.AUDIO:
|
||||
return [self.create_text_message("not a valid audio file")]
|
||||
audio_binary = io.BytesIO(download(file))
|
||||
audio_binary.name = "temp.mp3"
|
||||
provider, model = tool_parameters.get("model").split("#")
|
||||
model_manager = ModelManager()
|
||||
model_instance = model_manager.get_model_instance(
|
||||
tenant_id=self.runtime.tenant_id,
|
||||
provider=provider,
|
||||
model_type=ModelType.SPEECH2TEXT,
|
||||
model=model,
|
||||
)
|
||||
text = model_instance.invoke_speech2text(
|
||||
file=audio_binary,
|
||||
user=user_id,
|
||||
)
|
||||
return [self.create_text_message(text)]
|
||||
|
||||
def get_available_models(self) -> list[tuple[str, str]]:
|
||||
model_provider_service = ModelProviderService()
|
||||
models = model_provider_service.get_models_by_model_type(
|
||||
tenant_id=self.runtime.tenant_id, model_type="speech2text"
|
||||
)
|
||||
items = []
|
||||
for provider_model in models:
|
||||
provider = provider_model.provider
|
||||
for model in provider_model.models:
|
||||
items.append((provider, model.model))
|
||||
return items
|
||||
|
||||
def get_runtime_parameters(self) -> list[ToolParameter]:
|
||||
parameters = []
|
||||
|
||||
options = []
|
||||
for provider, model in self.get_available_models():
|
||||
option = ToolParameterOption(value=f"{provider}#{model}", label=I18nObject(en_US=f"{model}({provider})"))
|
||||
options.append(option)
|
||||
|
||||
parameters.append(
|
||||
ToolParameter(
|
||||
name="model",
|
||||
label=I18nObject(en_US="Model", zh_Hans="Model"),
|
||||
human_description=I18nObject(
|
||||
en_US="All available ASR models. You can config model in the Model Provider of Settings.",
|
||||
zh_Hans="所有可用的 ASR 模型。你可以在设置中的模型供应商里配置。",
|
||||
),
|
||||
type=ToolParameter.ToolParameterType.SELECT,
|
||||
form=ToolParameter.ToolParameterForm.FORM,
|
||||
required=True,
|
||||
options=options,
|
||||
)
|
||||
)
|
||||
return parameters
|
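A short sketch of the "provider#model" value encoding used by the ASR tool's model select parameter above (standalone Python; the example values are taken from elsewhere in this diff):

def encode_model_option(provider: str, model: str) -> str:
    # get_runtime_parameters builds option values as "<provider>#<model>" ...
    return f"{provider}#{model}"


def decode_model_option(value: str) -> tuple[str, str]:
    # ... and _invoke splits the chosen value back apart.
    provider, model = value.split("#")
    return provider, model


value = encode_model_option("siliconflow", "FunAudioLLM/SenseVoiceSmall")
print(value)                       # -> "siliconflow#FunAudioLLM/SenseVoiceSmall"
print(decode_model_option(value))  # -> ("siliconflow", "FunAudioLLM/SenseVoiceSmall")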
api/core/tools/provider/builtin/audio/tools/asr.yaml (new file, 22 lines)
|
@ -0,0 +1,22 @@
|
|||
identity:
|
||||
name: asr
|
||||
author: hjlarry
|
||||
label:
|
||||
en_US: Speech To Text
|
||||
description:
|
||||
human:
|
||||
en_US: Convert audio file to text.
|
||||
zh_Hans: 将音频文件转换为文本。
|
||||
llm: Convert audio file to text.
|
||||
parameters:
|
||||
- name: audio_file
|
||||
type: file
|
||||
required: true
|
||||
label:
|
||||
en_US: Audio File
|
||||
zh_Hans: 音频文件
|
||||
human_description:
|
||||
en_US: The audio file to be converted.
|
||||
zh_Hans: 要转换的音频文件。
|
||||
llm_description: The audio file to be converted.
|
||||
form: llm
|
api/core/tools/provider/builtin/audio/tools/tts.py (new file, 89 lines)
|
@ -0,0 +1,89 @@
|
|||
import io
|
||||
from typing import Any
|
||||
|
||||
from core.model_manager import ModelManager
|
||||
from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType
|
||||
from core.tools.entities.common_entities import I18nObject
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter, ToolParameterOption
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
from services.model_provider_service import ModelProviderService
|
||||
|
||||
|
||||
class TTSTool(BuiltinTool):
|
||||
def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> list[ToolInvokeMessage]:
|
||||
provider, model = tool_parameters.get("model").split("#")
|
||||
voice = tool_parameters.get(f"voice#{provider}#{model}")
|
||||
model_manager = ModelManager()
|
||||
model_instance = model_manager.get_model_instance(
|
||||
tenant_id=self.runtime.tenant_id,
|
||||
provider=provider,
|
||||
model_type=ModelType.TTS,
|
||||
model=model,
|
||||
)
|
||||
tts = model_instance.invoke_tts(
|
||||
content_text=tool_parameters.get("text"),
|
||||
user=user_id,
|
||||
tenant_id=self.runtime.tenant_id,
|
||||
voice=voice,
|
||||
)
|
||||
buffer = io.BytesIO()
|
||||
for chunk in tts:
|
||||
buffer.write(chunk)
|
||||
|
||||
wav_bytes = buffer.getvalue()
|
||||
return [
|
||||
self.create_text_message("Audio generated successfully"),
|
||||
self.create_blob_message(
|
||||
blob=wav_bytes,
|
||||
meta={"mime_type": "audio/x-wav"},
|
||||
save_as=self.VariableKey.AUDIO,
|
||||
),
|
||||
]
|
||||
|
||||
def get_available_models(self) -> list[tuple[str, str, list[Any]]]:
|
||||
model_provider_service = ModelProviderService()
|
||||
models = model_provider_service.get_models_by_model_type(tenant_id=self.runtime.tenant_id, model_type="tts")
|
||||
items = []
|
||||
for provider_model in models:
|
||||
provider = provider_model.provider
|
||||
for model in provider_model.models:
|
||||
voices = model.model_properties.get(ModelPropertyKey.VOICES, [])
|
||||
items.append((provider, model.model, voices))
|
||||
return items
|
||||
|
||||
def get_runtime_parameters(self) -> list[ToolParameter]:
|
||||
parameters = []
|
||||
|
||||
options = []
|
||||
for provider, model, voices in self.get_available_models():
|
||||
option = ToolParameterOption(value=f"{provider}#{model}", label=I18nObject(en_US=f"{model}({provider})"))
|
||||
options.append(option)
|
||||
parameters.append(
|
||||
ToolParameter(
|
||||
name=f"voice#{provider}#{model}",
|
||||
label=I18nObject(en_US=f"Voice of {model}({provider})"),
|
||||
type=ToolParameter.ToolParameterType.SELECT,
|
||||
form=ToolParameter.ToolParameterForm.FORM,
|
||||
options=[
|
||||
ToolParameterOption(value=voice.get("mode"), label=I18nObject(en_US=voice.get("name")))
|
||||
for voice in voices
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
parameters.insert(
|
||||
0,
|
||||
ToolParameter(
|
||||
name="model",
|
||||
label=I18nObject(en_US="Model", zh_Hans="Model"),
|
||||
human_description=I18nObject(
|
||||
en_US="All available TTS models. You can config model in the Model Provider of Settings.",
|
||||
zh_Hans="所有可用的 TTS 模型。你可以在设置中的模型供应商里配置。",
|
||||
),
|
||||
type=ToolParameter.ToolParameterType.SELECT,
|
||||
form=ToolParameter.ToolParameterForm.FORM,
|
||||
required=True,
|
||||
options=options,
|
||||
),
|
||||
)
|
||||
return parameters
|
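Similarly, a sketch of how the TTS tool derives its per-model voice parameter name and looks the chosen voice up again in _invoke (the provider, model, and voice values are illustrative):

def voice_parameter_name(provider: str, model: str) -> str:
    # Each TTS model gets its own voice selector, keyed "voice#<provider>#<model>".
    return f"voice#{provider}#{model}"


tool_parameters = {
    "model": "openai#tts-1",
    "voice#openai#tts-1": "alloy",
}

provider, model = tool_parameters["model"].split("#")
voice = tool_parameters.get(voice_parameter_name(provider, model))
print(voice)  # -> "alloy"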
api/core/tools/provider/builtin/audio/tools/tts.yaml (new file, 22 lines)
|
@ -0,0 +1,22 @@
|
|||
identity:
|
||||
name: tts
|
||||
author: hjlarry
|
||||
label:
|
||||
en_US: Text To Speech
|
||||
description:
|
||||
human:
|
||||
en_US: Convert text to audio file.
|
||||
zh_Hans: 将文本转换为音频文件。
|
||||
llm: Convert text to audio file.
|
||||
parameters:
|
||||
- name: text
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Text
|
||||
zh_Hans: 文本
|
||||
human_description:
|
||||
en_US: The text to be converted.
|
||||
zh_Hans: 要转换的文本。
|
||||
llm_description: The text to be converted.
|
||||
form: llm
|
api/core/tools/provider/builtin/email/_assets/icon.svg (new file, 1 line)
File diff suppressed because one or more lines are too long
api/core/tools/provider/builtin/email/email.py (new file, 7 lines)
|
@ -0,0 +1,7 @@
from core.tools.provider.builtin.email.tools.send_mail import SendMailTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController


class SmtpProvider(BuiltinToolProviderController):
    def _validate_credentials(self, credentials: dict) -> None:
        SendMailTool()
|
api/core/tools/provider/builtin/email/email.yaml (new file, 83 lines)
|
@ -0,0 +1,83 @@
|
|||
identity:
|
||||
author: wakaka6
|
||||
name: email
|
||||
label:
|
||||
en_US: email
|
||||
zh_Hans: 电子邮件
|
||||
description:
|
||||
en_US: Send email via the SMTP protocol
|
||||
zh_Hans: 通过smtp协议发送电子邮件
|
||||
icon: icon.svg
|
||||
tags:
|
||||
- utilities
|
||||
credentials_for_provider:
|
||||
email_account:
|
||||
type: text-input
|
||||
required: true
|
||||
label:
|
||||
en_US: email account
|
||||
zh_Hans: 邮件账号
|
||||
placeholder:
|
||||
en_US: Input your email account
|
||||
zh_Hans: 输入你的邮箱账号
|
||||
help:
|
||||
en_US: email account
|
||||
zh_Hans: 邮件账号
|
||||
email_password:
|
||||
type: secret-input
|
||||
required: true
|
||||
label:
|
||||
en_US: email password
|
||||
zh_Hans: 邮件密码
|
||||
placeholder:
|
||||
en_US: email password
|
||||
zh_Hans: 邮件密码
|
||||
help:
|
||||
en_US: email password
|
||||
zh_Hans: 邮件密码
|
||||
smtp_server:
|
||||
type: text-input
|
||||
required: true
|
||||
label:
|
||||
en_US: smtp server
|
||||
zh_Hans: 发信smtp服务器地址
|
||||
placeholder:
|
||||
en_US: smtp server
|
||||
zh_Hans: 发信smtp服务器地址
|
||||
help:
|
||||
en_US: smtp server
|
||||
zh_Hans: 发信smtp服务器地址
|
||||
smtp_port:
|
||||
type: text-input
|
||||
required: true
|
||||
label:
|
||||
en_US: smtp server port
|
||||
zh_Hans: 发信smtp服务器端口
|
||||
placeholder:
|
||||
en_US: smtp server port
|
||||
zh_Hans: 发信smtp服务器端口
|
||||
help:
|
||||
en_US: smtp server port
|
||||
zh_Hans: 发信smtp服务器端口
|
||||
encrypt_method:
|
||||
type: select
|
||||
required: true
|
||||
options:
|
||||
- value: NONE
|
||||
label:
|
||||
en_US: NONE
|
||||
zh_Hans: 无加密
|
||||
- value: SSL
|
||||
label:
|
||||
en_US: SSL
|
||||
zh_Hans: SSL加密
|
||||
- value: TLS
|
||||
label:
|
||||
en_US: START TLS
|
||||
zh_Hans: START TLS加密
|
||||
label:
|
||||
en_US: encrypt method
|
||||
zh_Hans: 加密方式
|
||||
help:
|
||||
en_US: smtp server encrypt method
|
||||
zh_Hans: 发信smtp服务器加密方式
|
api/core/tools/provider/builtin/email/tools/send.py (new file, 53 lines)
|
@ -0,0 +1,53 @@
import logging
import smtplib
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from pydantic import BaseModel


class SendEmailToolParameters(BaseModel):
    smtp_server: str
    smtp_port: int

    email_account: str
    email_password: str

    sender_to: str
    subject: str
    email_content: str
    encrypt_method: str


def send_mail(params: SendEmailToolParameters):
    timeout = 60
    msg = MIMEMultipart("alternative")
    msg["From"] = params.email_account
    msg["To"] = params.sender_to
    msg["Subject"] = params.subject
    msg.attach(MIMEText(params.email_content, "plain"))
    msg.attach(MIMEText(params.email_content, "html"))

    ctx = ssl.create_default_context()

    if params.encrypt_method.upper() == "SSL":
        try:
            with smtplib.SMTP_SSL(params.smtp_server, params.smtp_port, context=ctx, timeout=timeout) as server:
                server.login(params.email_account, params.email_password)
                server.sendmail(params.email_account, params.sender_to, msg.as_string())
                return True
        except Exception as e:
            logging.exception("send email failed: %s", e)
            return False
    else:  # NONE or TLS
        try:
            with smtplib.SMTP(params.smtp_server, params.smtp_port, timeout=timeout) as server:
                if params.encrypt_method.upper() == "TLS":
                    server.starttls(context=ctx)
                server.login(params.email_account, params.email_password)
                server.sendmail(params.email_account, params.sender_to, msg.as_string())
                return True
        except Exception as e:
            logging.exception("send email failed: %s", e)
            return False
|
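A hedged usage sketch for the send_mail helper above; every server and account value is a placeholder, and running it would actually attempt an SMTP connection:

from core.tools.provider.builtin.email.tools.send import SendEmailToolParameters, send_mail

params = SendEmailToolParameters(
    smtp_server="smtp.example.com",      # placeholder SMTP host
    smtp_port=465,
    email_account="sender@example.com",  # placeholder credentials
    email_password="app-password",
    sender_to="receiver@example.com",
    subject="Hello from Dify",
    email_content="<p>Test message</p>",
    encrypt_method="SSL",                # "NONE", "SSL", or "TLS", matching the provider config
)

# Returns True on success, False on failure (failures are logged, not raised).
print(send_mail(params))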
api/core/tools/provider/builtin/email/tools/send_mail.py (new file, 66 lines)
|
@ -0,0 +1,66 @@
|
|||
import re
|
||||
from typing import Any, Union
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.provider.builtin.email.tools.send import (
|
||||
SendEmailToolParameters,
|
||||
send_mail,
|
||||
)
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class SendMailTool(BuiltinTool):
|
||||
def _invoke(
|
||||
self, user_id: str, tool_parameters: dict[str, Any]
|
||||
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
|
||||
"""
|
||||
invoke tools
|
||||
"""
|
||||
sender = self.runtime.credentials.get("email_account", "")
|
||||
email_rgx = re.compile(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$")
|
||||
password = self.runtime.credentials.get("email_password", "")
|
||||
smtp_server = self.runtime.credentials.get("smtp_server", "")
|
||||
if not smtp_server:
|
||||
return self.create_text_message("please input smtp server")
|
||||
smtp_port = self.runtime.credentials.get("smtp_port", "")
|
||||
try:
|
||||
smtp_port = int(smtp_port)
|
||||
except ValueError:
|
||||
return self.create_text_message("Invalid parameter smtp_port(should be int)")
|
||||
|
||||
if not sender:
|
||||
return self.create_text_message("please input sender")
|
||||
if not email_rgx.match(sender):
|
||||
return self.create_text_message("Invalid parameter userid, the sender is not a mailbox")
|
||||
|
||||
receiver_email = tool_parameters["send_to"]
|
||||
if not receiver_email:
|
||||
return self.create_text_message("please input receiver email")
|
||||
if not email_rgx.match(receiver_email):
|
||||
return self.create_text_message("Invalid parameter receiver email, the receiver email is not a mailbox")
|
||||
email_content = tool_parameters.get("email_content", "")
|
||||
|
||||
if not email_content:
|
||||
return self.create_text_message("please input email content")
|
||||
|
||||
subject = tool_parameters.get("subject", "")
|
||||
if not subject:
|
||||
return self.create_text_message("please input email subject")
|
||||
|
||||
encrypt_method = self.runtime.credentials.get("encrypt_method", "")
|
||||
if not encrypt_method:
|
||||
return self.create_text_message("please input encrypt method")
|
||||
|
||||
send_email_params = SendEmailToolParameters(
|
||||
smtp_server=smtp_server,
|
||||
smtp_port=smtp_port,
|
||||
email_account=sender,
|
||||
email_password=password,
|
||||
sender_to=receiver_email,
|
||||
subject=subject,
|
||||
email_content=email_content,
|
||||
encrypt_method=encrypt_method,
|
||||
)
|
||||
if send_mail(send_email_params):
|
||||
return self.create_text_message("send email success")
|
||||
return self.create_text_message("send email failed")
|
api/core/tools/provider/builtin/email/tools/send_mail.yaml (new file, 46 lines)
|
@ -0,0 +1,46 @@
|
|||
identity:
|
||||
name: send_mail
|
||||
author: wakaka6
|
||||
label:
|
||||
en_US: send email
|
||||
zh_Hans: 发送邮件
|
||||
icon: icon.svg
|
||||
description:
|
||||
human:
|
||||
en_US: A tool for sending email
|
||||
zh_Hans: 用于发送邮件
|
||||
llm: A tool for sending email
|
||||
parameters:
|
||||
- name: send_to
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Recipient email account
|
||||
zh_Hans: 收件人邮箱账号
|
||||
human_description:
|
||||
en_US: Recipient email account
|
||||
zh_Hans: 收件人邮箱账号
|
||||
llm_description: Recipient email account
|
||||
form: llm
|
||||
- name: subject
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: email subject
|
||||
zh_Hans: 邮件主题
|
||||
human_description:
|
||||
en_US: email subject
|
||||
zh_Hans: 邮件主题
|
||||
llm_description: email subject
|
||||
form: llm
|
||||
- name: email_content
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: email content
|
||||
zh_Hans: 邮件内容
|
||||
human_description:
|
||||
en_US: email content
|
||||
zh_Hans: 邮件内容
|
||||
llm_description: email content
|
||||
form: llm
|
|
@ -0,0 +1,75 @@
|
|||
import json
|
||||
import re
|
||||
from typing import Any, Union
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.provider.builtin.email.tools.send import (
|
||||
SendEmailToolParameters,
|
||||
send_mail,
|
||||
)
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class SendMailTool(BuiltinTool):
|
||||
def _invoke(
|
||||
self, user_id: str, tool_parameters: dict[str, Any]
|
||||
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
|
||||
"""
|
||||
invoke tools
|
||||
"""
|
||||
sender = self.runtime.credentials.get("email_account", "")
|
||||
email_rgx = re.compile(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$")
|
||||
password = self.runtime.credentials.get("email_password", "")
|
||||
smtp_server = self.runtime.credentials.get("smtp_server", "")
|
||||
if not smtp_server:
|
||||
return self.create_text_message("please input smtp server")
|
||||
smtp_port = self.runtime.credentials.get("smtp_port", "")
|
||||
try:
|
||||
smtp_port = int(smtp_port)
|
||||
except ValueError:
|
||||
return self.create_text_message("Invalid parameter smtp_port(should be int)")
|
||||
|
||||
if not sender:
|
||||
return self.create_text_message("please input sender")
|
||||
if not email_rgx.match(sender):
|
||||
return self.create_text_message("Invalid parameter userid, the sender is not a mailbox")
|
||||
|
||||
receivers_email = tool_parameters["send_to"]
|
||||
if not receivers_email:
|
||||
return self.create_text_message("please input receiver email")
|
||||
receivers_email = json.loads(receivers_email)
|
||||
for receiver in receivers_email:
|
||||
if not email_rgx.match(receiver):
|
||||
return self.create_text_message(
|
||||
f"Invalid parameter receiver email, the receiver email({receiver}) is not a mailbox"
|
||||
)
|
||||
email_content = tool_parameters.get("email_content", "")
|
||||
|
||||
if not email_content:
|
||||
return self.create_text_message("please input email content")
|
||||
|
||||
subject = tool_parameters.get("subject", "")
|
||||
if not subject:
|
||||
return self.create_text_message("please input email subject")
|
||||
|
||||
encrypt_method = self.runtime.credentials.get("encrypt_method", "")
|
||||
if not encrypt_method:
|
||||
return self.create_text_message("please input encrypt method")
|
||||
|
||||
msg = {}
|
||||
for receiver in receivers_email:
|
||||
send_email_params = SendEmailToolParameters(
|
||||
smtp_server=smtp_server,
|
||||
smtp_port=smtp_port,
|
||||
email_account=sender,
|
||||
email_password=password,
|
||||
sender_to=receiver,
|
||||
subject=subject,
|
||||
email_content=email_content,
|
||||
encrypt_method=encrypt_method,
|
||||
)
|
||||
if send_mail(send_email_params):
|
||||
msg[receiver] = "send email success"
|
||||
else:
|
||||
msg[receiver] = "send email failed"
|
||||
return self.create_text_message(json.dumps(msg))
|
|
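A short sketch of the send_to value the batch tool above expects: a JSON-encoded list of recipient addresses, parsed with json.loads and validated one by one against the same regex (the addresses are placeholders):

import json
import re

email_rgx = re.compile(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$")

send_to = '["alice@example.com", "bob@example.com"]'  # the tool parameter is a JSON string

for receiver in json.loads(send_to):
    print(receiver, bool(email_rgx.match(receiver)))  # both -> True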
@ -0,0 +1,46 @@
|
|||
identity:
|
||||
name: send_mail_batch
|
||||
author: wakaka6
|
||||
label:
|
||||
en_US: send email to multiple recipients
|
||||
zh_Hans: 发送邮件给多个收件人
|
||||
icon: icon.svg
|
||||
description:
|
||||
human:
|
||||
en_US: A tool for sending email to multiple recipients
|
||||
zh_Hans: 用于发送邮件给多个收件人的工具
|
||||
llm: A tool for sending email to multiple recipients
|
||||
parameters:
|
||||
- name: send_to
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Recipient email account(json list)
|
||||
zh_Hans: 收件人邮箱账号(json list)
|
||||
human_description:
|
||||
en_US: Recipient email account
|
||||
zh_Hans: 收件人邮箱账号
|
||||
llm_description: A list of recipient email account(json format)
|
||||
form: llm
|
||||
- name: subject
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: email subject
|
||||
zh_Hans: 邮件主题
|
||||
human_description:
|
||||
en_US: email subject
|
||||
zh_Hans: 邮件主题
|
||||
llm_description: email subject
|
||||
form: llm
|
||||
- name: email_content
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: email content
|
||||
zh_Hans: 邮件内容
|
||||
human_description:
|
||||
en_US: email content
|
||||
zh_Hans: 邮件内容
|
||||
llm_description: email content
|
||||
form: llm
|
api/core/tools/provider/builtin/fal/_assets/icon.svg (new file, 4 lines)
|
@ -0,0 +1,4 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="32" height="32">
|
||||
<path d="M0 0 C3.96 0 7.92 0 12 0 C12.4125 0.928125 12.825 1.85625 13.25 2.8125 C15.56104487 7.02190315 17.49701732 8.49900577 22 10 C22 13.96 22 17.92 22 22 C21.071875 22.4125 20.14375 22.825 19.1875 23.25 C14.97809685 25.56104487 13.50099423 27.49701732 12 32 C8.04 32 4.08 32 0 32 C-0.4125 31.071875 -0.825 30.14375 -1.25 29.1875 C-3.56104487 24.97809685 -5.49701732 23.50099423 -10 22 C-10 18.04 -10 14.08 -10 10 C-9.071875 9.5875 -8.14375 9.175 -7.1875 8.75 C-2.97809685 6.43895513 -1.50099423 4.50298268 0 0 Z M-2 11 C-3.42662219 13.85324437 -3.31033868 15.83454549 -3 19 C-1.20006226 21.69990662 0.083773 23.5418865 3 25 C7.1364408 25.56406011 8.76045933 25.14638597 12.375 22.9375 C15.26054626 20.20817124 15.26054626 20.20817124 15.6875 16.5625 C14.76325283 11.77321919 13.68514918 10.2147046 10 7 C4.54838272 6.02649691 1.87056683 7.12943317 -2 11 Z " fill="#EC0648" transform="translate(10,0)"/>
|
||||
</svg>
|
api/core/tools/provider/builtin/fal/fal.py (new file, 20 lines)
|
@ -0,0 +1,20 @@
import requests

from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController


class FalProvider(BuiltinToolProviderController):
    def _validate_credentials(self, credentials: dict) -> None:
        url = "https://fal.run/fal-ai/flux/dev"
        headers = {
            "Authorization": f"Key {credentials.get('fal_api_key')}",
            "Content-Type": "application/json",
        }
        data = {"prompt": "Cat"}

        response = requests.post(url, json=data, headers=headers)
        if response.status_code == 401:
            raise ToolProviderCredentialValidationError("FAL API key is invalid")
        elif response.status_code != 200:
            raise ToolProviderCredentialValidationError(f"FAL API key validation failed: {response.text}")
|
api/core/tools/provider/builtin/fal/fal.yaml (new file, 21 lines)
|
@ -0,0 +1,21 @@
|
|||
identity:
|
||||
author: Kalo Chin
|
||||
name: fal
|
||||
label:
|
||||
en_US: FAL
|
||||
zh_CN: FAL
|
||||
description:
|
||||
en_US: The image generation API provided by FAL.
|
||||
zh_CN: FAL 提供的图像生成 API。
|
||||
icon: icon.svg
|
||||
tags:
|
||||
- image
|
||||
credentials_for_provider:
|
||||
fal_api_key:
|
||||
type: secret-input
|
||||
required: true
|
||||
label:
|
||||
en_US: FAL API Key
|
||||
placeholder:
|
||||
en_US: Please input your FAL API key
|
||||
url: https://fal.ai/dashboard/keys
|
api/core/tools/provider/builtin/fal/tools/flux_1_1_pro.py (new file, 46 lines)
|
@ -0,0 +1,46 @@
|
|||
from typing import Any, Union
|
||||
|
||||
import requests
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class Flux11ProTool(BuiltinTool):
|
||||
def _invoke(
|
||||
self, user_id: str, tool_parameters: dict[str, Any]
|
||||
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
|
||||
headers = {
|
||||
"Authorization": f"Key {self.runtime.credentials['fal_api_key']}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
prompt = tool_parameters.get("prompt", "")
|
||||
sanitized_prompt = prompt.replace("\\", "") # Remove backslashes from the prompt which may cause errors
|
||||
|
||||
payload = {
|
||||
"prompt": sanitized_prompt,
|
||||
"image_size": tool_parameters.get("image_size", "landscape_4_3"),
|
||||
"seed": tool_parameters.get("seed"),
|
||||
"sync_mode": tool_parameters.get("sync_mode", False),
|
||||
"num_images": tool_parameters.get("num_images", 1),
|
||||
"enable_safety_checker": tool_parameters.get("enable_safety_checker", True),
|
||||
"safety_tolerance": tool_parameters.get("safety_tolerance", "2"),
|
||||
}
|
||||
|
||||
url = "https://fal.run/fal-ai/flux-pro/v1.1"
|
||||
|
||||
response = requests.post(url, json=payload, headers=headers)
|
||||
|
||||
if response.status_code != 200:
|
||||
return self.create_text_message(f"Got Error Response: {response.text}")
|
||||
|
||||
res = response.json()
|
||||
result = [self.create_json_message(res)]
|
||||
|
||||
for image_info in res.get("images", []):
|
||||
image_url = image_info.get("url")
|
||||
if image_url:
|
||||
result.append(self.create_image_message(image=image_url, save_as=self.VariableKey.IMAGE.value))
|
||||
|
||||
return result
|
api/core/tools/provider/builtin/fal/tools/flux_1_1_pro.yaml (new file, 147 lines)
|
@ -0,0 +1,147 @@
|
|||
identity:
|
||||
name: flux_1_1_pro
|
||||
author: Kalo Chin
|
||||
label:
|
||||
en_US: FLUX 1.1 [pro]
|
||||
zh_Hans: FLUX 1.1 [pro]
|
||||
icon: icon.svg
|
||||
description:
|
||||
human:
|
||||
en_US: FLUX 1.1 [pro] is an enhanced version of FLUX.1 [pro], improved image generation capabilities, delivering superior composition, detail, and artistic fidelity compared to its predecessor.
|
||||
zh_Hans: FLUX 1.1 [pro] 是 FLUX.1 [pro] 的增强版,改进了图像生成能力,与其前身相比,提供了更出色的构图、细节和艺术保真度。
|
||||
llm: This tool generates images from prompts using FAL's FLUX 1.1 [pro] model.
|
||||
parameters:
|
||||
- name: prompt
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Prompt
|
||||
zh_Hans: 提示词
|
||||
human_description:
|
||||
en_US: The text prompt used to generate the image.
|
||||
zh_Hans: 用于生成图片的文字提示词。
|
||||
llm_description: This prompt text will be used to generate the image.
|
||||
form: llm
|
||||
- name: image_size
|
||||
type: select
|
||||
required: false
|
||||
options:
|
||||
- value: square_hd
|
||||
label:
|
||||
en_US: Square HD
|
||||
zh_Hans: 方形高清
|
||||
- value: square
|
||||
label:
|
||||
en_US: Square
|
||||
zh_Hans: 方形
|
||||
- value: portrait_4_3
|
||||
label:
|
||||
en_US: Portrait 4:3
|
||||
zh_Hans: 竖屏 4:3
|
||||
- value: portrait_16_9
|
||||
label:
|
||||
en_US: Portrait 16:9
|
||||
zh_Hans: 竖屏 16:9
|
||||
- value: landscape_4_3
|
||||
label:
|
||||
en_US: Landscape 4:3
|
||||
zh_Hans: 横屏 4:3
|
||||
- value: landscape_16_9
|
||||
label:
|
||||
en_US: Landscape 16:9
|
||||
zh_Hans: 横屏 16:9
|
||||
default: landscape_4_3
|
||||
label:
|
||||
en_US: Image Size
|
||||
zh_Hans: 图片大小
|
||||
human_description:
|
||||
en_US: The size of the generated image.
|
||||
zh_Hans: 生成图像的尺寸。
|
||||
form: form
|
||||
- name: num_images
|
||||
type: number
|
||||
required: false
|
||||
default: 1
|
||||
min: 1
|
||||
max: 1
|
||||
label:
|
||||
en_US: Number of Images
|
||||
zh_Hans: 图片数量
|
||||
human_description:
|
||||
en_US: The number of images to generate.
|
||||
zh_Hans: 要生成的图片数量。
|
||||
form: form
|
||||
- name: safety_tolerance
|
||||
type: select
|
||||
required: false
|
||||
options:
|
||||
- value: "1"
|
||||
label:
|
||||
en_US: "1 (Most strict)"
|
||||
zh_Hans: "1(最严格)"
|
||||
- value: "2"
|
||||
label:
|
||||
en_US: "2"
|
||||
zh_Hans: "2"
|
||||
- value: "3"
|
||||
label:
|
||||
en_US: "3"
|
||||
zh_Hans: "3"
|
||||
- value: "4"
|
||||
label:
|
||||
en_US: "4"
|
||||
zh_Hans: "4"
|
||||
- value: "5"
|
||||
label:
|
||||
en_US: "5"
|
||||
zh_Hans: "5"
|
||||
- value: "6"
|
||||
label:
|
||||
en_US: "6 (Most permissive)"
|
||||
zh_Hans: "6(最宽松)"
|
||||
default: "2"
|
||||
label:
|
||||
en_US: Safety Tolerance
|
||||
zh_Hans: 安全容忍度
|
||||
human_description:
|
||||
en_US: The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.
|
||||
zh_Hans: 生成图像的安全容忍级别,1 为最严格,6 为最宽松。
|
||||
form: form
|
||||
- name: seed
|
||||
type: number
|
||||
required: false
|
||||
min: 0
|
||||
max: 9999999999
|
||||
label:
|
||||
en_US: Seed
|
||||
zh_Hans: 种子
|
||||
human_description:
|
||||
en_US: The same seed and prompt can produce similar images.
|
||||
zh_Hans: 相同的种子和提示词可以产生相似的图像。
|
||||
form: form
|
||||
- name: enable_safety_checker
|
||||
type: boolean
|
||||
required: false
|
||||
default: true
|
||||
label:
|
||||
en_US: Enable Safety Checker
|
||||
zh_Hans: 启用安全检查器
|
||||
human_description:
|
||||
en_US: Enable or disable the safety checker.
|
||||
zh_Hans: 启用或禁用安全检查器。
|
||||
form: form
|
||||
- name: sync_mode
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Sync Mode
|
||||
zh_Hans: 同步模式
|
||||
human_description:
|
||||
en_US: >
|
||||
If set to true, the function will wait for the image to be generated and uploaded before returning the response.
|
||||
This will increase the latency but allows you to get the image directly in the response without going through the CDN.
|
||||
zh_Hans: >
|
||||
如果设置为 true,函数将在生成并上传图像后再返回响应。
|
||||
这将增加函数的延迟,但可以让您直接在响应中获取图像,而无需通过 CDN。
|
||||
form: form
|
|
@ -0,0 +1,47 @@
|
|||
from typing import Any, Union
|
||||
|
||||
import requests
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class Flux11ProUltraTool(BuiltinTool):
|
||||
def _invoke(
|
||||
self, user_id: str, tool_parameters: dict[str, Any]
|
||||
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
|
||||
headers = {
|
||||
"Authorization": f"Key {self.runtime.credentials['fal_api_key']}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
prompt = tool_parameters.get("prompt", "")
|
||||
sanitized_prompt = prompt.replace("\\", "") # Remove backslashes from the prompt which may cause errors
|
||||
|
||||
payload = {
|
||||
"prompt": sanitized_prompt,
|
||||
"seed": tool_parameters.get("seed"),
|
||||
"sync_mode": tool_parameters.get("sync_mode", False),
|
||||
"num_images": tool_parameters.get("num_images", 1),
|
||||
"enable_safety_checker": tool_parameters.get("enable_safety_checker", True),
|
||||
"safety_tolerance": str(tool_parameters.get("safety_tolerance", "2")),
|
||||
"aspect_ratio": tool_parameters.get("aspect_ratio", "16:9"),
|
||||
"raw": tool_parameters.get("raw", False),
|
||||
}
|
||||
|
||||
url = "https://fal.run/fal-ai/flux-pro/v1.1-ultra"
|
||||
|
||||
response = requests.post(url, json=payload, headers=headers)
|
||||
|
||||
if response.status_code != 200:
|
||||
return self.create_text_message(f"Got Error Response: {response.text}")
|
||||
|
||||
res = response.json()
|
||||
result = [self.create_json_message(res)]
|
||||
|
||||
for image_info in res.get("images", []):
|
||||
image_url = image_info.get("url")
|
||||
if image_url:
|
||||
result.append(self.create_image_message(image=image_url, save_as=self.VariableKey.IMAGE.value))
|
||||
|
||||
return result
|
|
@ -0,0 +1,162 @@
|
|||
identity:
|
||||
name: flux_1_1_pro_ultra
|
||||
author: Kalo Chin
|
||||
label:
|
||||
en_US: FLUX 1.1 [pro] ultra
|
||||
zh_Hans: FLUX 1.1 [pro] ultra
|
||||
icon: icon.svg
|
||||
description:
|
||||
human:
|
||||
en_US: FLUX 1.1 [pro] ultra is the newest version of FLUX 1.1 [pro], maintaining professional-grade image quality while delivering up to 2K resolution with improved photo realism.
|
||||
zh_Hans: FLUX 1.1 [pro] ultra 是 FLUX 1.1 [pro] 的最新版本,保持了专业级的图像质量,同时以改进的照片真实感提供高达 2K 的分辨率。
|
||||
llm: This tool generates images from prompts using FAL's FLUX 1.1 [pro] ultra model.
|
||||
parameters:
|
||||
- name: prompt
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Prompt
|
||||
zh_Hans: 提示词
|
||||
human_description:
|
||||
en_US: The text prompt used to generate the image.
|
||||
zh_Hans: 用于生成图像的文本提示。
|
||||
llm_description: This prompt text will be used to generate the image.
|
||||
form: llm
|
||||
- name: aspect_ratio
|
||||
type: select
|
||||
required: false
|
||||
options:
|
||||
- value: '21:9'
|
||||
label:
|
||||
en_US: '21:9'
|
||||
zh_Hans: '21:9'
|
||||
- value: '16:9'
|
||||
label:
|
||||
en_US: '16:9'
|
||||
zh_Hans: '16:9'
|
||||
- value: '4:3'
|
||||
label:
|
||||
en_US: '4:3'
|
||||
zh_Hans: '4:3'
|
||||
- value: '1:1'
|
||||
label:
|
||||
en_US: '1:1'
|
||||
zh_Hans: '1:1'
|
||||
- value: '3:4'
|
||||
label:
|
||||
en_US: '3:4'
|
||||
zh_Hans: '3:4'
|
||||
- value: '9:16'
|
||||
label:
|
||||
en_US: '9:16'
|
||||
zh_Hans: '9:16'
|
||||
- value: '9:21'
|
||||
label:
|
||||
en_US: '9:21'
|
||||
zh_Hans: '9:21'
|
||||
default: '16:9'
|
||||
label:
|
||||
en_US: Aspect Ratio
|
||||
zh_Hans: 纵横比
|
||||
human_description:
|
||||
en_US: The aspect ratio of the generated image.
|
||||
zh_Hans: 生成图像的宽高比。
|
||||
form: form
|
||||
- name: num_images
|
||||
type: number
|
||||
required: false
|
||||
default: 1
|
||||
min: 1
|
||||
max: 1
|
||||
label:
|
||||
en_US: Number of Images
|
||||
zh_Hans: 图片数量
|
||||
human_description:
|
||||
en_US: The number of images to generate.
|
||||
zh_Hans: 要生成的图像数量。
|
||||
form: form
|
||||
- name: safety_tolerance
|
||||
type: select
|
||||
required: false
|
||||
options:
|
||||
- value: "1"
|
||||
label:
|
||||
en_US: "1 (Most strict)"
|
||||
zh_Hans: "1(最严格)"
|
||||
- value: "2"
|
||||
label:
|
||||
en_US: "2"
|
||||
zh_Hans: "2"
|
||||
- value: "3"
|
||||
label:
|
||||
en_US: "3"
|
||||
zh_Hans: "3"
|
||||
- value: "4"
|
||||
label:
|
||||
en_US: "4"
|
||||
zh_Hans: "4"
|
||||
- value: "5"
|
||||
label:
|
||||
en_US: "5"
|
||||
zh_Hans: "5"
|
||||
- value: "6"
|
||||
label:
|
||||
en_US: "6 (Most permissive)"
|
||||
zh_Hans: "6(最宽松)"
|
||||
default: '2'
|
||||
label:
|
||||
en_US: Safety Tolerance
|
||||
zh_Hans: 安全容忍度
|
||||
human_description:
|
||||
en_US: The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.
|
||||
zh_Hans: 生成图像的安全容忍级别,1 为最严格,6 为最宽松。
|
||||
form: form
|
||||
- name: seed
|
||||
type: number
|
||||
required: false
|
||||
min: 0
|
||||
max: 9999999999
|
||||
label:
|
||||
en_US: Seed
|
||||
zh_Hans: 种子
|
||||
human_description:
|
||||
en_US: The same seed and prompt can produce similar images.
|
||||
zh_Hans: 相同的种子和提示词可以生成相似的图像。
|
||||
form: form
|
||||
- name: raw
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Raw Mode
|
||||
zh_Hans: 原始模式
|
||||
human_description:
|
||||
en_US: Generate less processed, more natural-looking images.
|
||||
zh_Hans: 生成较少处理、更自然的图像。
|
||||
form: form
|
||||
- name: enable_safety_checker
|
||||
type: boolean
|
||||
required: false
|
||||
default: true
|
||||
label:
|
||||
en_US: Enable Safety Checker
|
||||
zh_Hans: 启用安全检查器
|
||||
human_description:
|
||||
en_US: Enable or disable the safety checker.
|
||||
zh_Hans: 启用或禁用安全检查器。
|
||||
form: form
|
||||
- name: sync_mode
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Sync Mode
|
||||
zh_Hans: 同步模式
|
||||
human_description:
|
||||
en_US: >
|
||||
If set to true, the function will wait for the image to be generated and uploaded before returning the response.
|
||||
This will increase the latency but allows you to get the image directly in the response without going through the CDN.
|
||||
zh_Hans: >
|
||||
如果设置为 true,函数将在生成并上传图像后才返回响应。
|
||||
这将增加延迟,但允许您直接在响应中获取图像,而无需通过 CDN。
|
||||
form: form
|
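For quick manual verification, the fal.ai endpoint this tool wraps can be exercised directly with requests. The snippet below is an illustrative sketch only: the API key and prompt are placeholders, and the payload simply mirrors the parameters declared in the YAML above.

import requests

# Illustrative sketch: replace the placeholder key; payload fields mirror the YAML parameters above.
headers = {
    "Authorization": "Key <fal_api_key>",
    "Content-Type": "application/json",
}
payload = {
    "prompt": "A minimalist watercolor of a lighthouse at dawn",
    "aspect_ratio": "16:9",
    "num_images": 1,
    "safety_tolerance": "2",
    "enable_safety_checker": True,
    "raw": False,
    "sync_mode": False,
}
response = requests.post("https://fal.run/fal-ai/flux-pro/v1.1-ultra", json=payload, headers=headers)
response.raise_for_status()
for image in response.json().get("images", []):
    print(image.get("url"))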
47
api/core/tools/provider/builtin/fal/tools/flux_1_dev.py
Normal file
|
@ -0,0 +1,47 @@
|
|||
from typing import Any, Union
|
||||
|
||||
import requests
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class Flux1DevTool(BuiltinTool):
|
||||
def _invoke(
|
||||
self, user_id: str, tool_parameters: dict[str, Any]
|
||||
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
|
||||
headers = {
|
||||
"Authorization": f"Key {self.runtime.credentials['fal_api_key']}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
prompt = tool_parameters.get("prompt", "")
|
||||
sanitized_prompt = prompt.replace("\\", "") # Remove backslashes from the prompt which may cause errors
|
||||
|
||||
payload = {
|
||||
"prompt": sanitized_prompt,
|
||||
"image_size": tool_parameters.get("image_size", "landscape_4_3"),
|
||||
"num_inference_steps": tool_parameters.get("num_inference_steps", 28),
|
||||
"guidance_scale": tool_parameters.get("guidance_scale", 3.5),
|
||||
"seed": tool_parameters.get("seed"),
|
||||
"num_images": tool_parameters.get("num_images", 1),
|
||||
"enable_safety_checker": tool_parameters.get("enable_safety_checker", True),
|
||||
"sync_mode": tool_parameters.get("sync_mode", False),
|
||||
}
|
||||
|
||||
url = "https://fal.run/fal-ai/flux/dev"
|
||||
|
||||
response = requests.post(url, json=payload, headers=headers)
|
||||
|
||||
if response.status_code != 200:
|
||||
return self.create_text_message(f"Got Error Response: {response.text}")
|
||||
|
||||
res = response.json()
|
||||
result = [self.create_json_message(res)]
|
||||
|
||||
for image_info in res.get("images", []):
|
||||
image_url = image_info.get("url")
|
||||
if image_url:
|
||||
result.append(self.create_image_message(image=image_url, save_as=self.VariableKey.IMAGE.value))
|
||||
|
||||
return result
|
137
api/core/tools/provider/builtin/fal/tools/flux_1_dev.yaml
Normal file
|
@ -0,0 +1,137 @@
|
|||
identity:
|
||||
name: flux_1_dev
|
||||
author: Kalo Chin
|
||||
label:
|
||||
en_US: FLUX.1 [dev]
|
||||
zh_Hans: FLUX.1 [dev]
|
||||
icon: icon.svg
|
||||
description:
|
||||
human:
|
||||
en_US: FLUX.1 [dev] is a 12 billion parameter flow transformer that generates high-quality images from text. It is suitable for personal and commercial use.
|
||||
zh_Hans: FLUX.1 [dev] 是一个拥有120亿参数的流动变换模型,可以从文本生成高质量的图像。适用于个人和商业用途。
|
||||
llm: This tool generates images from prompts using FAL's FLUX.1 [dev] model.
|
||||
parameters:
|
||||
- name: prompt
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Prompt
|
||||
zh_Hans: 提示词
|
||||
human_description:
|
||||
en_US: The text prompt used to generate the image.
|
||||
zh_Hans: 用于生成图片的文字提示词。
|
||||
llm_description: This prompt text will be used to generate the image.
|
||||
form: llm
|
||||
- name: image_size
|
||||
type: select
|
||||
required: false
|
||||
options:
|
||||
- value: square_hd
|
||||
label:
|
||||
en_US: Square HD
|
||||
zh_Hans: 方形高清
|
||||
- value: square
|
||||
label:
|
||||
en_US: Square
|
||||
zh_Hans: 方形
|
||||
- value: portrait_4_3
|
||||
label:
|
||||
en_US: Portrait 4:3
|
||||
zh_Hans: 竖屏 4:3
|
||||
- value: portrait_16_9
|
||||
label:
|
||||
en_US: Portrait 16:9
|
||||
zh_Hans: 竖屏 16:9
|
||||
- value: landscape_4_3
|
||||
label:
|
||||
en_US: Landscape 4:3
|
||||
zh_Hans: 横屏 4:3
|
||||
- value: landscape_16_9
|
||||
label:
|
||||
en_US: Landscape 16:9
|
||||
zh_Hans: 横屏 16:9
|
||||
default: landscape_4_3
|
||||
label:
|
||||
en_US: Image Size
|
||||
zh_Hans: 图片大小
|
||||
human_description:
|
||||
en_US: The size of the generated image.
|
||||
zh_Hans: 生成图像的尺寸。
|
||||
form: form
|
||||
- name: num_images
|
||||
type: number
|
||||
required: false
|
||||
default: 1
|
||||
min: 1
|
||||
max: 4
|
||||
label:
|
||||
en_US: Number of Images
|
||||
zh_Hans: 图片数量
|
||||
human_description:
|
||||
en_US: The number of images to generate.
|
||||
zh_Hans: 要生成的图片数量。
|
||||
form: form
|
||||
- name: num_inference_steps
|
||||
type: number
|
||||
required: false
|
||||
default: 28
|
||||
min: 1
|
||||
max: 50
|
||||
label:
|
||||
en_US: Num Inference Steps
|
||||
zh_Hans: 推理步数
|
||||
human_description:
|
||||
en_US: The number of inference steps to perform. More steps produce higher quality but take longer.
|
||||
zh_Hans: 执行的推理步骤数量。更多的步骤可以产生更高质量的结果,但需要更长的时间。
|
||||
form: form
|
||||
- name: guidance_scale
|
||||
type: number
|
||||
required: false
|
||||
default: 3.5
|
||||
min: 0
|
||||
max: 20
|
||||
label:
|
||||
en_US: Guidance Scale
|
||||
zh_Hans: 指导强度
|
||||
human_description:
|
||||
en_US: How closely the model should follow the prompt.
|
||||
zh_Hans: 模型对提示词的遵循程度。
|
||||
form: form
|
||||
- name: seed
|
||||
type: number
|
||||
required: false
|
||||
min: 0
|
||||
max: 9999999999
|
||||
label:
|
||||
en_US: Seed
|
||||
zh_Hans: 种子
|
||||
human_description:
|
||||
en_US: The same seed and prompt can produce similar images.
|
||||
zh_Hans: 相同的种子和提示可以产生相似的图像。
|
||||
form: form
|
||||
- name: enable_safety_checker
|
||||
type: boolean
|
||||
required: false
|
||||
default: true
|
||||
label:
|
||||
en_US: Enable Safety Checker
|
||||
zh_Hans: 启用安全检查器
|
||||
human_description:
|
||||
en_US: Enable or disable the safety checker.
|
||||
zh_Hans: 启用或禁用安全检查器。
|
||||
form: form
|
||||
- name: sync_mode
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Sync Mode
|
||||
zh_Hans: 同步模式
|
||||
human_description:
|
||||
en_US: >
|
||||
If set to true, the function will wait for the image to be generated and uploaded before returning the response.
|
||||
This will increase the latency but allows you to get the image directly in the response without going through the CDN.
|
||||
zh_Hans: >
|
||||
如果设置为 true,函数将在生成并上传图像后再返回响应。
|
||||
这将增加函数的延迟,但可以让您直接在响应中获取图像,而无需通过 CDN。
|
||||
form: form
|
47
api/core/tools/provider/builtin/fal/tools/flux_1_pro_new.py
Normal file
|
@ -0,0 +1,47 @@
|
|||
from typing import Any, Union
|
||||
|
||||
import requests
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class Flux1ProNewTool(BuiltinTool):
|
||||
def _invoke(
|
||||
self, user_id: str, tool_parameters: dict[str, Any]
|
||||
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
|
||||
headers = {
|
||||
"Authorization": f"Key {self.runtime.credentials['fal_api_key']}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
prompt = tool_parameters.get("prompt", "")
|
||||
sanitized_prompt = prompt.replace("\\", "") # Remove backslashes that may cause errors
|
||||
|
||||
payload = {
|
||||
"prompt": sanitized_prompt,
|
||||
"image_size": tool_parameters.get("image_size", "landscape_4_3"),
|
||||
"num_inference_steps": tool_parameters.get("num_inference_steps", 28),
|
||||
"guidance_scale": tool_parameters.get("guidance_scale", 3.5),
|
||||
"seed": tool_parameters.get("seed"),
|
||||
"num_images": tool_parameters.get("num_images", 1),
|
||||
"safety_tolerance": tool_parameters.get("safety_tolerance", "2"),
|
||||
"sync_mode": tool_parameters.get("sync_mode", False),
|
||||
}
|
||||
|
||||
url = "https://fal.run/fal-ai/flux-pro/new"
|
||||
|
||||
response = requests.post(url, json=payload, headers=headers)
|
||||
|
||||
if response.status_code != 200:
|
||||
return self.create_text_message(f"Got Error Response: {response.text}")
|
||||
|
||||
res = response.json()
|
||||
result = [self.create_json_message(res)]
|
||||
|
||||
for image_info in res.get("images", []):
|
||||
image_url = image_info.get("url")
|
||||
if image_url:
|
||||
result.append(self.create_image_message(image=image_url, save_as=self.VariableKey.IMAGE.value))
|
||||
|
||||
return result
|
164
api/core/tools/provider/builtin/fal/tools/flux_1_pro_new.yaml
Normal file
|
@ -0,0 +1,164 @@
|
|||
identity:
|
||||
name: flux_1_pro_new
|
||||
author: Kalo Chin
|
||||
label:
|
||||
en_US: FLUX.1 [pro] new
|
||||
zh_Hans: FLUX.1 [pro] new
|
||||
icon: icon.svg
|
||||
description:
|
||||
human:
|
||||
en_US: FLUX.1 [pro] new is an accelerated version of FLUX.1 [pro], maintaining professional-grade image quality while delivering significantly faster generation speeds.
|
||||
zh_Hans: FLUX.1 [pro] new 是 FLUX.1 [pro] 的加速版本,在保持专业级图像质量的同时,大大提高了生成速度。
|
||||
llm: This tool generates images from prompts using FAL's FLUX.1 [pro] new model.
|
||||
parameters:
|
||||
- name: prompt
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Prompt
|
||||
zh_Hans: 提示词
|
||||
human_description:
|
||||
en_US: The text prompt used to generate the image.
|
||||
zh_Hans: 用于生成图像的文本提示。
|
||||
llm_description: This prompt text will be used to generate the image.
|
||||
form: llm
|
||||
- name: image_size
|
||||
type: select
|
||||
required: false
|
||||
options:
|
||||
- value: square_hd
|
||||
label:
|
||||
en_US: Square HD
|
||||
zh_Hans: 正方形高清
|
||||
- value: square
|
||||
label:
|
||||
en_US: Square
|
||||
zh_Hans: 正方形
|
||||
- value: portrait_4_3
|
||||
label:
|
||||
en_US: Portrait 4:3
|
||||
zh_Hans: 竖屏 4:3
|
||||
- value: portrait_16_9
|
||||
label:
|
||||
en_US: Portrait 16:9
|
||||
zh_Hans: 竖屏 16:9
|
||||
- value: landscape_4_3
|
||||
label:
|
||||
en_US: Landscape 4:3
|
||||
zh_Hans: 横屏 4:3
|
||||
- value: landscape_16_9
|
||||
label:
|
||||
en_US: Landscape 16:9
|
||||
zh_Hans: 横屏 16:9
|
||||
default: landscape_4_3
|
||||
label:
|
||||
en_US: Image Size
|
||||
zh_Hans: 图像尺寸
|
||||
human_description:
|
||||
en_US: The size of the generated image.
|
||||
zh_Hans: 生成图像的尺寸。
|
||||
form: form
|
||||
- name: num_images
|
||||
type: number
|
||||
required: false
|
||||
default: 1
|
||||
min: 1
|
||||
max: 1
|
||||
label:
|
||||
en_US: Number of Images
|
||||
zh_Hans: 图像数量
|
||||
human_description:
|
||||
en_US: The number of images to generate.
|
||||
zh_Hans: 要生成的图像数量。
|
||||
form: form
|
||||
- name: num_inference_steps
|
||||
type: number
|
||||
required: false
|
||||
default: 28
|
||||
min: 1
|
||||
max: 50
|
||||
label:
|
||||
en_US: Num Inference Steps
|
||||
zh_Hans: 推理步数
|
||||
human_description:
|
||||
en_US: The number of inference steps to perform. More steps produce higher quality but take longer.
|
||||
zh_Hans: 执行的推理步数。步数越多,质量越高,但所需时间也更长。
|
||||
form: form
|
||||
- name: guidance_scale
|
||||
type: number
|
||||
required: false
|
||||
default: 3.5
|
||||
min: 0
|
||||
max: 20
|
||||
label:
|
||||
en_US: Guidance Scale
|
||||
zh_Hans: 指导强度
|
||||
human_description:
|
||||
en_US: How closely the model should follow the prompt.
|
||||
zh_Hans: 模型对提示词的遵循程度。
|
||||
form: form
|
||||
- name: safety_tolerance
|
||||
type: select
|
||||
required: false
|
||||
options:
|
||||
- value: "1"
|
||||
label:
|
||||
en_US: "1 (Most strict)"
|
||||
zh_Hans: "1(最严格)"
|
||||
- value: "2"
|
||||
label:
|
||||
en_US: "2"
|
||||
zh_Hans: "2"
|
||||
- value: "3"
|
||||
label:
|
||||
en_US: "3"
|
||||
zh_Hans: "3"
|
||||
- value: "4"
|
||||
label:
|
||||
en_US: "4"
|
||||
zh_Hans: "4"
|
||||
- value: "5"
|
||||
label:
|
||||
en_US: "5"
|
||||
zh_Hans: "5"
|
||||
- value: "6"
|
||||
label:
|
||||
en_US: "6 (Most permissive)"
|
||||
zh_Hans: "6(最宽松)"
|
||||
default: "2"
|
||||
label:
|
||||
en_US: Safety Tolerance
|
||||
zh_Hans: 安全容忍度
|
||||
human_description:
|
||||
en_US: >
|
||||
The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.
|
||||
zh_Hans: >
|
||||
生成图像的安全容忍级别。1 是最严格,6 是最宽松。
|
||||
form: form
|
||||
- name: seed
|
||||
type: number
|
||||
required: false
|
||||
min: 0
|
||||
max: 9999999999
|
||||
label:
|
||||
en_US: Seed
|
||||
zh_Hans: 种子
|
||||
human_description:
|
||||
en_US: The same seed and prompt can produce similar images.
|
||||
zh_Hans: 相同的种子和提示词可以生成相似的图像。
|
||||
form: form
|
||||
- name: sync_mode
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Sync Mode
|
||||
zh_Hans: 同步模式
|
||||
human_description:
|
||||
en_US: >
|
||||
If set to true, the function will wait for the image to be generated and uploaded before returning the response.
|
||||
This will increase the latency but allows you to get the image directly in the response without going through the CDN.
|
||||
zh_Hans: >
|
||||
如果设置为 true,函数将在生成并上传图像后才返回响应。
|
||||
这将增加延迟,但允许您直接在响应中获取图像,而无需通过 CDN。
|
||||
form: form
|
52
api/core/tools/provider/builtin/fal/tools/wizper.py
Normal file
|
@ -0,0 +1,52 @@
|
|||
import io
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
import fal_client
|
||||
|
||||
from core.file.enums import FileAttribute, FileType
|
||||
from core.file.file_manager import download, get_attr
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class WizperTool(BuiltinTool):
|
||||
def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
|
||||
audio_file = tool_parameters.get("audio_file")
|
||||
task = tool_parameters.get("task", "transcribe")
|
||||
language = tool_parameters.get("language", "en")
|
||||
chunk_level = tool_parameters.get("chunk_level", "segment")
|
||||
version = tool_parameters.get("version", "3")
|
||||
|
||||
if audio_file.type != FileType.AUDIO:
|
||||
return [self.create_text_message("Not a valid audio file.")]
|
||||
|
||||
api_key = self.runtime.credentials["fal_api_key"]
|
||||
|
||||
os.environ["FAL_KEY"] = api_key
|
||||
|
||||
audio_binary = io.BytesIO(download(audio_file))
|
||||
mime_type = get_attr(file=audio_file, attr=FileAttribute.MIME_TYPE)
|
||||
file_data = audio_binary.getvalue()
|
||||
|
||||
try:
|
||||
audio_url = fal_client.upload(file_data, mime_type)
|
||||
|
||||
except Exception as e:
|
||||
return [self.create_text_message(f"Error uploading audio file: {str(e)}")]
|
||||
|
||||
arguments = {
|
||||
"audio_url": audio_url,
|
||||
"task": task,
|
||||
"language": language,
|
||||
"chunk_level": chunk_level,
|
||||
"version": version,
|
||||
}
|
||||
|
||||
result = fal_client.subscribe(
|
||||
"fal-ai/wizper",
|
||||
arguments=arguments,
|
||||
with_logs=False,
|
||||
)
|
||||
|
||||
return self.create_json_message(result)
|
489
api/core/tools/provider/builtin/fal/tools/wizper.yaml
Normal file
|
@ -0,0 +1,489 @@
|
|||
identity:
|
||||
name: wizper
|
||||
author: Kalo Chin
|
||||
label:
|
||||
en_US: Wizper
|
||||
zh_Hans: Wizper
|
||||
description:
|
||||
human:
|
||||
en_US: Transcribe an audio file using the Whisper model.
|
||||
zh_Hans: 使用 Whisper 模型转录音频文件。
|
||||
llm: Transcribe an audio file using the Whisper model.
|
||||
parameters:
|
||||
- name: audio_file
|
||||
type: file
|
||||
required: true
|
||||
label:
|
||||
en_US: Audio File
|
||||
zh_Hans: 音频文件
|
||||
human_description:
|
||||
en_US: "Upload an audio file to transcribe. Supports mp3, mp4, mpeg, mpga, m4a, wav, or webm formats."
|
||||
zh_Hans: "上传要转录的音频文件。支持 mp3、mp4、mpeg、mpga、m4a、wav 或 webm 格式。"
|
||||
llm_description: "Audio file to transcribe. Supported formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm."
|
||||
form: llm
|
||||
- name: task
|
||||
type: select
|
||||
required: true
|
||||
label:
|
||||
en_US: Task
|
||||
zh_Hans: 任务
|
||||
human_description:
|
||||
en_US: "Choose whether to transcribe the audio in its original language or translate it to English"
|
||||
zh_Hans: "选择是以原始语言转录音频还是将其翻译成英语"
|
||||
llm_description: "Task to perform on the audio file. Either transcribe or translate. Default value: 'transcribe'. If 'translate' is selected as the task, the audio will be translated to English, regardless of the language selected."
|
||||
form: form
|
||||
default: transcribe
|
||||
options:
|
||||
- value: transcribe
|
||||
label:
|
||||
en_US: Transcribe
|
||||
zh_Hans: 转录
|
||||
- value: translate
|
||||
label:
|
||||
en_US: Translate
|
||||
zh_Hans: 翻译
|
||||
- name: language
|
||||
type: select
|
||||
required: true
|
||||
label:
|
||||
en_US: Language
|
||||
zh_Hans: 语言
|
||||
human_description:
|
||||
en_US: "Select the primary language spoken in the audio file"
|
||||
zh_Hans: "选择音频文件中使用的主要语言"
|
||||
llm_description: "Language of the audio file."
|
||||
form: form
|
||||
default: en
|
||||
options:
|
||||
- value: af
|
||||
label:
|
||||
en_US: Afrikaans
|
||||
zh_Hans: 南非语
|
||||
- value: am
|
||||
label:
|
||||
en_US: Amharic
|
||||
zh_Hans: 阿姆哈拉语
|
||||
- value: ar
|
||||
label:
|
||||
en_US: Arabic
|
||||
zh_Hans: 阿拉伯语
|
||||
- value: as
|
||||
label:
|
||||
en_US: Assamese
|
||||
zh_Hans: 阿萨姆语
|
||||
- value: az
|
||||
label:
|
||||
en_US: Azerbaijani
|
||||
zh_Hans: 阿塞拜疆语
|
||||
- value: ba
|
||||
label:
|
||||
en_US: Bashkir
|
||||
zh_Hans: 巴什基尔语
|
||||
- value: be
|
||||
label:
|
||||
en_US: Belarusian
|
||||
zh_Hans: 白俄罗斯语
|
||||
- value: bg
|
||||
label:
|
||||
en_US: Bulgarian
|
||||
zh_Hans: 保加利亚语
|
||||
- value: bn
|
||||
label:
|
||||
en_US: Bengali
|
||||
zh_Hans: 孟加拉语
|
||||
- value: bo
|
||||
label:
|
||||
en_US: Tibetan
|
||||
zh_Hans: 藏语
|
||||
- value: br
|
||||
label:
|
||||
en_US: Breton
|
||||
zh_Hans: 布列塔尼语
|
||||
- value: bs
|
||||
label:
|
||||
en_US: Bosnian
|
||||
zh_Hans: 波斯尼亚语
|
||||
- value: ca
|
||||
label:
|
||||
en_US: Catalan
|
||||
zh_Hans: 加泰罗尼亚语
|
||||
- value: cs
|
||||
label:
|
||||
en_US: Czech
|
||||
zh_Hans: 捷克语
|
||||
- value: cy
|
||||
label:
|
||||
en_US: Welsh
|
||||
zh_Hans: 威尔士语
|
||||
- value: da
|
||||
label:
|
||||
en_US: Danish
|
||||
zh_Hans: 丹麦语
|
||||
- value: de
|
||||
label:
|
||||
en_US: German
|
||||
zh_Hans: 德语
|
||||
- value: el
|
||||
label:
|
||||
en_US: Greek
|
||||
zh_Hans: 希腊语
|
||||
- value: en
|
||||
label:
|
||||
en_US: English
|
||||
zh_Hans: 英语
|
||||
- value: es
|
||||
label:
|
||||
en_US: Spanish
|
||||
zh_Hans: 西班牙语
|
||||
- value: et
|
||||
label:
|
||||
en_US: Estonian
|
||||
zh_Hans: 爱沙尼亚语
|
||||
- value: eu
|
||||
label:
|
||||
en_US: Basque
|
||||
zh_Hans: 巴斯克语
|
||||
- value: fa
|
||||
label:
|
||||
en_US: Persian
|
||||
zh_Hans: 波斯语
|
||||
- value: fi
|
||||
label:
|
||||
en_US: Finnish
|
||||
zh_Hans: 芬兰语
|
||||
- value: fo
|
||||
label:
|
||||
en_US: Faroese
|
||||
zh_Hans: 法罗语
|
||||
- value: fr
|
||||
label:
|
||||
en_US: French
|
||||
zh_Hans: 法语
|
||||
- value: gl
|
||||
label:
|
||||
en_US: Galician
|
||||
zh_Hans: 加利西亚语
|
||||
- value: gu
|
||||
label:
|
||||
en_US: Gujarati
|
||||
zh_Hans: 古吉拉特语
|
||||
- value: ha
|
||||
label:
|
||||
en_US: Hausa
|
||||
zh_Hans: 豪萨语
|
||||
- value: haw
|
||||
label:
|
||||
en_US: Hawaiian
|
||||
zh_Hans: 夏威夷语
|
||||
- value: he
|
||||
label:
|
||||
en_US: Hebrew
|
||||
zh_Hans: 希伯来语
|
||||
- value: hi
|
||||
label:
|
||||
en_US: Hindi
|
||||
zh_Hans: 印地语
|
||||
- value: hr
|
||||
label:
|
||||
en_US: Croatian
|
||||
zh_Hans: 克罗地亚语
|
||||
- value: ht
|
||||
label:
|
||||
en_US: Haitian Creole
|
||||
zh_Hans: 海地克里奥尔语
|
||||
- value: hu
|
||||
label:
|
||||
en_US: Hungarian
|
||||
zh_Hans: 匈牙利语
|
||||
- value: hy
|
||||
label:
|
||||
en_US: Armenian
|
||||
zh_Hans: 亚美尼亚语
|
||||
- value: id
|
||||
label:
|
||||
en_US: Indonesian
|
||||
zh_Hans: 印度尼西亚语
|
||||
- value: is
|
||||
label:
|
||||
en_US: Icelandic
|
||||
zh_Hans: 冰岛语
|
||||
- value: it
|
||||
label:
|
||||
en_US: Italian
|
||||
zh_Hans: 意大利语
|
||||
- value: ja
|
||||
label:
|
||||
en_US: Japanese
|
||||
zh_Hans: 日语
|
||||
- value: jw
|
||||
label:
|
||||
en_US: Javanese
|
||||
zh_Hans: 爪哇语
|
||||
- value: ka
|
||||
label:
|
||||
en_US: Georgian
|
||||
zh_Hans: 格鲁吉亚语
|
||||
- value: kk
|
||||
label:
|
||||
en_US: Kazakh
|
||||
zh_Hans: 哈萨克语
|
||||
- value: km
|
||||
label:
|
||||
en_US: Khmer
|
||||
zh_Hans: 高棉语
|
||||
- value: kn
|
||||
label:
|
||||
en_US: Kannada
|
||||
zh_Hans: 卡纳达语
|
||||
- value: ko
|
||||
label:
|
||||
en_US: Korean
|
||||
zh_Hans: 韩语
|
||||
- value: la
|
||||
label:
|
||||
en_US: Latin
|
||||
zh_Hans: 拉丁语
|
||||
- value: lb
|
||||
label:
|
||||
en_US: Luxembourgish
|
||||
zh_Hans: 卢森堡语
|
||||
- value: ln
|
||||
label:
|
||||
en_US: Lingala
|
||||
zh_Hans: 林加拉语
|
||||
- value: lo
|
||||
label:
|
||||
en_US: Lao
|
||||
zh_Hans: 老挝语
|
||||
- value: lt
|
||||
label:
|
||||
en_US: Lithuanian
|
||||
zh_Hans: 立陶宛语
|
||||
- value: lv
|
||||
label:
|
||||
en_US: Latvian
|
||||
zh_Hans: 拉脱维亚语
|
||||
- value: mg
|
||||
label:
|
||||
en_US: Malagasy
|
||||
zh_Hans: 马尔加什语
|
||||
- value: mi
|
||||
label:
|
||||
en_US: Maori
|
||||
zh_Hans: 毛利语
|
||||
- value: mk
|
||||
label:
|
||||
en_US: Macedonian
|
||||
zh_Hans: 马其顿语
|
||||
- value: ml
|
||||
label:
|
||||
en_US: Malayalam
|
||||
zh_Hans: 马拉雅拉姆语
|
||||
- value: mn
|
||||
label:
|
||||
en_US: Mongolian
|
||||
zh_Hans: 蒙古语
|
||||
- value: mr
|
||||
label:
|
||||
en_US: Marathi
|
||||
zh_Hans: 马拉地语
|
||||
- value: ms
|
||||
label:
|
||||
en_US: Malay
|
||||
zh_Hans: 马来语
|
||||
- value: mt
|
||||
label:
|
||||
en_US: Maltese
|
||||
zh_Hans: 马耳他语
|
||||
- value: my
|
||||
label:
|
||||
en_US: Burmese
|
||||
zh_Hans: 缅甸语
|
||||
- value: ne
|
||||
label:
|
||||
en_US: Nepali
|
||||
zh_Hans: 尼泊尔语
|
||||
- value: nl
|
||||
label:
|
||||
en_US: Dutch
|
||||
zh_Hans: 荷兰语
|
||||
- value: nn
|
||||
label:
|
||||
en_US: Norwegian Nynorsk
|
||||
zh_Hans: 新挪威语
|
||||
- value: no
|
||||
label:
|
||||
en_US: Norwegian
|
||||
zh_Hans: 挪威语
|
||||
- value: oc
|
||||
label:
|
||||
en_US: Occitan
|
||||
zh_Hans: 奥克语
|
||||
- value: pa
|
||||
label:
|
||||
en_US: Punjabi
|
||||
zh_Hans: 旁遮普语
|
||||
- value: pl
|
||||
label:
|
||||
en_US: Polish
|
||||
zh_Hans: 波兰语
|
||||
- value: ps
|
||||
label:
|
||||
en_US: Pashto
|
||||
zh_Hans: 普什图语
|
||||
- value: pt
|
||||
label:
|
||||
en_US: Portuguese
|
||||
zh_Hans: 葡萄牙语
|
||||
- value: ro
|
||||
label:
|
||||
en_US: Romanian
|
||||
zh_Hans: 罗马尼亚语
|
||||
- value: ru
|
||||
label:
|
||||
en_US: Russian
|
||||
zh_Hans: 俄语
|
||||
- value: sa
|
||||
label:
|
||||
en_US: Sanskrit
|
||||
zh_Hans: 梵语
|
||||
- value: sd
|
||||
label:
|
||||
en_US: Sindhi
|
||||
zh_Hans: 信德语
|
||||
- value: si
|
||||
label:
|
||||
en_US: Sinhala
|
||||
zh_Hans: 僧伽罗语
|
||||
- value: sk
|
||||
label:
|
||||
en_US: Slovak
|
||||
zh_Hans: 斯洛伐克语
|
||||
- value: sl
|
||||
label:
|
||||
en_US: Slovenian
|
||||
zh_Hans: 斯洛文尼亚语
|
||||
- value: sn
|
||||
label:
|
||||
en_US: Shona
|
||||
zh_Hans: 修纳语
|
||||
- value: so
|
||||
label:
|
||||
en_US: Somali
|
||||
zh_Hans: 索马里语
|
||||
- value: sq
|
||||
label:
|
||||
en_US: Albanian
|
||||
zh_Hans: 阿尔巴尼亚语
|
||||
- value: sr
|
||||
label:
|
||||
en_US: Serbian
|
||||
zh_Hans: 塞尔维亚语
|
||||
- value: su
|
||||
label:
|
||||
en_US: Sundanese
|
||||
zh_Hans: 巽他语
|
||||
- value: sv
|
||||
label:
|
||||
en_US: Swedish
|
||||
zh_Hans: 瑞典语
|
||||
- value: sw
|
||||
label:
|
||||
en_US: Swahili
|
||||
zh_Hans: 斯瓦希里语
|
||||
- value: ta
|
||||
label:
|
||||
en_US: Tamil
|
||||
zh_Hans: 泰米尔语
|
||||
- value: te
|
||||
label:
|
||||
en_US: Telugu
|
||||
zh_Hans: 泰卢固语
|
||||
- value: tg
|
||||
label:
|
||||
en_US: Tajik
|
||||
zh_Hans: 塔吉克语
|
||||
- value: th
|
||||
label:
|
||||
en_US: Thai
|
||||
zh_Hans: 泰语
|
||||
- value: tk
|
||||
label:
|
||||
en_US: Turkmen
|
||||
zh_Hans: 土库曼语
|
||||
- value: tl
|
||||
label:
|
||||
en_US: Tagalog
|
||||
zh_Hans: 他加禄语
|
||||
- value: tr
|
||||
label:
|
||||
en_US: Turkish
|
||||
zh_Hans: 土耳其语
|
||||
- value: tt
|
||||
label:
|
||||
en_US: Tatar
|
||||
zh_Hans: 鞑靼语
|
||||
- value: uk
|
||||
label:
|
||||
en_US: Ukrainian
|
||||
zh_Hans: 乌克兰语
|
||||
- value: ur
|
||||
label:
|
||||
en_US: Urdu
|
||||
zh_Hans: 乌尔都语
|
||||
- value: uz
|
||||
label:
|
||||
en_US: Uzbek
|
||||
zh_Hans: 乌兹别克语
|
||||
- value: vi
|
||||
label:
|
||||
en_US: Vietnamese
|
||||
zh_Hans: 越南语
|
||||
- value: yi
|
||||
label:
|
||||
en_US: Yiddish
|
||||
zh_Hans: 意第绪语
|
||||
- value: yo
|
||||
label:
|
||||
en_US: Yoruba
|
||||
zh_Hans: 约鲁巴语
|
||||
- value: yue
|
||||
label:
|
||||
en_US: Cantonese
|
||||
zh_Hans: 粤语
|
||||
- value: zh
|
||||
label:
|
||||
en_US: Chinese
|
||||
zh_Hans: 中文
|
||||
- name: chunk_level
|
||||
type: select
|
||||
label:
|
||||
en_US: Chunk Level
|
||||
zh_Hans: 分块级别
|
||||
human_description:
|
||||
en_US: "Choose how the transcription should be divided into chunks"
|
||||
zh_Hans: "选择如何将转录内容分成块"
|
||||
llm_description: "Level of the chunks to return."
|
||||
form: form
|
||||
default: segment
|
||||
options:
|
||||
- value: segment
|
||||
label:
|
||||
en_US: Segment
|
||||
zh_Hans: 段
|
||||
- name: version
|
||||
type: select
|
||||
label:
|
||||
en_US: Version
|
||||
zh_Hans: 版本
|
||||
human_description:
|
||||
en_US: "Select which version of the Whisper large model to use"
|
||||
zh_Hans: "选择要使用的 Whisper large 模型版本"
|
||||
llm_description: "Version of the model to use. All of the models are the Whisper large variant."
|
||||
form: form
|
||||
default: "3"
|
||||
options:
|
||||
- value: "3"
|
||||
label:
|
||||
en_US: Version 3
|
||||
zh_Hans: 版本 3
|
|
@ -1,6 +1,7 @@
|
|||
from typing import Any
|
||||
|
||||
import openai
|
||||
from yarl import URL
|
||||
|
||||
from core.tools.errors import ToolProviderCredentialValidationError
|
||||
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
|
||||
|
@ -10,6 +11,7 @@ class PodcastGeneratorProvider(BuiltinToolProviderController):
|
|||
def _validate_credentials(self, credentials: dict[str, Any]) -> None:
|
||||
tts_service = credentials.get("tts_service")
|
||||
api_key = credentials.get("api_key")
|
||||
base_url = credentials.get("openai_base_url")
|
||||
|
||||
if not tts_service:
|
||||
raise ToolProviderCredentialValidationError("TTS service is not specified")
|
||||
|
@ -17,13 +19,16 @@ class PodcastGeneratorProvider(BuiltinToolProviderController):
|
|||
if not api_key:
|
||||
raise ToolProviderCredentialValidationError("API key is missing")
|
||||
|
||||
if base_url:
|
||||
base_url = str(URL(base_url) / "v1")
|
||||
|
||||
if tts_service == "openai":
|
||||
self._validate_openai_credentials(api_key)
|
||||
self._validate_openai_credentials(api_key, base_url)
|
||||
else:
|
||||
raise ToolProviderCredentialValidationError(f"Unsupported TTS service: {tts_service}")
|
||||
|
||||
def _validate_openai_credentials(self, api_key: str) -> None:
|
||||
client = openai.OpenAI(api_key=api_key)
|
||||
def _validate_openai_credentials(self, api_key: str, base_url: str | None) -> None:
|
||||
client = openai.OpenAI(api_key=api_key, base_url=base_url)
|
||||
try:
|
||||
# We're using a simple API call to validate the credentials
|
||||
client.models.list()
|
||||
|
|
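The credential check above now routes a custom OpenAI-compatible base URL through yarl before constructing the client. As a small sketch (the domain below is a placeholder), yarl's `/` operator appends a path segment:

from yarl import URL

# Illustrative: the `/` operator appends a path segment, so a custom
# OpenAI-compatible endpoint resolves to its /v1 API root.
base_url = "https://llm-proxy.example.com"
print(str(URL(base_url) / "v1"))  # https://llm-proxy.example.com/v1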
|
@ -17,6 +17,7 @@ from .segments import (
|
|||
from .types import SegmentType
|
||||
from .variables import (
|
||||
ArrayAnyVariable,
|
||||
ArrayFileVariable,
|
||||
ArrayNumberVariable,
|
||||
ArrayObjectVariable,
|
||||
ArrayStringVariable,
|
||||
|
@ -58,4 +59,5 @@ __all__ = [
|
|||
"ArrayStringSegment",
|
||||
"FileSegment",
|
||||
"FileVariable",
|
||||
"ArrayFileVariable",
|
||||
]
|
||||
|
|
|
@ -1,9 +1,13 @@
|
|||
from collections.abc import Sequence
|
||||
from uuid import uuid4
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from core.helper import encrypter
|
||||
|
||||
from .segments import (
|
||||
ArrayAnySegment,
|
||||
ArrayFileSegment,
|
||||
ArrayNumberSegment,
|
||||
ArrayObjectSegment,
|
||||
ArrayStringSegment,
|
||||
|
@ -24,11 +28,12 @@ class Variable(Segment):
|
|||
"""
|
||||
|
||||
id: str = Field(
|
||||
default="",
|
||||
description="Unique identity for variable. It's only used by environment variables now.",
|
||||
default=lambda _: str(uuid4()),
|
||||
description="Unique identity for variable.",
|
||||
)
|
||||
name: str
|
||||
description: str = Field(default="", description="Description of the variable.")
|
||||
selector: Sequence[str] = Field(default_factory=list)
|
||||
|
||||
|
||||
class StringVariable(StringSegment, Variable):
|
||||
|
@ -78,3 +83,7 @@ class NoneVariable(NoneSegment, Variable):
|
|||
|
||||
class FileVariable(FileSegment, Variable):
|
||||
pass
|
||||
|
||||
|
||||
class ArrayFileVariable(ArrayFileSegment, Variable):
|
||||
pass
|
||||
|
|
|
@ -95,13 +95,16 @@ class VariablePool(BaseModel):
|
|||
if len(selector) < 2:
|
||||
raise ValueError("Invalid selector")
|
||||
|
||||
if isinstance(value, Variable):
|
||||
variable = value
|
||||
if isinstance(value, Segment):
|
||||
v = value
|
||||
variable = variable_factory.segment_to_variable(segment=value, selector=selector)
|
||||
else:
|
||||
v = variable_factory.build_segment(value)
|
||||
segment = variable_factory.build_segment(value)
|
||||
variable = variable_factory.segment_to_variable(segment=segment, selector=selector)
|
||||
|
||||
hash_key = hash(tuple(selector[1:]))
|
||||
self.variable_dictionary[selector[0]][hash_key] = v
|
||||
self.variable_dictionary[selector[0]][hash_key] = variable
|
||||
|
||||
def get(self, selector: Sequence[str], /) -> Segment | None:
|
||||
"""
|
||||
|
|
|
@ -143,14 +143,14 @@ def _extract_text_by_file_extension(*, file_content: bytes, file_extension: str)
|
|||
|
||||
def _extract_text_from_plain_text(file_content: bytes) -> str:
|
||||
try:
|
||||
return file_content.decode("utf-8")
|
||||
return file_content.decode("utf-8", "ignore")
|
||||
except UnicodeDecodeError as e:
|
||||
raise TextExtractionError("Failed to decode plain text file") from e
|
||||
|
||||
|
||||
def _extract_text_from_json(file_content: bytes) -> str:
|
||||
try:
|
||||
json_data = json.loads(file_content.decode("utf-8"))
|
||||
json_data = json.loads(file_content.decode("utf-8", "ignore"))
|
||||
return json.dumps(json_data, indent=2, ensure_ascii=False)
|
||||
except (UnicodeDecodeError, json.JSONDecodeError) as e:
|
||||
raise TextExtractionError(f"Failed to decode or parse JSON file: {e}") from e
|
||||
|
@ -159,7 +159,7 @@ def _extract_text_from_json(file_content: bytes) -> str:
|
|||
def _extract_text_from_yaml(file_content: bytes) -> str:
|
||||
"""Extract the content from yaml file"""
|
||||
try:
|
||||
yaml_data = yaml.safe_load_all(file_content.decode("utf-8"))
|
||||
yaml_data = yaml.safe_load_all(file_content.decode("utf-8", "ignore"))
|
||||
return yaml.dump_all(yaml_data, allow_unicode=True, sort_keys=False)
|
||||
except (UnicodeDecodeError, yaml.YAMLError) as e:
|
||||
raise TextExtractionError(f"Failed to decode or parse YAML file: {e}") from e
|
||||
|
@ -217,7 +217,7 @@ def _extract_text_from_file(file: File):
|
|||
|
||||
def _extract_text_from_csv(file_content: bytes) -> str:
|
||||
try:
|
||||
csv_file = io.StringIO(file_content.decode("utf-8"))
|
||||
csv_file = io.StringIO(file_content.decode("utf-8", "ignore"))
|
||||
csv_reader = csv.reader(csv_file)
|
||||
rows = list(csv_reader)
|
||||
|
||||
|
|
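The extractor changes above swap strict UTF-8 decoding for the "ignore" error handler, so a stray non-UTF-8 byte no longer aborts text extraction. A minimal sketch of the difference:

# Minimal sketch: a latin-1 encoded 'é' is not valid UTF-8.
data = b"caf\xe9 latte"
try:
    data.decode("utf-8")
except UnicodeDecodeError as exc:
    print("strict decode fails:", exc)
print(data.decode("utf-8", "ignore"))  # undecodable byte is dropped: 'caf latte'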
|
@ -1,5 +1,4 @@
|
|||
from collections.abc import Mapping, Sequence
|
||||
from os import path
|
||||
from typing import Any
|
||||
|
||||
from sqlalchemy import select
|
||||
|
@ -182,7 +181,6 @@ class ToolNode(BaseNode[ToolNodeData]):
|
|||
for response in tool_response:
|
||||
if response.type in {ToolInvokeMessage.MessageType.IMAGE_LINK, ToolInvokeMessage.MessageType.IMAGE}:
|
||||
url = str(response.message) if response.message else None
|
||||
ext = path.splitext(url)[1] if url else ".bin"
|
||||
tool_file_id = str(url).split("/")[-1].split(".")[0]
|
||||
transfer_method = response.meta.get("transfer_method", FileTransferMethod.TOOL_FILE)
|
||||
|
||||
|
@ -204,7 +202,6 @@ class ToolNode(BaseNode[ToolNodeData]):
|
|||
)
|
||||
result.append(file)
|
||||
elif response.type == ToolInvokeMessage.MessageType.BLOB:
|
||||
# get tool file id
|
||||
tool_file_id = str(response.message).split("/")[-1].split(".")[0]
|
||||
with Session(db.engine) as session:
|
||||
stmt = select(ToolFile).where(ToolFile.id == tool_file_id)
|
||||
|
@ -213,7 +210,6 @@ class ToolNode(BaseNode[ToolNodeData]):
|
|||
raise ValueError(f"tool file {tool_file_id} not exists")
|
||||
mapping = {
|
||||
"tool_file_id": tool_file_id,
|
||||
"type": FileType.IMAGE,
|
||||
"transfer_method": FileTransferMethod.TOOL_FILE,
|
||||
}
|
||||
file = file_factory.build_from_mapping(
|
||||
|
@ -230,13 +226,8 @@ class ToolNode(BaseNode[ToolNodeData]):
|
|||
tool_file = session.scalar(stmt)
|
||||
if tool_file is None:
|
||||
raise ToolFileError(f"Tool file {tool_file_id} does not exist")
|
||||
if "." in url:
|
||||
extension = "." + url.split("/")[-1].split(".")[1]
|
||||
else:
|
||||
extension = ".bin"
|
||||
mapping = {
|
||||
"tool_file_id": tool_file_id,
|
||||
"type": FileType.IMAGE,
|
||||
"transfer_method": transfer_method,
|
||||
"url": url,
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ if [[ "${MODE}" == "worker" ]]; then
|
|||
fi
|
||||
|
||||
exec celery -A app.celery worker -P ${CELERY_WORKER_CLASS:-gevent} $CONCURRENCY_OPTION --loglevel ${LOG_LEVEL} \
|
||||
-Q ${CELERY_QUEUES:-dataset,generation,mail,ops_trace,app_deletion}
|
||||
-Q ${CELERY_QUEUES:-dataset,mail,ops_trace,app_deletion}
|
||||
|
||||
elif [[ "${MODE}" == "beat" ]]; then
|
||||
exec celery -A app.celery beat --loglevel ${LOG_LEVEL}
|
||||
|
|
|
@ -180,6 +180,20 @@ def _get_remote_file_info(url: str):
    return mime_type, filename, file_size


def _get_file_type_by_mimetype(mime_type: str) -> FileType:
    if "image" in mime_type:
        file_type = FileType.IMAGE
    elif "video" in mime_type:
        file_type = FileType.VIDEO
    elif "audio" in mime_type:
        file_type = FileType.AUDIO
    elif "text" in mime_type or "pdf" in mime_type:
        file_type = FileType.DOCUMENT
    else:
        file_type = FileType.CUSTOM
    return file_type


def _build_from_tool_file(
    *,
    mapping: Mapping[str, Any],
@ -199,12 +213,13 @@ def _build_from_tool_file(
|
|||
raise ValueError(f"ToolFile {mapping.get('tool_file_id')} not found")
|
||||
|
||||
extension = "." + tool_file.file_key.split(".")[-1] if "." in tool_file.file_key else ".bin"
|
||||
file_type = mapping.get("type", _get_file_type_by_mimetype(tool_file.mimetype))
|
||||
|
||||
return File(
|
||||
id=mapping.get("id"),
|
||||
tenant_id=tenant_id,
|
||||
filename=tool_file.name,
|
||||
type=FileType.value_of(mapping.get("type")),
|
||||
type=file_type,
|
||||
transfer_method=transfer_method,
|
||||
remote_url=tool_file.original_url,
|
||||
related_id=tool_file.id,
|
||||
|
|
|
@ -1,34 +1,65 @@
|
|||
from collections.abc import Mapping
|
||||
from collections.abc import Mapping, Sequence
|
||||
from typing import Any
|
||||
from uuid import uuid4
|
||||
|
||||
from configs import dify_config
|
||||
from core.file import File
|
||||
from core.variables import (
|
||||
from core.variables.exc import VariableError
|
||||
from core.variables.segments import (
|
||||
ArrayAnySegment,
|
||||
ArrayFileSegment,
|
||||
ArrayNumberSegment,
|
||||
ArrayNumberVariable,
|
||||
ArrayObjectSegment,
|
||||
ArrayObjectVariable,
|
||||
ArraySegment,
|
||||
ArrayStringSegment,
|
||||
ArrayStringVariable,
|
||||
FileSegment,
|
||||
FloatSegment,
|
||||
FloatVariable,
|
||||
IntegerSegment,
|
||||
IntegerVariable,
|
||||
NoneSegment,
|
||||
ObjectSegment,
|
||||
Segment,
|
||||
StringSegment,
|
||||
)
|
||||
from core.variables.types import SegmentType
|
||||
from core.variables.variables import (
|
||||
ArrayAnyVariable,
|
||||
ArrayFileVariable,
|
||||
ArrayNumberVariable,
|
||||
ArrayObjectVariable,
|
||||
ArrayStringVariable,
|
||||
FileVariable,
|
||||
FloatVariable,
|
||||
IntegerVariable,
|
||||
NoneVariable,
|
||||
ObjectVariable,
|
||||
SecretVariable,
|
||||
Segment,
|
||||
SegmentType,
|
||||
StringSegment,
|
||||
StringVariable,
|
||||
Variable,
|
||||
)
|
||||
from core.variables.exc import VariableError
|
||||
|
||||
|
||||
class InvalidSelectorError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class UnsupportedSegmentTypeError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
# Define the constant
|
||||
SEGMENT_TO_VARIABLE_MAP = {
|
||||
StringSegment: StringVariable,
|
||||
IntegerSegment: IntegerVariable,
|
||||
FloatSegment: FloatVariable,
|
||||
ObjectSegment: ObjectVariable,
|
||||
FileSegment: FileVariable,
|
||||
ArrayStringSegment: ArrayStringVariable,
|
||||
ArrayNumberSegment: ArrayNumberVariable,
|
||||
ArrayObjectSegment: ArrayObjectVariable,
|
||||
ArrayFileSegment: ArrayFileVariable,
|
||||
ArrayAnySegment: ArrayAnyVariable,
|
||||
NoneSegment: NoneVariable,
|
||||
}
|
||||
|
||||
|
||||
def build_variable_from_mapping(mapping: Mapping[str, Any], /) -> Variable:
|
||||
|
@ -96,3 +127,30 @@ def build_segment(value: Any, /) -> Segment:
|
|||
case _:
|
||||
raise ValueError(f"not supported value {value}")
|
||||
raise ValueError(f"not supported value {value}")
|
||||
|
||||
|
||||
def segment_to_variable(
|
||||
*,
|
||||
segment: Segment,
|
||||
selector: Sequence[str],
|
||||
id: str | None = None,
|
||||
name: str | None = None,
|
||||
description: str = "",
|
||||
) -> Variable:
|
||||
if isinstance(segment, Variable):
|
||||
return segment
|
||||
name = name or selector[-1]
|
||||
id = id or str(uuid4())
|
||||
|
||||
segment_type = type(segment)
|
||||
if segment_type not in SEGMENT_TO_VARIABLE_MAP:
|
||||
raise UnsupportedSegmentTypeError(f"not supported segment type {segment_type}")
|
||||
|
||||
variable_class = SEGMENT_TO_VARIABLE_MAP[segment_type]
|
||||
return variable_class(
|
||||
id=id,
|
||||
name=name,
|
||||
description=description,
|
||||
value=segment.value,
|
||||
selector=selector,
|
||||
)
|
||||
|
|
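Taken together with the VariablePool change above, every value stored in the pool now passes through this factory. A rough usage sketch follows; the import path and the dict-to-ObjectSegment mapping are assumptions based on this diff, not verified against the full module.

from factories import variable_factory  # import path assumed

# Build a segment from a raw value, then promote it to a Variable bound to a selector.
segment = variable_factory.build_segment({"answer": 42})
variable = variable_factory.segment_to_variable(
    segment=segment,
    selector=["start_node", "answer"],
)
print(type(variable).__name__)  # expected: ObjectVariable, with an auto-generated uuid4 id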
115
api/poetry.lock
generated
|
@ -1,4 +1,4 @@
|
|||
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "aiohappyeyeballs"
|
||||
|
@ -932,6 +932,10 @@ files = [
|
|||
{file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d"},
|
||||
{file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0"},
|
||||
{file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e"},
|
||||
{file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5dab0844f2cf82be357a0eb11a9087f70c5430b2c241493fc122bb6f2bb0917c"},
|
||||
{file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e4fe605b917c70283db7dfe5ada75e04561479075761a0b3866c081d035b01c1"},
|
||||
{file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1e9a65b5736232e7a7f91ff3d02277f11d339bf34099a56cdab6a8b3410a02b2"},
|
||||
{file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:58d4b711689366d4a03ac7957ab8c28890415e267f9b6589969e74b6e42225ec"},
|
||||
{file = "Brotli-1.1.0-cp310-cp310-win32.whl", hash = "sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2"},
|
||||
{file = "Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc"},
|
||||
|
@ -944,8 +948,14 @@ files = [
|
|||
{file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c247dd99d39e0338a604f8c2b3bc7061d5c2e9e2ac7ba9cc1be5a69cb6cd832f"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1b2c248cd517c222d89e74669a4adfa5577e06ab68771a529060cf5a156e9757"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a24c50840d89ded6c9a8fdc7b6ed3692ed4e86f1c4a4a938e1e92def92933e0"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f31859074d57b4639318523d6ffdca586ace54271a73ad23ad021acd807eb14b"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50"},
|
||||
{file = "Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451"},
|
||||
|
@ -956,8 +966,24 @@ files = [
|
|||
{file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0"},
|
||||
{file = "Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0"},
|
||||
{file = "Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a090ca607cbb6a34b0391776f0cb48062081f5f60ddcce5d11838e67a01928d1"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de9d02f5bda03d27ede52e8cfe7b865b066fa49258cbab568720aa5be80a47d"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2333e30a5e00fe0fe55903c8832e08ee9c3b1382aacf4db26664a16528d51b4b"},
|
||||
|
@ -967,6 +993,10 @@ files = [
|
|||
{file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:fd5f17ff8f14003595ab414e45fce13d073e0762394f957182e69035c9f3d7c2"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:069a121ac97412d1fe506da790b3e69f52254b9df4eb665cd42460c837193354"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e93dfc1a1165e385cc8239fab7c036fb2cd8093728cbd85097b284d7b99249a2"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:aea440a510e14e818e67bfc4027880e2fb500c2ccb20ab21c7a7c8b5b4703d75"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:6974f52a02321b36847cd19d1b8e381bf39939c21efd6ee2fc13a28b0d99348c"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:a7e53012d2853a07a4a79c00643832161a910674a893d296c9f1259859a289d2"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:d7702622a8b40c49bffb46e1e3ba2e81268d5c04a34f460978c6b5517a34dd52"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-win32.whl", hash = "sha256:a599669fd7c47233438a56936988a2478685e74854088ef5293802123b5b2460"},
|
||||
{file = "Brotli-1.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d143fd47fad1db3d7c27a1b1d66162e855b5d50a89666af46e1679c496e8e579"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:11d00ed0a83fa22d29bc6b64ef636c4552ebafcef57154b4ddd132f5638fbd1c"},
|
||||
|
@ -978,6 +1008,10 @@ files = [
|
|||
{file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:919e32f147ae93a09fe064d77d5ebf4e35502a8df75c29fb05788528e330fe74"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:23032ae55523cc7bccb4f6a0bf368cd25ad9bcdcc1990b64a647e7bbcce9cb5b"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:224e57f6eac61cc449f498cc5f0e1725ba2071a3d4f48d5d9dffba42db196438"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:cb1dac1770878ade83f2ccdf7d25e494f05c9165f5246b46a621cc849341dc01"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:3ee8a80d67a4334482d9712b8e83ca6b1d9bc7e351931252ebef5d8f7335a547"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:5e55da2c8724191e5b557f8e18943b1b4839b8efc3ef60d65985bcf6f587dd38"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:d342778ef319e1026af243ed0a07c97acf3bad33b9f29e7ae6a1f68fd083e90c"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-win32.whl", hash = "sha256:587ca6d3cef6e4e868102672d3bd9dc9698c309ba56d41c2b9c85bbb903cdb95"},
|
||||
{file = "Brotli-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2954c1c23f81c2eaf0b0717d9380bd348578a94161a65b3a2afc62c86467dd68"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:efa8b278894b14d6da122a72fefcebc28445f2d3f880ac59d46c90f4c13be9a3"},
|
||||
|
@ -990,6 +1024,10 @@ files = [
|
|||
{file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ab4fbee0b2d9098c74f3057b2bc055a8bd92ccf02f65944a241b4349229185a"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:141bd4d93984070e097521ed07e2575b46f817d08f9fa42b16b9b5f27b5ac088"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fce1473f3ccc4187f75b4690cfc922628aed4d3dd013d047f95a9b3919a86596"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d2b35ca2c7f81d173d2fadc2f4f31e88cc5f7a39ae5b6db5513cf3383b0e0ec7"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:af6fa6817889314555aede9a919612b23739395ce767fe7fcbea9a80bf140fe5"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2feb1d960f760a575dbc5ab3b1c00504b24caaf6986e2dc2b01c09c87866a943"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4410f84b33374409552ac9b6903507cdb31cd30d2501fc5ca13d18f73548444a"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-win32.whl", hash = "sha256:db85ecf4e609a48f4b29055f1e144231b90edc90af7481aa731ba2d059226b1b"},
|
||||
{file = "Brotli-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3d7954194c36e304e1523f55d7042c59dc53ec20dd4e9ea9d151f1b62b4415c0"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a"},
|
||||
|
@ -1002,6 +1040,10 @@ files = [
|
|||
{file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0737ddb3068957cf1b054899b0883830bb1fec522ec76b1098f9b6e0f02d9419"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4f3607b129417e111e30637af1b56f24f7a49e64763253bbc275c75fa887d4b2"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6c6e0c425f22c1c719c42670d561ad682f7bfeeef918edea971a79ac5252437f"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:494994f807ba0b92092a163a0a283961369a65f6cbe01e8891132b7a320e61eb"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64"},
|
||||
{file = "Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467"},
|
||||
{file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"},
|
||||
|
@ -2411,6 +2453,26 @@ files = [
|
|||
[package.extras]
|
||||
test = ["pytest (>=6)"]
|
||||
|
||||
[[package]]
|
||||
name = "fal-client"
|
||||
version = "0.5.6"
|
||||
description = "Python client for fal.ai"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "fal_client-0.5.6-py3-none-any.whl", hash = "sha256:631fd857a3c44753ee46a2eea1e7276471453aca58faac9c3702f744c7c84050"},
|
||||
{file = "fal_client-0.5.6.tar.gz", hash = "sha256:d3afc4b6250023d0ee8437ec504558231d3b106d7aabc12cda8c39883faddecb"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = ">=0.21.0,<1"
|
||||
httpx-sse = ">=0.4.0,<0.5"
|
||||
|
||||
[package.extras]
|
||||
dev = ["fal-client[docs,test]"]
|
||||
docs = ["sphinx", "sphinx-autodoc-typehints", "sphinx-rtd-theme"]
|
||||
test = ["pillow", "pytest", "pytest-asyncio"]
|
||||
|
||||
[[package]]
|
||||
name = "fastapi"
|
||||
version = "0.115.4"
|
||||
|
@ -4049,6 +4111,17 @@ http2 = ["h2 (>=3,<5)"]
|
|||
socks = ["socksio (==1.*)"]
|
||||
zstd = ["zstandard (>=0.18.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "httpx-sse"
|
||||
version = "0.4.0"
|
||||
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
|
||||
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "huggingface-hub"
|
||||
version = "0.16.4"
|
||||
|
@@ -8466,29 +8539,29 @@ pyasn1 = ">=0.1.3"
 
 [[package]]
 name = "ruff"
-version = "0.6.9"
+version = "0.7.3"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"},
-    {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"},
-    {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"},
-    {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"},
-    {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"},
-    {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"},
-    {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"},
-    {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"},
-    {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"},
-    {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"},
-    {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"},
-    {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"},
-    {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"},
-    {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"},
-    {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"},
-    {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"},
-    {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"},
-    {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"},
+    {file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"},
+    {file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"},
+    {file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"},
+    {file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"},
+    {file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"},
+    {file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"},
+    {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"},
+    {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"},
+    {file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"},
+    {file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"},
+    {file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"},
+    {file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"},
+    {file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"},
+    {file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"},
+    {file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"},
+    {file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"},
+    {file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"},
+    {file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"},
 ]
 
 [[package]]
@@ -11005,4 +11078,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.10,<3.13"
-content-hash = "f20bd678044926913dbbc24bd0cf22503a75817aa55f59457ff7822032139b77"
+content-hash = "2ba4b464eebc26598f290fa94713acc44c588f902176e6efa80622911d40f0ac"
@@ -122,6 +122,7 @@ celery = "~5.4.0"
 chardet = "~5.1.0"
 cohere = "~5.2.4"
 dashscope = { version = "~1.17.0", extras = ["tokenizer"] }
+fal-client = "0.5.6"
 flask = "~3.0.1"
 flask-compress = "~1.14"
 flask-cors = "~4.0.0"
@@ -278,4 +279,4 @@ pytest-mock = "~3.14.0"
 optional = true
 [tool.poetry.group.lint.dependencies]
 dotenv-linter = "~0.5.0"
-ruff = "~0.6.9"
+ruff = "~0.7.3"
@@ -1458,6 +1458,7 @@ class SegmentService:
         pre_segment_data_list = []
         segment_data_list = []
         keywords_list = []
+        position = max_position + 1 if max_position else 1
         for segment_item in segments:
             content = segment_item["content"]
             doc_id = str(uuid.uuid4())
@@ -1475,7 +1476,7 @@ class SegmentService:
                 document_id=document.id,
                 index_node_id=doc_id,
                 index_node_hash=segment_hash,
-                position=max_position + 1 if max_position else 1,
+                position=position,
                 content=content,
                 word_count=len(content),
                 tokens=tokens,
@@ -1490,6 +1491,7 @@ class SegmentService:
             increment_word_count += segment_document.word_count
             db.session.add(segment_document)
             segment_data_list.append(segment_document)
+            position += 1
 
             pre_segment_data_list.append(segment_document)
             if "keywords" in segment_item:
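The SegmentService hunks above appear to replace a per-item recomputation of `max_position + 1 if max_position else 1` (which yields the same value for every segment in a batch) with a counter initialised once and incremented per segment. A minimal, self-contained sketch of that pattern, using stand-in inputs rather than the real service code:

```python
# Toy illustration of the running-position pattern; `segments` and
# `max_position` are stand-ins, not the actual SegmentService inputs.
from typing import Optional

def assign_positions(segments: list[dict], max_position: Optional[int]) -> list[dict]:
    position = max_position + 1 if max_position else 1
    placed = []
    for segment in segments:
        # Each segment gets the next position instead of all sharing max_position + 1.
        placed.append({**segment, "position": position})
        position += 1
    return placed

print(assign_positions([{"content": "a"}, {"content": "b"}], max_position=3))
# -> positions 4 and 5, rather than 4 and 4
```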
@@ -25,7 +25,9 @@ def document_indexing_task(dataset_id: str, document_ids: list):
    start_at = time.perf_counter()

    dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()

    if not dataset:
        logging.info(click.style("Dataset is not found: {}".format(dataset_id), fg="yellow"))
        return
    # check document limit
    features = FeatureService.get_features(dataset.tenant_id)
    try:
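This hunk appears to add an early-return guard so the indexing task bails out when the dataset has been deleted before `dataset.tenant_id` is dereferenced. A minimal sketch of that guard pattern, with hypothetical stand-ins in place of the real query and indexing logic:

```python
# Early-return guard sketch; fetch_dataset and the print are hypothetical
# stand-ins for the real DB lookup and indexing work.
import logging

def fetch_dataset(dataset_id: str):
    # Pretend lookup that returns None when the dataset no longer exists.
    return None

def document_indexing(dataset_id: str, document_ids: list[str]) -> None:
    dataset = fetch_dataset(dataset_id)
    if not dataset:
        # Guard first, so later attribute access cannot fail on None.
        logging.info("Dataset is not found: %s", dataset_id)
        return
    print(f"indexing {len(document_ids)} documents for {dataset_id}")

document_indexing("ds-123", ["doc-1"])
```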
@@ -1,4 +1,5 @@
 import os
+from collections import UserDict
 from unittest.mock import MagicMock
 
 import pytest
@@ -11,7 +12,7 @@ from pymochow.model.table import Table
 from requests.adapters import HTTPAdapter
 
 
-class AttrDict(dict):
+class AttrDict(UserDict):
     def __getattr__(self, item):
         return self.get(item)
 
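Switching the test helper from a `dict` subclass to `collections.UserDict` is a common design choice: UserDict keeps its items in a plain dict at `.data` and routes access through normal methods, so overriding behaviour is more predictable than subclassing `dict` directly. A rough, self-contained sketch of the resulting helper, assuming the tests only need attribute-style reads (the sample keys below are placeholders):

```python
from collections import UserDict

class AttrDict(UserDict):
    """Dict-like helper that also exposes keys as attributes (missing keys -> None)."""

    def __getattr__(self, item):
        # __getattr__ is only consulted after normal attribute lookup fails,
        # so UserDict internals such as self.data keep working as usual.
        return self.get(item)

row = AttrDict({"id": "doc-1", "score": 0.42})
print(row.id, row.score, row.missing)  # doc-1 0.42 None
```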