diff --git a/api/.env.example b/api/.env.example
index 33c58ed691..5a9d21c980 100644
--- a/api/.env.example
+++ b/api/.env.example
@@ -239,6 +239,7 @@ UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
 # Model Configuration
 MULTIMODAL_SEND_IMAGE_FORMAT=base64
 PROMPT_GENERATION_MAX_TOKENS=512
+CODE_GENERATION_MAX_TOKENS=1024
 
 # Mail configuration, support: resend, smtp
 MAIL_TYPE=
diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py
index 3d1e6b7a37..7108759b0b 100644
--- a/api/controllers/console/app/generator.py
+++ b/api/controllers/console/app/generator.py
@@ -52,4 +52,39 @@ class RuleGenerateApi(Resource):
 
         return rules
 
 
+class RuleCodeGenerateApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def post(self):
+        parser = reqparse.RequestParser()
+        parser.add_argument("instruction", type=str, required=True, nullable=False, location="json")
+        parser.add_argument("model_config", type=dict, required=True, nullable=False, location="json")
+        parser.add_argument("no_variable", type=bool, required=True, default=False, location="json")
+        parser.add_argument("code_language", type=str, required=False, default="javascript", location="json")
+        args = parser.parse_args()
+
+        account = current_user
+        CODE_GENERATION_MAX_TOKENS = int(os.getenv("CODE_GENERATION_MAX_TOKENS", "1024"))
+        try:
+            code_result = LLMGenerator.generate_code(
+                tenant_id=account.current_tenant_id,
+                instruction=args["instruction"],
+                model_config=args["model_config"],
+                code_language=args["code_language"],
+                max_tokens=CODE_GENERATION_MAX_TOKENS,
+            )
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
+        except QuotaExceededError:
+            raise ProviderQuotaExceededError()
+        except ModelCurrentlyNotSupportError:
+            raise ProviderModelCurrentlyNotSupportError()
+        except InvokeError as e:
+            raise CompletionRequestError(e.description)
+
+        return code_result
+
+
 api.add_resource(RuleGenerateApi, "/rule-generate")
+api.add_resource(RuleCodeGenerateApi, "/rule-code-generate")
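For reference, a minimal sketch of exercising the new endpoint from a script. The payload fields mirror the request parser above, and the response shape comes from LLMGenerator.generate_code below; the /console/api prefix, the local port, and the bearer token are assumptions about a particular deployment, not part of this change.

# Hypothetical smoke test; assumes the console API is served at
# http://localhost:5001/console/api and that TOKEN is a valid console session token.
import requests

resp = requests.post(
    "http://localhost:5001/console/api/rule-code-generate",
    headers={"Authorization": "Bearer TOKEN"},
    json={
        "instruction": "Return the SHA-256 hex digest of an input string.",
        "model_config": {"provider": "openai", "name": "gpt-4o-mini"},
        "no_variable": True,
        "code_language": "python",
    },
    timeout=60,
)
print(resp.json())  # expected shape: {"code": "...", "language": "python", "error": ""}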
diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index 39bd6fee69..9cf9ed75c0 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -8,6 +8,8 @@ from core.llm_generator.output_parser.suggested_questions_after_answer import Su
 from core.llm_generator.prompts import (
     CONVERSATION_TITLE_PROMPT,
     GENERATOR_QA_PROMPT,
+    JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE,
+    PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE,
     WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
 )
 from core.model_manager import ModelManager
@@ -239,6 +241,54 @@ class LLMGenerator:
 
         return rule_config
 
+    @classmethod
+    def generate_code(
+        cls,
+        tenant_id: str,
+        instruction: str,
+        model_config: dict,
+        code_language: str = "javascript",
+        max_tokens: int = 1000,
+    ) -> dict:
+        if code_language == "python":
+            prompt_template = PromptTemplateParser(PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE)
+        else:
+            prompt_template = PromptTemplateParser(JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE)
+
+        prompt = prompt_template.format(
+            inputs={
+                "INSTRUCTION": instruction,
+                "CODE_LANGUAGE": code_language,
+            },
+            remove_template_variables=False,
+        )
+
+        model_manager = ModelManager()
+        model_instance = model_manager.get_model_instance(
+            tenant_id=tenant_id,
+            model_type=ModelType.LLM,
+            provider=model_config.get("provider") if model_config else None,
+            model=model_config.get("name") if model_config else None,
+        )
+
+        prompt_messages = [UserPromptMessage(content=prompt)]
+        model_parameters = {"max_tokens": max_tokens, "temperature": 0.01}
+
+        try:
+            response = model_instance.invoke_llm(
+                prompt_messages=prompt_messages, model_parameters=model_parameters, stream=False
+            )
+
+            generated_code = response.message.content
+            return {"code": generated_code, "language": code_language, "error": ""}
+
+        except InvokeError as e:
+            error = str(e)
+            return {"code": "", "language": code_language, "error": f"Failed to generate code. Error: {error}"}
+        except Exception as e:
+            logging.exception(e)
+            return {"code": "", "language": code_language, "error": f"An unexpected error occurred: {str(e)}"}
+
     @classmethod
     def generate_qa_document(cls, tenant_id: str, query, document_language: str):
         prompt = GENERATOR_QA_PROMPT.format(language=document_language)
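A minimal sketch of invoking the new classmethod directly, for instance from a backend shell or a unit test. The tenant id and model selection are placeholder values; generate_code only reads the "provider" and "name" keys from model_config and always returns a dict, reporting failures through the "error" field.

from core.llm_generator.llm_generator import LLMGenerator

# Placeholder tenant and model; the method resolves the model via ModelManager.
result = LLMGenerator.generate_code(
    tenant_id="your-tenant-id",
    instruction="Parse an ISO-8601 timestamp and return its weekday name.",
    model_config={"provider": "openai", "name": "gpt-4o-mini"},
    code_language="python",
    max_tokens=1024,
)
if result["error"]:
    print("generation failed:", result["error"])
else:
    print(result["code"])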
diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py
index e5b6784516..7c0f247052 100644
--- a/api/core/llm_generator/prompts.py
+++ b/api/core/llm_generator/prompts.py
@@ -61,6 +61,73 @@ User Input: yo, 你今天咋样?
 User Input:
 """  # noqa: E501
 
+PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE = (
+    "You are an expert programmer. Generate code based on the following instructions:\n\n"
+    "Instructions: {{INSTRUCTION}}\n\n"
+    "Write the code in {{CODE_LANGUAGE}}.\n\n"
+    "Please ensure that you meet the following requirements:\n"
+    "1. Define a function named 'main'.\n"
+    "2. The 'main' function must return a dictionary (dict).\n"
+    "3. You may modify the arguments of the 'main' function, but include appropriate type hints.\n"
+    "4. The returned dictionary should contain at least one key-value pair.\n\n"
+    "5. You may ONLY use the following libraries in your code: \n"
+    "- json\n"
+    "- datetime\n"
+    "- math\n"
+    "- random\n"
+    "- re\n"
+    "- string\n"
+    "- sys\n"
+    "- time\n"
+    "- traceback\n"
+    "- uuid\n"
+    "- os\n"
+    "- base64\n"
+    "- hashlib\n"
+    "- hmac\n"
+    "- binascii\n"
+    "- collections\n"
+    "- functools\n"
+    "- operator\n"
+    "- itertools\n\n"
+    "Example:\n"
+    "def main(arg1: str, arg2: int) -> dict:\n"
+    "    return {\n"
+    '        "result": arg1 * arg2,\n'
+    "    }\n\n"
+    "IMPORTANT:\n"
+    "- Provide ONLY the code without any additional explanations, comments, or markdown formatting.\n"
+    "- DO NOT use markdown code blocks (``` or ``` python). Return the raw code directly.\n"
+    "- The code should start immediately after this instruction, without any preceding newlines or spaces.\n"
+    "- The code should be complete, functional, and follow best practices for {{CODE_LANGUAGE}}.\n\n"
+    "- Always use the format return {'result': ...} for the output.\n\n"
+    "Generated Code:\n"
+)
+JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE = (
+    "You are an expert programmer. Generate code based on the following instructions:\n\n"
+    "Instructions: {{INSTRUCTION}}\n\n"
+    "Write the code in {{CODE_LANGUAGE}}.\n\n"
+    "Please ensure that you meet the following requirements:\n"
+    "1. Define a function named 'main'.\n"
+    "2. The 'main' function must return an object.\n"
+    "3. You may modify the arguments of the 'main' function, but include appropriate JSDoc annotations.\n"
+    "4. The returned object should contain at least one key-value pair.\n\n"
+    "5. The returned object should always be in the format: {result: ...}\n\n"
+    "Example:\n"
+    "function main(arg1, arg2) {\n"
+    "    return {\n"
+    "        result: arg1 * arg2\n"
+    "    };\n"
+    "}\n\n"
+    "IMPORTANT:\n"
+    "- Provide ONLY the code without any additional explanations, comments, or markdown formatting.\n"
+    "- DO NOT use markdown code blocks (``` or ``` javascript). Return the raw code directly.\n"
+    "- The code should start immediately after this instruction, without any preceding newlines or spaces.\n"
+    "- The code should be complete, functional, and follow best practices for {{CODE_LANGUAGE}}.\n\n"
+    "Generated Code:\n"
+)
+
+
 SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
     "Please help me predict the three most likely questions that human would ask, "
     "and keeping each question under 20 characters.\n"
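As a quick sanity check, the templates can be rendered the same way generate_code renders them. A sketch only: the constant import is taken directly from this diff, while the PromptTemplateParser module path is an assumption about the existing codebase.

from core.llm_generator.prompts import PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE
from core.prompt.utils.prompt_template_parser import PromptTemplateParser  # assumed import path

# Substitute the two placeholders exactly as LLMGenerator.generate_code does.
parser = PromptTemplateParser(PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE)
prompt = parser.format(
    inputs={
        "INSTRUCTION": "Reverse the words in a sentence.",
        "CODE_LANGUAGE": "python",
    },
    remove_template_variables=False,
)
print(prompt)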
diff --git a/web/app/components/app/configuration/config/code-generator/get-code-generator-res.tsx b/web/app/components/app/configuration/config/code-generator/get-code-generator-res.tsx
new file mode 100644
index 0000000000..b2d45d2733
--- /dev/null
+++ b/web/app/components/app/configuration/config/code-generator/get-code-generator-res.tsx
@@ -0,0 +1,200 @@
+import type { FC } from 'react'
+import React from 'react'
+import cn from 'classnames'
+import useBoolean from 'ahooks/lib/useBoolean'
+import { useTranslation } from 'react-i18next'
+import ConfigPrompt from '../../config-prompt'
+import { languageMap } from '../../../../workflow/nodes/_base/components/editor/code-editor/index'
+import { generateRuleCode } from '@/service/debug'
+import type { CodeGenRes } from '@/service/debug'
+import { ModelModeType } from '@/types/app'
+import type { AppType, Model } from '@/types/app'
+import Modal from '@/app/components/base/modal'
+import Button from '@/app/components/base/button'
+import { Generator } from '@/app/components/base/icons/src/vender/other'
+import Toast from '@/app/components/base/toast'
+import Loading from '@/app/components/base/loading'
+import Confirm from '@/app/components/base/confirm'
+import type { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
+export type IGetCodeGeneratorResProps = {
+  mode: AppType
+  isShow: boolean
+  codeLanguages: CodeLanguage
+  onClose: () => void
+  onFinished: (res: CodeGenRes) => void
+}
+
+export const GetCodeGeneratorResModal: FC<IGetCodeGeneratorResProps> = (
+  {
+    mode,
+    isShow,
+    codeLanguages,
+    onClose,
+    onFinished,
+
+  },
+) => {
+  const { t } = useTranslation()
+  const [instruction, setInstruction] = React.useState('')
+  const [isLoading, { setTrue: setLoadingTrue, setFalse: setLoadingFalse }] = useBoolean(false)
+  const [res, setRes] = React.useState<CodeGenRes | null>(null)
+  const isValid = () => {
+    if (instruction.trim() === '') {
+      Toast.notify({
+        type: 'error',
+        message: t('common.errorMsg.fieldRequired', {
+          field: t('appDebug.code.instruction'),
+        }),
+      })
+      return false
+    }
+    return true
+  }
+  const model: Model = {
+    provider: 'openai',
+    name: 'gpt-4o-mini',
+    mode: ModelModeType.chat,
+    completion_params: {
+      temperature: 0.7,
+      max_tokens: 0,
+      top_p: 0,
+      echo: false,
+      stop: [],
+      presence_penalty: 0,
+      frequency_penalty: 0,
+    },
+  }
+  const isInLLMNode = true
+  const onGenerate = async () => {
+    if (!isValid())
+      return
+    if (isLoading)
+      return
+    setLoadingTrue()
+    try {
+      const { error, ...res } = await generateRuleCode({
+        instruction,
+        model_config: model,
+        no_variable: !!isInLLMNode,
+        code_language: languageMap[codeLanguages] || 'javascript',
+      })
+      setRes(res)
+      if (error) {
+        Toast.notify({
+          type: 'error',
+          message: error,
+        })
+      }
+    }
+    finally {
+      setLoadingFalse()
+    }
+  }
+  const [showConfirmOverwrite, setShowConfirmOverwrite] = React.useState(false)
+
+  const renderLoading = (
+    <div>
+      <Loading />
+      <div>{t('appDebug.codegen.loading')}</div>
+    </div>
+  )
+
+  return (
+    <Modal
+      isShow={isShow}
+      onClose={onClose}
+    >
+      <div>
+        <div>
+          <div>{t('appDebug.codegen.title')}</div>
+          <div>{t('appDebug.codegen.description')}</div>
+        </div>
+        <div>
+          <div>{t('appDebug.codegen.instruction')}</div>