Mirror of https://github.com/langgenius/dify.git (synced 2024-11-16 11:42:29 +08:00)
chore: fix unnecessary string concatenation in single line (#8311)
This commit is contained in: parent 08c486452f, commit 6613b8f2e0
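Background, for readers unfamiliar with the pattern being cleaned up: Python implicitly concatenates adjacent string literals, so "foo" "bar" is the single string "foobar". That is handy for wrapping one long string across several source lines, but on a single line the split serves no purpose and can mask a missing comma. A minimal sketch of the issue, with illustrative names not taken from this diff (this is the pattern lint rules such as flake8-implicit-str-concat's ISC001 are meant to flag):

    # Two adjacent literals on one line are silently joined by the parser.
    message = "Hello, " "world!"      # == "Hello, world!"

    # The cleaned-up form says the same thing in one literal.
    message = "Hello, world!"

    # Why the habit is risky: in a collection, a lost comma merges items.
    colors = ["red" "green", "blue"]  # ["redgreen", "blue"] -- probably a bug

The same applies to f-strings, which is why pairs like f"Inputs: " f"{...}" in the hunks below collapse into a single f-string.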
@@ -104,7 +104,7 @@ def reset_email(email, new_email, email_confirm):
     )
 )
 @click.confirmation_option(
     prompt=click.style(
-        "Are you sure you want to reset encrypt key pair?" " this operation cannot be rolled back!", fg="red"
+        "Are you sure you want to reset encrypt key pair? this operation cannot be rolled back!", fg="red"
     )
 )
 def reset_encrypt_key_pair():

@@ -131,7 +131,7 @@ def reset_encrypt_key_pair():

     click.echo(
         click.style(
-            "Congratulations! " "the asymmetric key pair of workspace {} has been reset.".format(tenant.id),
+            "Congratulations! The asymmetric key pair of workspace {} has been reset.".format(tenant.id),
             fg="green",
         )
     )

@@ -275,8 +275,7 @@ def migrate_knowledge_vector_database():
     for dataset in datasets:
         total_count = total_count + 1
         click.echo(
-            f"Processing the {total_count} dataset {dataset.id}. "
-            + f"{create_count} created, {skipped_count} skipped."
+            f"Processing the {total_count} dataset {dataset.id}. {create_count} created, {skipped_count} skipped."
         )
         try:
             click.echo("Create dataset vdb index: {}".format(dataset.id))

@@ -594,7 +593,7 @@ def create_tenant(email: str, language: Optional[str] = None, name: Optional[str

     click.echo(
         click.style(
-            "Congratulations! Account and tenant created.\n" "Account: {}\nPassword: {}".format(email, new_password),
+            "Congratulations! Account and tenant created.\nAccount: {}\nPassword: {}".format(email, new_password),
             fg="green",
         )
     )

@@ -129,12 +129,12 @@ class EndpointConfig(BaseSettings):
     )

     SERVICE_API_URL: str = Field(
-        description="Service API Url prefix." "used to display Service API Base Url to the front-end.",
+        description="Service API Url prefix. used to display Service API Base Url to the front-end.",
         default="",
     )

     APP_WEB_URL: str = Field(
-        description="WebApp Url prefix." "used to display WebAPP API Base Url to the front-end.",
+        description="WebApp Url prefix. used to display WebAPP API Base Url to the front-end.",
         default="",
     )

@@ -272,7 +272,7 @@ class LoggingConfig(BaseSettings):
     """

     LOG_LEVEL: str = Field(
-        description="Log output level, default to INFO." "It is recommended to set it to ERROR for production.",
+        description="Log output level, default to INFO. It is recommended to set it to ERROR for production.",
         default="INFO",
     )

@@ -465,6 +465,6 @@ api.add_resource(
 api.add_resource(PublishedWorkflowApi, "/apps/<uuid:app_id>/workflows/publish")
 api.add_resource(DefaultBlockConfigsApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs")
 api.add_resource(
-    DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs" "/<string:block_type>"
+    DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs/<string:block_type>"
 )
 api.add_resource(ConvertToWorkflowApi, "/apps/<uuid:app_id>/convert-to-workflow")

@@ -399,7 +399,7 @@ class DatasetIndexingEstimateApi(Resource):
                 )
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)

@@ -18,9 +18,7 @@ class NotSetupError(BaseHTTPException):

 class NotInitValidateError(BaseHTTPException):
     error_code = "not_init_validated"
-    description = (
-        "Init validation has not been completed yet. " "Please proceed with the init validation process first."
-    )
+    description = "Init validation has not been completed yet. Please proceed with the init validation process first."
     code = 401


@@ -218,7 +218,7 @@ api.add_resource(ModelProviderCredentialApi, "/workspaces/current/model-provider
 api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers/<string:provider>/credentials/validate")
 api.add_resource(ModelProviderApi, "/workspaces/current/model-providers/<string:provider>")
 api.add_resource(
-    ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/" "<string:icon_type>/<string:lang>"
+    ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/<string:icon_type>/<string:lang>"
 )

 api.add_resource(

@@ -86,7 +86,7 @@ class PromptTemplateConfigManager:
         if config["prompt_type"] == PromptTemplateEntity.PromptType.ADVANCED.value:
             if not config["chat_prompt_config"] and not config["completion_prompt_config"]:
                 raise ValueError(
-                    "chat_prompt_config or completion_prompt_config is required " "when prompt_type is advanced"
+                    "chat_prompt_config or completion_prompt_config is required when prompt_type is advanced"
                 )

         model_mode_vals = [mode.value for mode in ModelMode]

@@ -115,7 +115,7 @@ class BasicVariablesConfigManager:

                 pattern = re.compile(r"^(?!\d)[\u4e00-\u9fa5A-Za-z0-9_\U0001F300-\U0001F64F\U0001F680-\U0001F6FF]{1,100}$")
                 if pattern.match(form_item["variable"]) is None:
-                    raise ValueError("variable in user_input_form must be a string, " "and cannot start with a number")
+                    raise ValueError("variable in user_input_form must be a string, and cannot start with a number")

                 variables.append(form_item["variable"])

@@ -379,7 +379,7 @@ class AppRunner:
                 queue_manager=queue_manager,
                 app_generate_entity=application_generate_entity,
                 prompt_messages=prompt_messages,
-                text="I apologize for any confusion, " "but I'm an AI assistant to be helpful, harmless, and honest.",
+                text="I apologize for any confusion, but I'm an AI assistant to be helpful, harmless, and honest.",
                 stream=application_generate_entity.stream,
             )

@@ -84,7 +84,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
         if route_node_state.node_run_result:
             node_run_result = route_node_state.node_run_result
             self.print_text(
-                f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
+                f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
                 color="green",
             )
             self.print_text(

@@ -116,7 +116,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
             node_run_result = route_node_state.node_run_result
             self.print_text(f"Error: {node_run_result.error}", color="red")
             self.print_text(
-                f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
+                f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
                 color="red",
             )
             self.print_text(

@@ -125,7 +125,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
                 color="red",
             )
             self.print_text(
-                f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
+                f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
                 color="red",
             )

@@ -200,7 +200,7 @@ class AIModel(ABC):
             except Exception as e:
                 model_schema_yaml_file_name = os.path.basename(model_schema_yaml_path).rstrip(".yaml")
                 raise Exception(
-                    f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}:" f" {str(e)}"
+                    f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}: {str(e)}"
                 )

             # cache model schema

@@ -621,7 +621,7 @@ class CohereLargeLanguageModel(LargeLanguageModel):

                 desc = p_val["description"]
                 if "enum" in p_val:
-                    desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]"
+                    desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]"

                 parameter_definitions[p_key] = ToolParameterDefinitionsValue(
                     description=desc, type=p_val["type"], required=required

@@ -96,7 +96,7 @@ class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel

         if credentials["task_type"] not in ("text2text-generation", "text-generation"):
             raise CredentialsValidateFailedError(
-                "Huggingface Hub Task Type must be one of text2text-generation, " "text-generation."
+                "Huggingface Hub Task Type must be one of text2text-generation, text-generation."
             )

         client = InferenceClient(token=credentials["huggingfacehub_api_token"])

@@ -282,7 +282,7 @@ class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel

             valid_tasks = ("text2text-generation", "text-generation")
             if model_info.pipeline_tag not in valid_tasks:
-                raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.")
+                raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.")
         except Exception as e:
             raise CredentialsValidateFailedError(f"{str(e)}")

@@ -121,7 +121,7 @@ class HuggingfaceHubTextEmbeddingModel(_CommonHuggingfaceHub, TextEmbeddingModel

             valid_tasks = "feature-extraction"
             if model_info.pipeline_tag not in valid_tasks:
-                raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.")
+                raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.")
         except Exception as e:
             raise CredentialsValidateFailedError(f"{str(e)}")

@@ -572,7 +572,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
                 label=I18nObject(en_US="Size of context window"),
                 type=ParameterType.INT,
                 help=I18nObject(
-                    en_US="Sets the size of the context window used to generate the next token. " "(Default: 2048)"
+                    en_US="Sets the size of the context window used to generate the next token. (Default: 2048)"
                 ),
                 default=2048,
                 min=1,

@@ -650,7 +650,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
                 label=I18nObject(en_US="Format"),
                 type=ParameterType.STRING,
                 help=I18nObject(
-                    en_US="the format to return a response in." " Currently the only accepted value is json."
+                    en_US="the format to return a response in. Currently the only accepted value is json."
                 ),
                 options=["json"],
             ),

@@ -86,7 +86,7 @@ class ReplicateLargeLanguageModel(_CommonReplicate, LargeLanguageModel):

         if model.count("/") != 1:
             raise CredentialsValidateFailedError(
-                "Replicate Model Name must be provided, " "format: {user_name}/{model_name}"
+                "Replicate Model Name must be provided, format: {user_name}/{model_name}"
             )

         try:

@@ -472,7 +472,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
             for p_key, p_val in properties.items():
                 desc = p_val["description"]
                 if "enum" in p_val:
-                    desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]"
+                    desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]"

                 properties_definitions[p_key] = {
                     "description": desc,

@@ -245,7 +245,7 @@ class RelytVector(BaseVector):
         try:
             from sqlalchemy.engine import Row
         except ImportError:
-            raise ImportError("Could not import Row from sqlalchemy.engine. " "Please 'pip install sqlalchemy>=1.4'.")
+            raise ImportError("Could not import Row from sqlalchemy.engine. Please 'pip install sqlalchemy>=1.4'.")

         filter_condition = ""
         if filter is not None:

@@ -88,7 +88,7 @@ class DatasetDocumentStore:
             # NOTE: doc could already exist in the store, but we overwrite it
             if not allow_update and segment_document:
                 raise ValueError(
-                    f"doc_id {doc.metadata['doc_id']} already exists. " "Set allow_update to True to overwrite."
+                    f"doc_id {doc.metadata['doc_id']} already exists. Set allow_update to True to overwrite."
                 )

             # calc embedding use tokens

@@ -50,7 +50,7 @@ class NotionExtractor(BaseExtractor):
             integration_token = dify_config.NOTION_INTEGRATION_TOKEN
             if integration_token is None:
                 raise ValueError(
-                    "Must specify `integration_token` or set environment " "variable `NOTION_INTEGRATION_TOKEN`."
+                    "Must specify `integration_token` or set environment variable `NOTION_INTEGRATION_TOKEN`."
                 )

             self._notion_access_token = integration_token

@@ -60,7 +60,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
         """
         if chunk_overlap > chunk_size:
             raise ValueError(
-                f"Got a larger chunk overlap ({chunk_overlap}) than chunk size " f"({chunk_size}), should be smaller."
+                f"Got a larger chunk overlap ({chunk_overlap}) than chunk size ({chunk_size}), should be smaller."
             )
         self._chunk_size = chunk_size
         self._chunk_overlap = chunk_overlap

@@ -117,7 +117,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
             if total + _len + (separator_len if len(current_doc) > 0 else 0) > self._chunk_size:
                 if total > self._chunk_size:
                     logger.warning(
-                        f"Created a chunk of size {total}, " f"which is longer than the specified {self._chunk_size}"
+                        f"Created a chunk of size {total}, which is longer than the specified {self._chunk_size}"
                     )
                 if len(current_doc) > 0:
                     doc = self._join_docs(current_doc, separator)

@@ -153,7 +153,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):

         except ImportError:
             raise ValueError(
-                "Could not import transformers python package. " "Please install it with `pip install transformers`."
+                "Could not import transformers python package. Please install it with `pip install transformers`."
             )
         return cls(length_function=_huggingface_tokenizer_length, **kwargs)

@@ -14,7 +14,7 @@ class GaodeProvider(BuiltinToolProviderController):

         try:
             response = requests.get(
-                url="https://restapi.amap.com/v3/geocode/geo?address={address}&key={apikey}" "".format(
+                url="https://restapi.amap.com/v3/geocode/geo?address={address}&key={apikey}".format(
                     address=urllib.parse.quote("广东省广州市天河区广州塔"), apikey=credentials.get("api_key")
                 )
             )

@@ -27,7 +27,7 @@ class GaodeRepositoriesTool(BuiltinTool):
             city_response = s.request(
                 method="GET",
                 headers={"Content-Type": "application/json; charset=utf-8"},
-                url="{url}/config/district?keywords={keywords}" "&subdistrict=0&extensions=base&key={apikey}" "".format(
+                url="{url}/config/district?keywords={keywords}&subdistrict=0&extensions=base&key={apikey}".format(
                     url=api_domain, keywords=city, apikey=self.runtime.credentials.get("api_key")
                 ),
             )

@@ -39,7 +39,7 @@ class GithubRepositoriesTool(BuiltinTool):
             response = s.request(
                 method="GET",
                 headers=headers,
-                url=f"{api_domain}/search/repositories?" f"q={quote(query)}&sort=stars&per_page={top_n}&order=desc",
+                url=f"{api_domain}/search/repositories?q={quote(query)}&sort=stars&per_page={top_n}&order=desc",
             )
             response_data = response.json()
             if response.status_code == 200 and isinstance(response_data.get("items"), list):

@@ -51,7 +51,7 @@ class PubMedAPIWrapper(BaseModel):
         try:
             # Retrieve the top-k results for the query
             docs = [
-                f"Published: {result['pub_date']}\nTitle: {result['title']}\n" f"Summary: {result['summary']}"
+                f"Published: {result['pub_date']}\nTitle: {result['title']}\nSummary: {result['summary']}"
                 for result in self.load(query[: self.ARXIV_MAX_QUERY_LENGTH])
             ]

@@ -97,7 +97,7 @@ class PubMedAPIWrapper(BaseModel):
             if e.code == 429 and retry < self.max_retry:
                 # Too Many Requests error
                 # wait for an exponentially increasing amount of time
-                print(f"Too Many Requests, " f"waiting for {self.sleep_time:.2f} seconds...")
+                print(f"Too Many Requests, waiting for {self.sleep_time:.2f} seconds...")
                 time.sleep(self.sleep_time)
                 self.sleep_time *= 2
                 retry += 1

@@ -39,7 +39,7 @@ class TwilioAPIWrapper(BaseModel):
         try:
             from twilio.rest import Client
         except ImportError:
-            raise ImportError("Could not import twilio python package. " "Please install it with `pip install twilio`.")
+            raise ImportError("Could not import twilio python package. Please install it with `pip install twilio`.")
         account_sid = values.get("account_sid")
         auth_token = values.get("auth_token")
         values["from_number"] = values.get("from_number")

@@ -37,6 +37,6 @@ def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
     for key in expected_keys:
         if key not in json_obj:
             raise OutputParserError(
-                f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}"
+                f"Got invalid return object. Expected key `{key}` to be present, but got {json_obj}"
             )
     return json_obj

@@ -238,7 +238,7 @@ class AppDslService:
         :param use_icon_as_answer_icon: use app icon as answer icon
         """
         if not workflow_data:
-            raise ValueError("Missing workflow in data argument " "when app mode is advanced-chat or workflow")
+            raise ValueError("Missing workflow in data argument when app mode is advanced-chat or workflow")

         app = cls._create_app(
             tenant_id=tenant_id,

@@ -283,7 +283,7 @@ class AppDslService:
         :param account: Account instance
         """
         if not workflow_data:
-            raise ValueError("Missing workflow in data argument " "when app mode is advanced-chat or workflow")
+            raise ValueError("Missing workflow in data argument when app mode is advanced-chat or workflow")

         # fetch draft workflow by app_model
         workflow_service = WorkflowService()

@@ -337,7 +337,7 @@ class AppDslService:
         :param icon_background: app icon background
         """
         if not model_config_data:
-            raise ValueError("Missing model_config in data argument " "when app mode is chat, agent-chat or completion")
+            raise ValueError("Missing model_config in data argument when app mode is chat, agent-chat or completion")

         app = cls._create_app(
             tenant_id=tenant_id,

@@ -181,7 +181,7 @@ class DatasetService:
                     "in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
-                raise ValueError(f"The dataset in unavailable, due to: " f"{ex.description}")
+                raise ValueError(f"The dataset in unavailable, due to: {ex.description}")

     @staticmethod
     def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, embedding_model: str):

@@ -195,10 +195,10 @@ class DatasetService:
                 )
             except LLMBadRequestError:
                 raise ValueError(
-                    "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
-                raise ValueError(f"The dataset in unavailable, due to: " f"{ex.description}")
+                raise ValueError(f"The dataset in unavailable, due to: {ex.description}")

     @staticmethod
     def update_dataset(dataset_id, data, user):

@@ -53,7 +53,7 @@ def test__get_completion_model_prompt_messages():
         "#context#": context,
         "#histories#": "\n".join(
             [
-                f"{'Human' if prompt.role.value == 'user' else 'Assistant'}: " f"{prompt.content}"
+                f"{'Human' if prompt.role.value == 'user' else 'Assistant'}: {prompt.content}"
                 for prompt in history_prompt_messages
             ]
         ),