import logging
import random
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel
from extensions.ext_hosting_provider import hosting_configuration
from models.provider import ProviderType
# Module-level logger; records failures of the hosted moderation API call below.
logger = logging.getLogger(__name__)


def check_moderation(model_config: ModelConfigWithCredentialsEntity, text: str) -> bool:
    """Run hosted OpenAI moderation over a randomly sampled chunk of *text*.

    Moderation only applies when the hosting moderation feature is enabled,
    the hosted ``openai`` provider is enabled, the app is using the SYSTEM
    (hosted) provider type, and the model's provider is listed in the
    moderation configuration. Otherwise the check is skipped.

    :param model_config: model configuration of the invoking app
    :param text: full input text; one random 2000-character chunk is sampled
    :return: ``True`` if the sampled chunk is flagged by moderation (or the
        text is empty while moderation applies), ``False`` when moderation
        does not apply or the chunk passes
    :raises InvokeBadRequestError: if the moderation API call fails for any
        reason
    """
    moderation_config = hosting_configuration.moderation_config

    # Guard clauses: every precondition failing means "moderation not applicable".
    if not (moderation_config and moderation_config.enabled is True):
        return False
    if 'openai' not in hosting_configuration.provider_map:
        return False

    hosting_openai_config = hosting_configuration.provider_map['openai']
    if hosting_openai_config.enabled is not True:
        return False

    using_provider_type = model_config.provider_model_bundle.configuration.using_provider_type
    if using_provider_type != ProviderType.SYSTEM:
        return False
    if model_config.provider not in moderation_config.providers:
        return False

    # Sample a single 2000-character chunk instead of moderating the whole
    # text, bounding the cost of the moderation API call per request.
    chunk_length = 2000
    text_chunks = [text[i:i + chunk_length] for i in range(0, len(text), chunk_length)]

    if not text_chunks:
        # Empty input yields no chunks; original behavior treats this as flagged.
        return True

    text_chunk = random.choice(text_chunks)

    try:
        moderation_result = OpenAIModerationModel().invoke(
            model='text-moderation-stable',
            credentials=hosting_openai_config.credentials,
            text=text_chunk
        )
    except Exception as ex:
        logger.exception(ex)
        # NOTE(review): raised for ANY failure, not only rate limiting — the
        # message is kept verbatim for backward compatibility with callers.
        # `from ex` makes the causal chain explicit for debugging.
        raise InvokeBadRequestError('Rate limit exceeded, please try again later.') from ex

    return moderation_result is True