catch openai rate limit error (#7658)

Authored by Jyong on 2024-08-26 19:36:44 +08:00; committed by GitHub
parent 7cda73f192
commit 1473083a41
7 changed files with 53 additions and 0 deletions


@@ -17,6 +17,7 @@ from controllers.console.app.error import (
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import (
@@ -31,6 +32,7 @@ from libs.helper import uuid_value
from libs.login import login_required
from models.model import AppMode
from services.app_generate_service import AppGenerateService
from services.errors.llm import InvokeRateLimitError

# define completion message api for user
@@ -135,6 +137,8 @@ class ChatMessageApi(Resource):
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeRateLimitError as ex:
            raise InvokeRateLimitHttpError(ex.description)
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except (ValueError, AppInvokeQuotaExceededError) as e:
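
The hunk above maps the new service-layer InvokeRateLimitError onto its HTTP-layer counterpart, so a provider rate limit reaches API clients as a 429 rather than a generic completion error. A minimal standalone sketch of that translation pattern (simplified stand-in names, not the actual Dify classes):

```python
# Sketch of the service-to-HTTP error translation used in the hunk above.
class ServiceRateLimitError(Exception):
    """Service-layer error, analogous to services.errors.llm.InvokeRateLimitError."""
    description = "Rate Limit Error"


class HttpRateLimitError(Exception):
    """HTTP-layer error, analogous to the 429 class added in the controllers."""
    code = 429

    def __init__(self, description: str) -> None:
        super().__init__(description)
        self.description = description


def handle_chat_request(call_service):
    try:
        return call_service()
    except ServiceRateLimitError as ex:
        # Re-raise at the HTTP layer, carrying the provider's message through.
        raise HttpRateLimitError(ex.description)
```

Translating at the controller boundary keeps the service layer free of HTTP concerns while still preserving the provider's original message.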


@@ -119,3 +119,11 @@ class TracingConfigCheckError(BaseHTTPException):
    error_code = "trace_config_check_error"
    description = "Invalid Credentials."
    code = 400


class InvokeRateLimitError(BaseHTTPException):
    """Raised when the Invoke returns rate limit error."""

    error_code = "rate_limit_error"
    description = "Rate Limit Error"
    code = 429
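
The new controller error class follows the existing pattern in this file: a BaseHTTPException subclass whose `code` determines the HTTP status. As a rough illustration only, assuming a werkzeug-style HTTPException base (Dify's BaseHTTPException additionally carries the `error_code` field), raising such a class inside a Flask view produces a 429 response; the route name below is hypothetical:

```python
# Illustrative only: a hypothetical 429 error class on top of werkzeug's
# HTTPException, mirroring the code/description fields added above.
from flask import Flask
from werkzeug.exceptions import HTTPException


class RateLimitHttpError(HTTPException):
    code = 429                       # rendered as "429 Too Many Requests"
    description = "Rate Limit Error"


app = Flask(__name__)


@app.route("/chat-messages")
def chat_messages():
    raise RateLimitHttpError()       # Flask turns this into a 429 response


# app.test_client().get("/chat-messages").status_code == 429
```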


@@ -15,6 +15,7 @@ from controllers.web.error import (
    ProviderNotInitializeError,
    ProviderQuotaExceededError,
)
from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError
from controllers.web.wraps import WebApiResource
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
@@ -24,6 +25,7 @@ from libs import helper
from libs.helper import uuid_value
from models.model import AppMode
from services.app_generate_service import AppGenerateService
from services.errors.llm import InvokeRateLimitError

# define completion api for user
@@ -120,6 +122,8 @@ class ChatApi(WebApiResource):
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeRateLimitError as ex:
            raise InvokeRateLimitHttpError(ex.description)
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except ValueError as e:


@@ -125,3 +125,11 @@ class WebSSOAuthRequiredError(BaseHTTPException):
    error_code = "web_sso_auth_required"
    description = "Web SSO authentication required."
    code = 401


class InvokeRateLimitError(BaseHTTPException):
    """Raised when the Invoke returns rate limit error."""

    error_code = "rate_limit_error"
    description = "Rate Limit Error"
    code = 429


@@ -43,3 +43,8 @@ class ModelCurrentlyNotSupportError(Exception):
    Custom exception raised when the model not support
    """
    description = "Model Currently Not Support"


class InvokeRateLimitError(Exception):
    """Raised when the Invoke returns rate limit error."""
    description = "Rate Limit Error"


@@ -1,6 +1,8 @@
from collections.abc import Generator
from typing import Any, Union

from openai._exceptions import RateLimitError

from configs import dify_config
from core.app.apps.advanced_chat.app_generator import AdvancedChatAppGenerator
from core.app.apps.agent_chat.app_generator import AgentChatAppGenerator
@@ -10,6 +12,7 @@ from core.app.apps.workflow.app_generator import WorkflowAppGenerator
from core.app.entities.app_invoke_entities import InvokeFrom
from core.app.features.rate_limiting import RateLimit
from models.model import Account, App, AppMode, EndUser
from services.errors.llm import InvokeRateLimitError
from services.workflow_service import WorkflowService
@@ -86,6 +89,8 @@ class AppGenerateService:
                )
            else:
                raise ValueError(f"Invalid app mode {app_model.mode}")
        except RateLimitError as e:
            raise InvokeRateLimitError(str(e))
        finally:
            if not streaming:
                rate_limit.exit(request_id)
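
This is where the provider-specific exception becomes the provider-agnostic service error: openai's RateLimitError is caught around the generator call and re-raised as services.errors.llm.InvokeRateLimitError, so the controllers never import the openai SDK. A standalone sketch of the wrapping, assuming the v1 openai SDK (where the same class is also exposed as `openai.RateLimitError`); `run_generator` is a stand-in for the real app-generator pipeline:

```python
# Standalone sketch of the wrapping pattern used in AppGenerateService.generate.
from openai import RateLimitError  # public alias of openai._exceptions.RateLimitError


class InvokeRateLimitError(Exception):
    """Simplified stand-in for services.errors.llm.InvokeRateLimitError."""
    description = "Rate Limit Error"

    def __init__(self, description=None):
        super().__init__(description)
        self.description = description or self.description


def generate(run_generator):
    try:
        return run_generator()
    except RateLimitError as e:
        # Preserve the provider's message so controllers can surface it to clients.
        raise InvokeRateLimitError(str(e))
```

Wrapping here means both the console and web controllers in the earlier hunks can catch a single error type, regardless of which code path hit the provider limit.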


@@ -0,0 +1,19 @@
from typing import Optional


class InvokeError(Exception):
    """Base class for all LLM exceptions."""

    description: Optional[str] = None

    def __init__(self, description: Optional[str] = None) -> None:
        self.description = description

    def __str__(self):
        return self.description or self.__class__.__name__


class InvokeRateLimitError(InvokeError):
    """Raised when the Invoke returns rate limit error."""

    description = "Rate Limit Error"