From e57bdd4e58acf1dfabc85e0872a8f488b69839c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com>
Date: Thu, 23 May 2024 11:51:38 +0800
Subject: [PATCH] chore:update gpt-3.5-turbo and gpt-4-turbo parameter for
 azure (#4596)

---
 .../model_providers/azure_openai/_constant.py | 30 ++++++++++++++-----
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py
index 707b199417..63a0b5c8be 100644
--- a/api/core/model_runtime/model_providers/azure_openai/_constant.py
+++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py
@@ -49,7 +49,7 @@ LLM_BASE_MODELS = [
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
                 ModelPropertyKey.MODE: LLMMode.CHAT.value,
-                ModelPropertyKey.CONTEXT_SIZE: 4096,
+                ModelPropertyKey.CONTEXT_SIZE: 16385,
             },
             parameter_rules=[
                 ParameterRule(
@@ -68,11 +68,25 @@ LLM_BASE_MODELS = [
                     name='frequency_penalty',
                     **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY],
                 ),
-                _get_max_tokens(default=512, min_val=1, max_val=4096)
+                _get_max_tokens(default=512, min_val=1, max_val=4096),
+                ParameterRule(
+                    name='response_format',
+                    label=I18nObject(
+                        zh_Hans='回复格式',
+                        en_US='response_format'
+                    ),
+                    type='string',
+                    help=I18nObject(
+                        zh_Hans='指定模型必须输出的格式',
+                        en_US='specifying the format that the model must output'
+                    ),
+                    required=False,
+                    options=['text', 'json_object']
+                ),
             ],
             pricing=PriceConfig(
-                input=0.001,
-                output=0.002,
+                input=0.0005,
+                output=0.0015,
                 unit=0.001,
                 currency='USD',
             )
@@ -703,8 +717,8 @@ LLM_BASE_MODELS = [
                 ),
             ],
             pricing=PriceConfig(
-                input=0.001,
-                output=0.003,
+                input=0.01,
+                output=0.03,
                 unit=0.001,
                 currency='USD',
             )
@@ -779,8 +793,8 @@ LLM_BASE_MODELS = [
                 ),
             ],
             pricing=PriceConfig(
-                input=0.001,
-                output=0.003,
+                input=0.01,
+                output=0.03,
                 unit=0.001,
                 currency='USD',
             )
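
Reviewer note on the numbers touched above: with `unit=0.001`, the `input`/`output` values in `PriceConfig` appear to read as USD per 1K tokens, so this patch moves gpt-3.5-turbo to $0.0005/$0.0015 per 1K and gpt-4-turbo to $0.01/$0.03 per 1K. The sketch below only illustrates that interpretation; `estimate_cost` is a hypothetical helper, not part of this patch or of the azure_openai provider.

```python
# Illustration only: assumes cost = tokens * price * unit, so with unit=0.001
# the input/output prices above are effectively USD per 1,000 tokens.
# estimate_cost is a hypothetical helper, not an API of the patched module.

def estimate_cost(prompt_tokens: int, completion_tokens: int,
                  input_price: float, output_price: float, unit: float = 0.001) -> float:
    """Return the estimated request cost in USD."""
    return (prompt_tokens * input_price + completion_tokens * output_price) * unit


# gpt-3.5-turbo after this patch: input=0.0005, output=0.0015
print(estimate_cost(1_000, 500, 0.0005, 0.0015))  # ~0.00125 USD
# gpt-4-turbo after this patch: input=0.01, output=0.03
print(estimate_cost(1_000, 500, 0.01, 0.03))      # ~0.025 USD
```

The new `response_format` rule exposes the `text`/`json_object` choice that the (Azure) OpenAI chat completions API accepts as `response_format={'type': 'json_object'}`, and the 16385 context size matches the gpt-3.5-turbo-0125/1106 window.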