feat: add gpt-4o-2024-08-06 and json_schema for azure openAI service (#7648)
This commit is contained in: parent 0e71f6db84 · commit 3e7597f2bd
@@ -637,7 +637,19 @@ LLM_BASE_MODELS = [
                         en_US='specifying the format that the model must output'
                     ),
                     required=False,
-                    options=['text', 'json_object']
+                    options=['text', 'json_object', 'json_schema']
+                ),
+                ParameterRule(
+                    name='json_schema',
+                    label=I18nObject(
+                        en_US='JSON Schema'
+                    ),
+                    type='text',
+                    help=I18nObject(
+                        zh_Hans='设置返回的json schema,llm将按照它返回',
+                        en_US='Set a response json schema will ensure LLM to adhere it.'
+                    ),
+                    required=False
                 ),
             ],
             pricing=PriceConfig(
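For reference, here is a minimal sketch (not part of this diff) of the model_parameters payload a caller could send once these rules exist. The keys mirror the ParameterRule names above; the schema contents and all values are purely illustrative, and the name/strict/schema envelope follows OpenAI's structured-outputs format, which the json_schema string is expected to contain (see the response_format handling in the Python hunk further down).

    import json

    # Illustrative only: parameter values matching the ParameterRule names above.
    # The 'json_schema' rule is a text parameter, so the schema travels as a JSON string.
    order_schema = {
        "name": "order",          # structured-outputs envelope: name / strict / schema
        "strict": True,
        "schema": {
            "type": "object",
            "properties": {
                "order_id": {"type": "string"},
                "status": {"type": "string", "enum": ["pending", "shipped", "delivered"]},
            },
            "required": ["order_id", "status"],
            "additionalProperties": False,
        },
    }

    model_parameters = {
        "temperature": 0.2,
        "response_format": "json_schema",        # one of 'text', 'json_object', 'json_schema'
        "json_schema": json.dumps(order_schema),
    }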
@@ -800,6 +812,94 @@ LLM_BASE_MODELS = [
             )
         )
     ),
+    AzureBaseModel(
+        base_model_name='gpt-4o-2024-08-06',
+        entity=AIModelEntity(
+            model='fake-deployment-name',
+            label=I18nObject(
+                en_US='fake-deployment-name-label',
+            ),
+            model_type=ModelType.LLM,
+            features=[
+                ModelFeature.AGENT_THOUGHT,
+                ModelFeature.VISION,
+                ModelFeature.MULTI_TOOL_CALL,
+                ModelFeature.STREAM_TOOL_CALL,
+            ],
+            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+            model_properties={
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+                ModelPropertyKey.CONTEXT_SIZE: 128000,
+            },
+            parameter_rules=[
+                ParameterRule(
+                    name='temperature',
+                    **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE],
+                ),
+                ParameterRule(
+                    name='top_p',
+                    **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P],
+                ),
+                ParameterRule(
+                    name='presence_penalty',
+                    **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY],
+                ),
+                ParameterRule(
+                    name='frequency_penalty',
+                    **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY],
+                ),
+                _get_max_tokens(default=512, min_val=1, max_val=4096),
+                ParameterRule(
+                    name='seed',
+                    label=I18nObject(
+                        zh_Hans='种子',
+                        en_US='Seed'
+                    ),
+                    type='int',
+                    help=I18nObject(
+                        zh_Hans='如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。',
+                        en_US='If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.'
+                    ),
+                    required=False,
+                    precision=2,
+                    min=0,
+                    max=1,
+                ),
+                ParameterRule(
+                    name='response_format',
+                    label=I18nObject(
+                        zh_Hans='回复格式',
+                        en_US='response_format'
+                    ),
+                    type='string',
+                    help=I18nObject(
+                        zh_Hans='指定模型必须输出的格式',
+                        en_US='specifying the format that the model must output'
+                    ),
+                    required=False,
+                    options=['text', 'json_object', 'json_schema']
+                ),
+                ParameterRule(
+                    name='json_schema',
+                    label=I18nObject(
+                        en_US='JSON Schema'
+                    ),
+                    type='text',
+                    help=I18nObject(
+                        zh_Hans='设置返回的json schema,llm将按照它返回',
+                        en_US='Set a response json schema will ensure LLM to adhere it.'
+                    ),
+                    required=False
+                ),
+            ],
+            pricing=PriceConfig(
+                input=5.00,
+                output=15.00,
+                unit=0.000001,
+                currency='USD',
+            )
+        )
+    ),
     AzureBaseModel(
         base_model_name='gpt-4-turbo',
         entity=AIModelEntity(
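The PriceConfig above is per-token: with unit=0.000001, input=5.00 and output=15.00 read as USD 5 per million input tokens and USD 15 per million output tokens. A quick sanity check of that arithmetic, with made-up token counts:

    # Cost = tokens * price * unit; unit=0.000001 makes the prices per-million-token rates.
    input_price, output_price, unit = 5.00, 15.00, 0.000001
    prompt_tokens, completion_tokens = 12_000, 800  # illustrative counts

    cost = prompt_tokens * input_price * unit + completion_tokens * output_price * unit
    print(f"${cost:.4f}")  # -> $0.0720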
@@ -138,6 +138,12 @@ model_credential_schema:
           show_on:
             - variable: __model_type
               value: llm
+        - label:
+            en_US: gpt-4o-2024-08-06
+          value: gpt-4o-2024-08-06
+          show_on:
+            - variable: __model_type
+              value: llm
         - label:
             en_US: gpt-4-turbo
           value: gpt-4-turbo
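With this option in place, pointing a deployment at the new base model amounts to selecting gpt-4o-2024-08-06 in the provider form. A sketch of the resulting credentials dict follows; the surrounding variable names (openai_api_base, openai_api_key, openai_api_version) are assumed from the rest of this schema, and the concrete values are placeholders, not part of the hunk:

    # Hypothetical credential values; only base_model_name is affected by this change.
    credentials = {
        "openai_api_base": "https://my-resource.openai.azure.com",  # placeholder endpoint
        "openai_api_key": "<azure-openai-api-key>",
        "openai_api_version": "2024-08-01-preview",  # assumed; pick a version that supports json_schema
        "base_model_name": "gpt-4o-2024-08-06",      # the option added above
    }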
@@ -1,4 +1,5 @@
 import copy
+import json
 import logging
 from collections.abc import Generator, Sequence
 from typing import Optional, Union, cast
@@ -276,12 +277,18 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
 
         response_format = model_parameters.get("response_format")
         if response_format:
-            if response_format == "json_object":
-                response_format = {"type": "json_object"}
+            if response_format == "json_schema":
+                json_schema = model_parameters.get("json_schema")
+                if not json_schema:
+                    raise ValueError("Must define JSON Schema when the response format is json_schema")
+                try:
+                    schema = json.loads(json_schema)
+                except:
+                    raise ValueError(f"not currect json_schema format: {json_schema}")
+                model_parameters.pop("json_schema")
+                model_parameters["response_format"] = {"type": "json_schema", "json_schema": schema}
             else:
-                response_format = {"type": "text"}
-
-            model_parameters["response_format"] = response_format
+                model_parameters["response_format"] = {"type": response_format}
 
         extra_model_kwargs = {}
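Note that the parsed dict is forwarded verbatim as the json_schema member of response_format, so the string the user supplies has to already be the structured-outputs envelope (name, schema, optional strict). Below is a small sketch of what this branch produces, followed by a narrower exception clause as an editorial suggestion; the commit itself uses a bare except, and all identifiers here are illustrative:

    import json

    # What the json_schema branch ends up building (values illustrative).
    raw = '{"name": "answer", "strict": true, "schema": {"type": "object", "properties": {"answer": {"type": "string"}}, "required": ["answer"], "additionalProperties": false}}'
    model_parameters = {"response_format": "json_schema", "json_schema": raw}

    schema = json.loads(model_parameters.pop("json_schema"))
    model_parameters["response_format"] = {"type": "json_schema", "json_schema": schema}
    # -> {"type": "json_schema", "json_schema": {"name": "answer", "strict": True, "schema": {...}}}

    # Suggested narrower error handling, so unrelated exceptions are not swallowed:
    def parse_json_schema(raw_schema: str) -> dict:
        try:
            return json.loads(raw_schema)
        except json.JSONDecodeError as exc:
            raise ValueError(f"json_schema is not valid JSON: {raw_schema}") from exc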