feat: 模型vision支持性参数

This commit is contained in:
RockChinQ 2024-05-16 20:11:54 +08:00
parent 404e5492a3
commit 2c478ccc25
7 changed files with 48 additions and 29 deletions

View File

@ -39,8 +39,13 @@ class PreProcessor(stage.PipelineStage):
query.prompt = conversation.prompt.copy()
query.messages = conversation.messages.copy()
query.use_model = conversation.use_model
query.use_funcs = conversation.use_funcs if query.use_model.tool_call_supported else None
# 检查vision是否启用,若未启用则删除所有图片
if not self.ap.provider_cfg.data['enable-vision']:
if not self.ap.provider_cfg.data['enable-vision'] or not query.use_model.vision_supported:
for msg in query.messages:
if isinstance(msg.content, list):
for me in msg.content:
@ -55,7 +60,7 @@ class PreProcessor(stage.PipelineStage):
llm_entities.ContentElement.from_text(me.text)
)
elif isinstance(me, mirai.Image):
if self.ap.provider_cfg.data['enable-vision']:
if self.ap.provider_cfg.data['enable-vision'] and query.use_model.vision_supported:
if me.url is not None:
content_list.append(
llm_entities.ContentElement.from_image_url(str(me.url))
@ -65,11 +70,6 @@ class PreProcessor(stage.PipelineStage):
role='user',
content=content_list
)
query.use_model = conversation.use_model
query.use_funcs = conversation.use_funcs
# =========== 触发事件 PromptPreProcessing
event_ctx = await self.ap.plugin_mgr.emit_event(

View File

@ -76,7 +76,7 @@ class OpenAIChatCompletions(api.LLMAPIRequester):
args = self.requester_cfg['args'].copy()
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
if use_model.tool_call_supported:
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
@ -88,7 +88,7 @@ class OpenAIChatCompletions(api.LLMAPIRequester):
# 检查vision
if self.ap.oss_mgr.available():
for msg in messages:
if isinstance(msg["content"], list):
if 'content' in msg and isinstance(msg["content"], list):
for me in msg["content"]:
if me["type"] == "image_url":
me["image_url"]['url'] = await self.get_oss_url(me["image_url"]['url'])

View File

@ -28,7 +28,7 @@ class DeepseekChatCompletions(chatcmpl.OpenAIChatCompletions):
args = self.requester_cfg['args'].copy()
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
if use_model.tool_call_supported:
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
@ -39,7 +39,7 @@ class DeepseekChatCompletions(chatcmpl.OpenAIChatCompletions):
# deepseek 不支持多模态,把content都转换成纯文字
for m in messages:
if isinstance(m["content"], list):
if 'content' in m and isinstance(m["content"], list):
m["content"] = " ".join([c["text"] for c in m["content"]])
args["messages"] = messages

View File

@ -28,7 +28,7 @@ class MoonshotChatCompletions(chatcmpl.OpenAIChatCompletions):
args = self.requester_cfg['args'].copy()
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
if use_model.tool_call_supported:
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
@ -39,7 +39,7 @@ class MoonshotChatCompletions(chatcmpl.OpenAIChatCompletions):
# moonshot 不支持多模态,把content都转换成纯文字
for m in messages:
if isinstance(m["content"], list):
if 'content' in m and isinstance(m["content"], list):
m["content"] = " ".join([c["text"] for c in m["content"]])
# 删除空的

View File

@ -21,5 +21,7 @@ class LLMModelInfo(pydantic.BaseModel):
tool_call_supported: typing.Optional[bool] = False
vision_supported: typing.Optional[bool] = False
class Config:
arbitrary_types_allowed = True

View File

@ -87,7 +87,8 @@ class ModelManager:
model_name=None,
token_mgr=self.token_mgrs[model['token_mgr']],
requester=self.requesters[model['requester']],
tool_call_supported=model['tool_call_supported']
tool_call_supported=model['tool_call_supported'],
vision_supported=model['vision_supported']
)
break
@ -99,13 +100,15 @@ class ModelManager:
token_mgr = self.token_mgrs[model['token_mgr']] if 'token_mgr' in model else default_model_info.token_mgr
requester = self.requesters[model['requester']] if 'requester' in model else default_model_info.requester
tool_call_supported = model.get('tool_call_supported', default_model_info.tool_call_supported)
vision_supported = model.get('vision_supported', default_model_info.vision_supported)
model_info = entities.LLMModelInfo(
name=model['name'],
model_name=model_name,
token_mgr=token_mgr,
requester=requester,
tool_call_supported=tool_call_supported
tool_call_supported=tool_call_supported,
vision_supported=vision_supported
)
self.model_list.append(model_info)

View File

@ -4,59 +4,73 @@
"name": "default",
"requester": "openai-chat-completions",
"token_mgr": "openai",
"tool_call_supported": false
"tool_call_supported": false,
"vision_supported": false
},
{
"name": "gpt-3.5-turbo-0125",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": false
},
{
"name": "gpt-3.5-turbo",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": false
},
{
"name": "gpt-3.5-turbo-1106",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": false
},
{
"name": "gpt-4-turbo",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4-turbo-2024-04-09",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4-turbo-preview",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4-0125-preview",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4-1106-preview",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4o",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4-0613",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4-32k",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"name": "gpt-4-32k-0613",
"tool_call_supported": true
"tool_call_supported": true,
"vision_supported": true
},
{
"model_name": "SparkDesk",