diff --git a/pkg/core/app.py b/pkg/core/app.py
index e1df54e..18cbeff 100644
--- a/pkg/core/app.py
+++ b/pkg/core/app.py
@@ -53,6 +53,8 @@ class Application:
 
     plugin_setting_meta: config_mgr.ConfigManager = None
 
+    llm_models_meta: config_mgr.ConfigManager = None
+
     # =========================
 
     ctr_mgr: center_mgr.V2CenterAPI = None
diff --git a/pkg/core/stages/load_config.py b/pkg/core/stages/load_config.py
index 7f83dba..9e61c1c 100644
--- a/pkg/core/stages/load_config.py
+++ b/pkg/core/stages/load_config.py
@@ -18,7 +18,6 @@ class LoadConfigStage(stage.BootingStage):
         ap.provider_cfg = await config.load_json_config("data/config/provider.json", "templates/provider.json")
         ap.system_cfg = await config.load_json_config("data/config/system.json", "templates/system.json")
-
         ap.plugin_setting_meta = await config.load_json_config("plugins/plugins.json", "templates/plugin-settings.json")
         await ap.plugin_setting_meta.dump_config()
@@ -27,3 +26,6 @@ class LoadConfigStage(stage.BootingStage):
         ap.adapter_qq_botpy_meta = await config.load_json_config("data/metadata/adapter-qq-botpy.json", "templates/metadata/adapter-qq-botpy.json")
         await ap.adapter_qq_botpy_meta.dump_config()
+
+        ap.llm_models_meta = await config.load_json_config("data/metadata/llm-models.json", "templates/metadata/llm-models.json")
+        await ap.llm_models_meta.dump_config()
diff --git a/pkg/provider/modelmgr/modelmgr.py b/pkg/provider/modelmgr/modelmgr.py
index a91c311..3ad968d 100644
--- a/pkg/provider/modelmgr/modelmgr.py
+++ b/pkg/provider/modelmgr/modelmgr.py
@@ -1,11 +1,15 @@
 from __future__ import annotations
 
+import aiohttp
+
 from . import entities
 from ...core import app
-from . import token
+from . import token, api
 from .apis import chatcmpl
 
+FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list"
+
 
 class ModelManager:
     """Model manager"""
@@ -13,10 +17,16 @@ class ModelManager:
     ap: app.Application
 
     model_list: list[entities.LLMModelInfo]
+
+    requesters: dict[str, api.LLMAPIRequester]
+
+    token_mgrs: dict[str, token.TokenManager]
 
     def __init__(self, ap: app.Application):
         self.ap = ap
         self.model_list = []
+        self.requesters = {}
+        self.token_mgrs = {}
 
     async def get_model_by_name(self, name: str) -> entities.LLMModelInfo:
         """Get a model by name
@@ -24,171 +34,73 @@ class ModelManager:
         for model in self.model_list:
             if model.name == name:
                 return model
-        raise ValueError(f"Unsupported model: {name}, please check the config file")
-
+        raise ValueError(f"Cannot determine the info of model {name}; please configure it in the metadata")
+
     async def initialize(self):
-        openai_chat_completion = chatcmpl.OpenAIChatCompletion(self.ap)
-        await openai_chat_completion.initialize()
-        openai_token_mgr = token.TokenManager("openai", list(self.ap.provider_cfg.data['openai-config']['api-keys']))
+
+        # Initialize token managers and requesters
+        self.token_mgrs = {
+            "openai": token.TokenManager("openai", list(self.ap.provider_cfg.data['openai-config']['api-keys']))
+        }
 
-        model_list = [
-            entities.LLMModelInfo(
-                name="gpt-3.5-turbo",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-3.5-turbo-1106",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-3.5-turbo-16k",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-3.5-turbo-0613",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-3.5-turbo-16k-0613",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-3.5-turbo-0301",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            )
-        ]
+        for api_cls in api.preregistered_requesters:
+            api_inst = api_cls(self.ap)
+            await api_inst.initialize()
+            self.requesters[api_inst.name] = api_inst
 
-        self.model_list.extend(model_list)
+        # Try to fetch the latest model list from the remote API
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.request(
+                    method="GET",
+                    url=FETCH_MODEL_LIST_URL,
+                ) as resp:
+                    model_list = (await resp.json())['data']['list']
 
-        gpt4_model_list = [
-            entities.LLMModelInfo(
-                name="gpt-4-0125-preview",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-4-turbo-preview",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-4-1106-preview",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-4-vision-preview",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-4",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-4-0613",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-4-32k",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            ),
-            entities.LLMModelInfo(
-                name="gpt-4-32k-0613",
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=True,
-            )
-        ]
+                    for model in model_list:
 
-        self.model_list.extend(gpt4_model_list)
+                        for index, local_model in enumerate(self.ap.llm_models_meta.data['list']):
+                            if model['name'] == local_model['name']:
+                                self.ap.llm_models_meta.data['list'][index] = model
+                                break
+                        else:
+                            self.ap.llm_models_meta.data['list'].append(model)
 
-        one_api_model_list = [
-            entities.LLMModelInfo(
-                name="OneAPI/SparkDesk",
-                model_name='SparkDesk',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-            entities.LLMModelInfo(
-                name="OneAPI/chatglm_pro",
-                model_name='chatglm_pro',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-            entities.LLMModelInfo(
-                name="OneAPI/chatglm_std",
-                model_name='chatglm_std',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-            entities.LLMModelInfo(
-                name="OneAPI/chatglm_lite",
-                model_name='chatglm_lite',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-            entities.LLMModelInfo(
-                name="OneAPI/qwen-v1",
-                model_name='qwen-v1',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-            entities.LLMModelInfo(
-                name="OneAPI/qwen-plus-v1",
-                model_name='qwen-plus-v1',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-            entities.LLMModelInfo(
-                name="OneAPI/ERNIE-Bot",
-                model_name='ERNIE-Bot',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-            entities.LLMModelInfo(
-                name="OneAPI/ERNIE-Bot-turbo",
-                model_name='ERNIE-Bot-turbo',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-            entities.LLMModelInfo(
-                name="OneAPI/gemini-pro",
-                model_name='gemini-pro',
-                token_mgr=openai_token_mgr,
-                requester=openai_chat_completion,
-                tool_call_supported=False,
-            ),
-        ]
+            await self.ap.llm_models_meta.dump_config()
 
-        self.model_list.extend(one_api_model_list)
+        except Exception as e:
+            self.ap.logger.debug(f'Failed to fetch the latest model list: {e}')
+
+        # Locate the "default" entry; other entries inherit any field they omit from it
+        default_model_info: entities.LLMModelInfo = None
+
+        for model in self.ap.llm_models_meta.data['list']:
+            if model['name'] == 'default':
+                default_model_info = entities.LLMModelInfo(
+                    name=model['name'],
+                    model_name=None,
+                    token_mgr=self.token_mgrs[model['token_mgr']],
+                    requester=self.requesters[model['requester']],
+                    tool_call_supported=model['tool_call_supported']
+                )
+                break
+
+        # Build the model list from the metadata, falling back to the default entry's fields
+        for model in self.ap.llm_models_meta.data['list']:
+
+            try:
+
+                model_name = model.get('model_name', default_model_info.model_name)
+                token_mgr = self.token_mgrs[model['token_mgr']] if 'token_mgr' in model else default_model_info.token_mgr
+                requester = self.requesters[model['requester']] if 'requester' in model else default_model_info.requester
+                tool_call_supported = model.get('tool_call_supported', default_model_info.tool_call_supported)
+
+                model_info = entities.LLMModelInfo(
+                    name=model['name'],
+                    model_name=model_name,
+                    token_mgr=token_mgr,
+                    requester=requester,
+                    tool_call_supported=tool_call_supported
+                )
+                self.model_list.append(model_info)
+
+            except Exception as e:
+                self.ap.logger.error(f"Failed to initialize model {model['name']}: {e}, please check the config file")
diff --git a/templates/metadata/llm-models.json b/templates/metadata/llm-models.json
new file mode 100644
index 0000000..bfeaf62
--- /dev/null
+++ b/templates/metadata/llm-models.json
@@ -0,0 +1,34 @@
+{
+    "list": [
+        {
+            "name": "default",
+            "requester": "openai-chat-completion",
+            "token_mgr": "openai",
+            "tool_call_supported": false
+        },
+        {
+            "name": "gpt-3.5-turbo",
+            "tool_call_supported": true
+        },
+        {
+            "name": "gpt-4",
+            "tool_call_supported": true
+        },
+        {
+            "name": "gpt-4-turbo-preview",
+            "tool_call_supported": true
+        },
+        {
+            "name": "gpt-4-32k",
+            "tool_call_supported": true
+        },
+        {
+            "model_name": "SparkDesk",
+            "name": "OneAPI/SparkDesk"
+        },
+        {
+            "model_name": "gemini-pro",
+            "name": "OneAPI/gemini-pro"
+        }
+    ]
+}
\ No newline at end of file
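
The key behavior this diff introduces is the inheritance rule in `ModelManager.initialize()`: every entry in `llm-models.json` inherits any field it omits (`model_name`, `token_mgr`, `requester`, `tool_call_supported`) from the `default` entry. Below is a minimal, self-contained sketch of that resolution logic using plain dicts; the `ResolvedModel` dataclass is only a stand-in for `entities.LLMModelInfo`, and the comment about `model_name=None` falling back to `name` at request time is an assumption not shown in this diff.

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class ResolvedModel:
    """Stand-in for entities.LLMModelInfo; fields mirror the metadata keys."""
    name: str
    model_name: str | None   # None presumably means "use `name` when calling the API"
    token_mgr: str
    requester: str
    tool_call_supported: bool


metadata = [  # abridged copy of templates/metadata/llm-models.json
    {"name": "default", "requester": "openai-chat-completion",
     "token_mgr": "openai", "tool_call_supported": False},
    {"name": "gpt-4", "tool_call_supported": True},
    {"name": "OneAPI/SparkDesk", "model_name": "SparkDesk"},
]

# The "default" entry supplies every field that a concrete entry omits.
default = next(m for m in metadata if m["name"] == "default")

for entry in metadata:
    print(ResolvedModel(
        name=entry["name"],
        model_name=entry.get("model_name"),  # the default entry carries model_name=None
        token_mgr=entry.get("token_mgr", default["token_mgr"]),
        requester=entry.get("requester", default["requester"]),
        tool_call_supported=entry.get("tool_call_supported", default["tool_call_supported"]),
    ))
# gpt-4 resolves to requester="openai-chat-completion" with tool_call_supported=True;
# OneAPI/SparkDesk keeps tool_call_supported=False inherited from the default entry.
```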
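Note that most of the previously hardcoded OneAPI entries (chatglm_*, qwen-*, ERNIE-Bot*) are gone from the shipped template. Under the new scheme they become user data: restoring `OneAPI/ERNIE-Bot`, for example, takes one extra entry in `data/metadata/llm-models.json`, with `requester`, `token_mgr`, and `tool_call_supported: false` inherited from `default` (matching the values the old hardcoded list used):

```json
{
    "name": "OneAPI/ERNIE-Bot",
    "model_name": "ERNIE-Bot"
}
```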