Mirror of https://github.com/RockChinQ/QChatGPT.git (synced 2024-11-16 03:32:33 +08:00)
chore: 删除冗余的兼容性检查判断 (remove redundant compatibility checks on config attributes)
This commit is contained in:
parent 7aef1758e0
commit d16cb25cde

main.py | 3
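The change repeated across every hunk below is the removal of the hasattr()-based backward-compatibility guard around config attributes: options are now read directly, so they must be defined in config.py. A minimal sketch of the before/after pattern, using the show_prefix option from one of the hunks below:

# Before: tolerate an older config.py that does not define the option yet.
prefix = "[GPT]" if hasattr(config, "show_prefix") and config.show_prefix else ""

# After this commit: the option must exist in config.py; a missing attribute
# now raises AttributeError instead of silently falling back to a default.
prefix = "[GPT]" if config.show_prefix else ""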
@@ -299,8 +299,7 @@ def start(first_time_init=False):
                 logging.info('热重载完成')
 
     # 发送赞赏码
-    if hasattr(config, 'encourage_sponsor_at_start') \
-        and config.encourage_sponsor_at_start \
+    if config.encourage_sponsor_at_start \
         and pkg.utils.context.get_openai_manager().audit_mgr.get_total_text_length() >= 2048:
 
         logging.info("发送赞赏码")

@@ -44,7 +44,7 @@ class DataGatherer:
         """
         try:
             config = pkg.utils.context.get_config()
-            if hasattr(config, "report_usage") and not config.report_usage:
+            if not config.report_usage:
                 return
             res = requests.get("http://reports.rockchin.top:18989/usage?service_name=qchatgpt.{}&version={}&count={}".format(subservice_name, self.version_str, count))
             if res.status_code != 200 or res.text != "ok":

@@ -83,7 +83,7 @@ class OpenAIInteract:
             dict: 响应
         """
         config = pkg.utils.context.get_config()
-        params = config.image_api_params if hasattr(config, "image_api_params") else self.default_image_api_params
+        params = config.image_api_params
 
         response = openai.Image.create(
             prompt=prompt,

@@ -213,7 +213,7 @@ class Session:
             return None
 
         config = pkg.utils.context.get_config()
-        max_length = config.prompt_submit_length if hasattr(config, "prompt_submit_length") else 1024
+        max_length = config.prompt_submit_length
 
         prompts, counts = self.cut_out(text, max_length)

@@ -64,12 +64,7 @@ def text_to_image(text: str) -> MessageComponent:
 
 def check_text(text: str) -> list:
     """检查文本是否为长消息,并转换成该使用的消息链组件"""
-    if not hasattr(config, 'blob_message_threshold'):
-        return [text]
-
     if len(text) > config.blob_message_threshold:
-        if not hasattr(config, 'blob_message_strategy'):
-            raise AttributeError('未定义长消息处理策略')
 
         # logging.info("长消息: {}".format(text))
         if config.blob_message_strategy == 'image':

@@ -13,7 +13,7 @@ class HelpCommand(AbstractCommandNode):
     @classmethod
     def process(cls, ctx: Context) -> tuple[bool, list]:
         import config
-        reply = [(config.help_message if hasattr(config, 'help_message') else "") + "\n请输入 !cmds 查看指令列表"]
+        reply = [(config.help_message) + "\n请输入 !cmds 查看指令列表"]
 
         return True, reply

@@ -21,11 +21,11 @@ class ReplyFilter:
         self.mask = mask
         self.mask_word = mask_word
         import config
-        if hasattr(config, 'baidu_check') and hasattr(config, 'baidu_api_key') and hasattr(config, 'baidu_secret_key'):
-            self.baidu_check = config.baidu_check
-            self.baidu_api_key = config.baidu_api_key
-            self.baidu_secret_key = config.baidu_secret_key
-            self.inappropriate_message_tips = config.inappropriate_message_tips
+        self.baidu_check = config.baidu_check
+        self.baidu_api_key = config.baidu_api_key
+        self.baidu_secret_key = config.baidu_secret_key
+        self.inappropriate_message_tips = config.inappropriate_message_tips
 
     def is_illegal(self, message: str) -> bool:
         processed = self.process(message)

@@ -5,9 +5,6 @@ def ignore(msg: str) -> bool:
     """检查消息是否应该被忽略"""
     import config
 
-    if not hasattr(config, 'ignore_rules'):
-        return False
-
     if 'prefix' in config.ignore_rules:
         for rule in config.ignore_rules['prefix']:
             if msg.startswith(rule):

@@ -24,8 +24,6 @@ import pkg.plugin.models as plugin_models
 # 检查消息是否符合泛响应匹配机制
 def check_response_rule(text: str):
     config = pkg.utils.context.get_config()
-    if not hasattr(config, 'response_rules'):
-        return False, ''
 
     rules = config.response_rules
     # 检查前缀匹配

@@ -228,8 +226,7 @@ class QQBotManager:
     def send(self, event, msg, check_quote=True):
         config = pkg.utils.context.get_config()
         asyncio.run(
-            self.bot.send(event, msg, quote=True if hasattr(config,
-                                                            "quote_origin") and config.quote_origin and check_quote else False))
+            self.bot.send(event, msg, quote=True if config.quote_origin and check_quote else False))
 
     # 私聊消息处理
     def on_person_message(self, event: MessageEvent):

@@ -333,7 +330,7 @@ class QQBotManager:
     # 通知系统管理员
     def notify_admin(self, message: str):
         config = pkg.utils.context.get_config()
-        if hasattr(config, "admin_qq") and config.admin_qq != 0 and config.admin_qq != []:
+        if config.admin_qq != 0 and config.admin_qq != []:
             logging.info("通知管理员:{}".format(message))
             if type(config.admin_qq) == int:
                 send_task = self.bot.send_friend_message(config.admin_qq, "[bot]{}".format(message))

@@ -346,7 +343,7 @@ class QQBotManager:
 
     def notify_admin_message_chain(self, message):
         config = pkg.utils.context.get_config()
-        if hasattr(config, "admin_qq") and config.admin_qq != 0 and config.admin_qq != []:
+        if config.admin_qq != 0 and config.admin_qq != []:
             logging.info("通知管理员:{}".format(message))
             if type(config.admin_qq) == int:
                 send_task = self.bot.send_friend_message(config.admin_qq, message)

@@ -13,11 +13,8 @@ def handle_exception(notify_admin: str = "", set_reply: str = "") -> list:
     """处理异常,当notify_admin不为空时,会通知管理员,返回通知用户的消息"""
     import config
     pkg.utils.context.get_qqbot_manager().notify_admin(notify_admin)
-    if hasattr(config, 'hide_exce_info_to_user') and config.hide_exce_info_to_user:
-        if hasattr(config, 'alter_tip_message'):
-            return [config.alter_tip_message] if config.alter_tip_message else []
-        else:
-            return ["[bot]出错了,请重试或联系管理员"]
+    if config.hide_exce_info_to_user:
+        return [config.alter_tip_message] if config.alter_tip_message else []
     else:
         return [set_reply]

@@ -40,7 +37,7 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
                 reply = handle_exception(notify_admin=f"{session_name},多次尝试失败。", set_reply=f"[bot]多次尝试失败,请重试或联系管理员")
                 break
             try:
-                prefix = "[GPT]" if hasattr(config, "show_prefix") and config.show_prefix else ""
+                prefix = "[GPT]" if config.show_prefix else ""
 
                 text = session.append(text_message)

@@ -68,7 +68,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
         return reply
 
     import config
-    if hasattr(config, 'income_msg_check') and config.income_msg_check:
+    if config.income_msg_check:
         if mgr.reply_filter.is_illegal(text_message):
             return MessageChain(Plain("[bot] 你的提问中有不合适的内容, 请更换措辞~"))

@@ -115,10 +115,10 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
         else: # 消息
             # 限速丢弃检查
             # print(ratelimit.__crt_minute_usage__[session_name])
-            if hasattr(config, "rate_limitation") and config.rate_limit_strategy == "drop":
+            if config.rate_limit_strategy == "drop":
                 if ratelimit.is_reach_limit(session_name):
                     logging.info("根据限速策略丢弃[{}]消息: {}".format(session_name, text_message))
-                    return MessageChain(["[bot]"+config.rate_limit_drop_tip]) if hasattr(config, "rate_limit_drop_tip") and config.rate_limit_drop_tip != "" else []
+                    return MessageChain(["[bot]"+config.rate_limit_drop_tip]) if config.rate_limit_drop_tip != "" else []
 
             before = time.time()
             # 触发插件事件

@@ -144,11 +144,10 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
                                                 mgr, config, launcher_type, launcher_id, sender_id)
 
             # 限速等待时间
-            if hasattr(config, "rate_limitation") and config.rate_limit_strategy == "wait":
+            if config.rate_limit_strategy == "wait":
                 time.sleep(ratelimit.get_rest_wait_time(session_name, time.time() - before))
 
-            if hasattr(config, "rate_limitation"):
-                ratelimit.add_usage(session_name)
+            ratelimit.add_usage(session_name)
 
     if reply is not None and len(reply) > 0 and (type(reply[0]) == str or type(reply[0]) == mirai.Plain):
         if type(reply[0]) == mirai.Plain:

@@ -58,9 +58,6 @@ def get_rest_wait_time(session_name: str, spent: float) -> float:
 
     import config
 
-    if not hasattr(config, 'rate_limitation'):
-        return 0
-
     min_seconds_per_round = 60.0 / config.rate_limitation
 
     if session_name in __crt_minute_usage__:

@@ -75,9 +72,6 @@ def is_reach_limit(session_name: str) -> bool:
 
     import config
 
-    if not hasattr(config, 'rate_limitation'):
-        return False
-
     if session_name in __crt_minute_usage__:
         return __crt_minute_usage__[session_name] >= config.rate_limitation
     else:

@@ -8,8 +8,8 @@ import traceback
 
 text_render_font: ImageFont = None
 
-if hasattr(config, "blob_message_strategy") and config.blob_message_strategy == "image": # 仅在启用了image时才加载字体
-    use_font = config.font_path if hasattr(config, "font_path") else ""
+if config.blob_message_strategy == "image": # 仅在启用了image时才加载字体
+    use_font = config.font_path
     try:
 
         # 检查是否存在

tests/gpt3_test.py | 14 (new file)
@@ -0,0 +1,14 @@
+import openai
+
+openai.api_key = "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"
+
+resp = openai.Completion.create(
+    prompt="user:你好,今天天气怎么样?\nbot:",
+    model="text-davinci-003",
+    temperature=0.9, # 数值越低得到的回答越理性,取值范围[0, 1]
+    top_p=1, # 生成的文本的文本与要求的符合度, 取值范围[0, 1]
+    frequency_penalty=0.2,
+    presence_penalty=1.0,
+)
+
+print(resp)
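With the hasattr guards removed, every attribute read in the hunks above must be defined in the deployment's config.py; a missing one now surfaces as an AttributeError at the call site rather than a silent fallback. A hedged sketch of a startup check a deployment could run; the check_config helper and REQUIRED list are illustrative and not part of this commit, though the attribute names are taken from the diff:

# Illustrative only: verify config.py defines the attributes this commit reads unconditionally.
REQUIRED = [
    "encourage_sponsor_at_start", "report_usage", "image_api_params",
    "prompt_submit_length", "blob_message_threshold", "blob_message_strategy",
    "help_message", "baidu_check", "baidu_api_key", "baidu_secret_key",
    "inappropriate_message_tips", "ignore_rules", "response_rules",
    "quote_origin", "admin_qq", "hide_exce_info_to_user", "alter_tip_message",
    "show_prefix", "income_msg_check", "rate_limitation",
    "rate_limit_strategy", "rate_limit_drop_tip", "font_path",
]

def check_config(config) -> list:
    """Return the names of required attributes missing from the given config module."""
    return [name for name in REQUIRED if not hasattr(config, name)]

if __name__ == "__main__":
    import config  # the project's config.py
    missing = check_config(config)
    if missing:
        print("Missing config attributes:", ", ".join(missing))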