Merge branch 'main' into dev

Commit 2906547702 by HibiKier, 2025-07-17 19:43:06 +08:00 (committed via GitHub)
4 changed files with 162 additions and 16 deletions

File 1 of 4:

@@ -18,12 +18,13 @@ from zhenxun.builtin_plugins.help._config import (
     SIMPLE_DETAIL_HELP_IMAGE,
     SIMPLE_HELP_IMAGE,
 )
+from zhenxun.configs.config import Config
 from zhenxun.configs.utils import PluginExtraData, RegisterConfig
 from zhenxun.services.log import logger
 from zhenxun.utils.enum import PluginType
 from zhenxun.utils.message import MessageUtils
 
-from ._data_source import create_help_img, get_plugin_help
+from ._data_source import create_help_img, get_llm_help, get_plugin_help
 
 __plugin_meta__ = PluginMetadata(
     name="帮助",
@@ -47,6 +48,34 @@ __plugin_meta__ = PluginMetadata(
                 help="帮助详情图片样式 ['normal', 'zhenxun']",
                 default_value="zhenxun",
             ),
+            RegisterConfig(
+                key="ENABLE_LLM_HELPER",
+                value=False,
+                help="是否开启LLM智能帮助功能",
+                default_value=False,
+                type=bool,
+            ),
+            RegisterConfig(
+                key="DEFAULT_LLM_MODEL",
+                value="Gemini/gemini-2.5-flash-lite-preview-06-17",
+                help="智能帮助功能使用的默认LLM模型",
+                default_value="Gemini/gemini-2.5-flash-lite-preview-06-17",
+                type=str,
+            ),
+            RegisterConfig(
+                key="LLM_HELPER_STYLE",
+                value="绪山真寻",
+                help="设置智能帮助功能的回复口吻或风格",
+                default_value="绪山真寻",
+                type=str,
+            ),
+            RegisterConfig(
+                key="LLM_HELPER_REPLY_AS_IMAGE_THRESHOLD",
+                value=100,
+                help="AI帮助回复超过多少字时转为图片发送",
+                default_value=100,
+                type=int,
+            ),
         ],
     ).to_dict(),
 )
@@ -83,20 +112,36 @@ async def _(
     is_detail: Query[bool] = AlconnaQuery("detail.value", False),
 ):
     _is_superuser = is_superuser.result if is_superuser.available else False
     if name.available:
-        if _is_superuser and session.user.id not in bot.config.superusers:
-            _is_superuser = False
-        if result := await get_plugin_help(session.user.id, name.result, _is_superuser):
-            await MessageUtils.build_message(result).send(reply_to=True)
-        else:
-            await MessageUtils.build_message("没有此功能的帮助信息...").send(
-                reply_to=True
-            )
-        logger.info(f"查看帮助详情: {name.result}", "帮助", session=session)
+        traditional_help_result = await get_plugin_help(
+            session.user.id, name.result, _is_superuser
+        )
+
+        is_plugin_found = not (
+            isinstance(traditional_help_result, str)
+            and "没有查找到这个功能噢..." in traditional_help_result
+        )
+        if is_plugin_found:
+            await MessageUtils.build_message(traditional_help_result).send(
+                reply_to=True
+            )
+            logger.info(f"查看帮助详情: {name.result}", "帮助", session=session)
+        elif Config.get_config("help", "ENABLE_LLM_HELPER"):
+            logger.info(f"智能帮助处理问题: {name.result}", "帮助", session=session)
+            llm_answer = await get_llm_help(name.result, session.user.id)
+            await MessageUtils.build_message(llm_answer).send(reply_to=True)
+        else:
+            await MessageUtils.build_message(traditional_help_result).send(
+                reply_to=True
+            )
+            logger.info(
+                f"查看帮助详情失败,未找到: {name.result}", "帮助", session=session
+            )
     elif session.group and (gid := session.group.id):
         _image_path = GROUP_HELP_PATH / f"{gid}_{is_detail.result}.png"
         if not _image_path.exists():
-            result = await create_help_img(session, gid, is_detail.result)
+            await create_help_img(session, gid, is_detail.result)
         await MessageUtils.build_message(_image_path).finish()
     else:
         if is_detail.result:
@@ -104,5 +149,5 @@ async def _(
         else:
             _image_path = SIMPLE_HELP_IMAGE
         if not _image_path.exists():
-            result = await create_help_img(session, None, is_detail.result)
+            await create_help_img(session, None, is_detail.result)
         await MessageUtils.build_message(_image_path).finish()
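
Aside (not part of the commit): the rewritten handler only falls through to the LLM helper when the traditional lookup misses and the ENABLE_LLM_HELPER switch is on. A minimal sketch of that routing decision, with the sentinel string and config key taken from the diff above; the standalone function name is illustrative only:

from zhenxun.configs.config import Config

# Sentinel returned by get_plugin_help when no plugin matches the query.
NOT_FOUND_SENTINEL = "没有查找到这个功能噢..."


def should_fall_back_to_llm(traditional_result: str | bytes) -> bool:
    """Illustrative helper mirroring the branch added above (not project code)."""
    missed = (
        isinstance(traditional_result, str)
        and NOT_FOUND_SENTINEL in traditional_result
    )
    return missed and bool(Config.get_config("help", "ENABLE_LLM_HELPER"))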

File 2 of 4:

@@ -11,9 +11,15 @@ from zhenxun.configs.utils import PluginExtraData
 from zhenxun.models.level_user import LevelUser
 from zhenxun.models.plugin_info import PluginInfo
 from zhenxun.models.statistics import Statistics
-from zhenxun.utils._image_template import ImageTemplate
+from zhenxun.services import (
+    LLMException,
+    LLMMessage,
+    generate,
+)
+from zhenxun.services.log import logger
+from zhenxun.utils._image_template import Markdown
 from zhenxun.utils.enum import PluginType
-from zhenxun.utils.image_utils import BuildImage
+from zhenxun.utils.image_utils import BuildImage, ImageTemplate
 
 from ._config import (
     GROUP_HELP_PATH,
@@ -202,3 +208,89 @@ async def get_plugin_help(user_id: str, name: str, is_superuser: bool) -> str |
             return await get_normal_help(_plugin.metadata, extra_data, is_superuser)
         return "糟糕! 该功能没有帮助喔..."
     return "没有查找到这个功能噢..."
+
+
+async def get_llm_help(question: str, user_id: str) -> str | bytes:
+    """使用LLM来回答用户的自然语言求助
+
+    参数:
+        question: 用户的问题
+        user_id: 提问用户的ID
+
+    返回:
+        str | bytes: LLM生成的回答或错误提示
+    """
+    try:
+        allowed_types = await get_user_allow_help(user_id)
+        plugins = await PluginInfo.filter(
+            is_show=True, plugin_type__in=allowed_types
+        ).all()
+
+        knowledge_base_parts = []
+        for p in plugins:
+            meta = nonebot.get_plugin_by_module_name(p.module_path)
+            if not meta or not meta.metadata:
+                continue
+            usage = meta.metadata.usage.strip() or ""
+            desc = meta.metadata.description.strip() or ""
+            part = f"功能名称: {p.name}\n功能描述: {desc}\n用法示例:\n{usage}"
+            knowledge_base_parts.append(part)
+
+        if not knowledge_base_parts:
+            return "抱歉,根据您的权限,当前没有可供查询的功能信息。"
+
+        knowledge_base = "\n\n---\n\n".join(knowledge_base_parts)
+
+        user_role = "普通用户"
+        if PluginType.SUPERUSER in allowed_types:
+            user_role = "超级管理员"
+        elif PluginType.ADMIN in allowed_types:
+            user_role = "管理员"
+
+        base_system_prompt = (
+            f"你是一个精通机器人功能的AI助手。当前向你提问的用户是一位「{user_role}」。\n"
+            "你的任务是根据下面提供的功能列表和详细说明,来回答用户关于如何使用机器人的问题。\n"
+            "请仔细阅读每个功能的描述和用法,然后用简洁、清晰的语言告诉用户应该使用哪个或哪些命令来解决他们的问题。\n"
+            "如果找不到完全匹配的功能,可以推荐最相关的一个或几个。直接给出操作指令和简要解释即可。"
+        )
+
+        if (
+            Config.get_config("help", "LLM_HELPER_STYLE")
+            and Config.get_config("help", "LLM_HELPER_STYLE").strip()
+        ):
+            style = Config.get_config("help", "LLM_HELPER_STYLE")
+            style_instruction = f"请务必使用「{style}」的风格和口吻来回答。"
+            system_prompt = f"{base_system_prompt}\n{style_instruction}"
+        else:
+            system_prompt = base_system_prompt
+
+        full_instruction = (
+            f"{system_prompt}\n\n=== 功能列表和说明 ===\n{knowledge_base}"
+        )
+
+        messages = [
+            LLMMessage.system(full_instruction),
+            LLMMessage.user(question),
+        ]
+
+        response = await generate(
+            messages=messages,
+            model=Config.get_config("help", "DEFAULT_LLM_MODEL"),
+        )
+        reply_text = response.text if response else "抱歉,我暂时无法回答这个问题。"
+
+        threshold = Config.get_config("help", "LLM_HELPER_REPLY_AS_IMAGE_THRESHOLD", 50)
+        if len(reply_text) > threshold:
+            markdown = Markdown()
+            markdown.text(reply_text)
+            return await markdown.build()
+        return reply_text
+
+    except LLMException as e:
+        logger.error(f"LLM智能帮助出错: {e}", "帮助", e=e)
+        return "抱歉,智能帮助功能当前不可用,请稍后再试或联系管理员。"
+    except Exception as e:
+        logger.error(f"构建LLM帮助时发生未知错误: {e}", "帮助", e=e)
+        return "抱歉,智能帮助功能遇到了一点小问题,正在紧急处理中!"

File 3 of 4:

@@ -198,7 +198,9 @@ class StoreManager:
         except ValueError as e:
             return str(e)
         db_plugin_list = await cls.get_loaded_plugins("module")
-        plugin_info = next(p for p in plugin_list if p.module == plugin_key)
+        plugin_info = next((p for p in plugin_list if p.module == plugin_key), None)
+        if plugin_info is None:
+            return f"未找到插件 {plugin_key}"
         if plugin_info.module in [p[0] for p in db_plugin_list]:
             return f"插件 {plugin_info.name} 已安装,无需重复安装"
         is_external = True
@@ -307,7 +309,9 @@ class StoreManager:
             plugin_key = await cls._resolve_plugin_key(plugin_id)
         except ValueError as e:
             return str(e)
-        plugin_info = next(p for p in plugin_list if p.module == plugin_key)
+        plugin_info = next((p for p in plugin_list if p.module == plugin_key), None)
+        if plugin_info is None:
+            return f"未找到插件 {plugin_key}"
         path = BASE_PATH
         if plugin_info.github_url:
             path = BASE_PATH / "plugins"
@@ -383,7 +387,9 @@ class StoreManager:
             plugin_key = await cls._resolve_plugin_key(plugin_id)
         except ValueError as e:
             return str(e)
-        plugin_info = next(p for p in plugin_list if p.module == plugin_key)
+        plugin_info = next((p for p in plugin_list if p.module == plugin_key), None)
+        if plugin_info is None:
+            return f"未找到插件 {plugin_key}"
         logger.info(f"尝试更新插件 {plugin_info.name}", LOG_COMMAND)
         db_plugin_list = await cls.get_loaded_plugins("module", "version")
         suc_plugin = {p[0]: (p[1] or "Unknown") for p in db_plugin_list}
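
Aside (not part of the commit): all three hunks replace a bare next() call, which raises StopIteration when no plugin in plugin_list matches plugin_key, with the two-argument form that returns None, so the method can report a readable error instead of crashing. A small self-contained illustration of the difference in plain Python (stand-in data, not project code):

from dataclasses import dataclass


@dataclass
class FakePlugin:
    module: str
    name: str


plugin_list = [FakePlugin(module="sign_in", name="签到")]
plugin_key = "missing_plugin"

# Old pattern: an unmatched key raises StopIteration inside the StoreManager method.
try:
    next(p for p in plugin_list if p.module == plugin_key)
except StopIteration:
    print("bare next() raised StopIteration")

# New pattern: the default keeps control in normal flow and yields an error message.
plugin_info = next((p for p in plugin_list if p.module == plugin_key), None)
if plugin_info is None:
    print(f"未找到插件 {plugin_key}")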

File 4 of 4:

@@ -1,4 +1,7 @@
-class DbUrlIsNode(Exception):
+from zhenxun.utils.exception import HookPriorityException
+
+
+class DbUrlIsNode(HookPriorityException):
     """
     数据库链接地址为空
     """