"""LLM service module - public API entry point.

Provides a unified AI service invocation interface, core type definitions,
and model management utilities for the rest of the bot.
"""

from .api import (
    analyze,
    analyze_multimodal,
    chat,
    code,
    embed,
    generate,
    pipeline_chat,
    search,
    search_multimodal,
)
from .config import (
    CommonOverrides,
    LLMGenerationConfig,
    register_llm_configs,
)

# Register LLM configuration entries eagerly, before importing the modules
# below that may read those config values at import time.
register_llm_configs()

from .api import ModelName
from .manager import (
    clear_model_cache,
    get_cache_stats,
    get_global_default_model_name,
    get_model_instance,
    list_available_models,
    list_embedding_models,
    list_model_identifiers,
    set_global_default_model_name,
)
from .session import AI, AIConfig
from .tools import tool_registry
from .types import (
    EmbeddingTaskType,
    LLMContentPart,
    LLMErrorCode,
    LLMException,
    LLMMessage,
    LLMResponse,
    LLMTool,
    MCPCompatible,
    ModelDetail,
    ModelInfo,
    ModelProvider,
    ResponseFormat,
    TaskType,
    ToolCategory,
    ToolMetadata,
    UsageInfo,
)
from .utils import create_multimodal_message, message_to_unimessage, unimsg_to_llm_parts

# Explicit public API of the package (keep sorted alphabetically).
__all__ = [
    "AI",
    "AIConfig",
    "CommonOverrides",
    "EmbeddingTaskType",
    "LLMContentPart",
    "LLMErrorCode",
    "LLMException",
    "LLMGenerationConfig",
    "LLMMessage",
    "LLMResponse",
    "LLMTool",
    "MCPCompatible",
    "ModelDetail",
    "ModelInfo",
    "ModelName",
    "ModelProvider",
    "ResponseFormat",
    "TaskType",
    "ToolCategory",
    "ToolMetadata",
    "UsageInfo",
    "analyze",
    "analyze_multimodal",
    "chat",
    "clear_model_cache",
    "code",
    "create_multimodal_message",
    "embed",
    "generate",
    "get_cache_stats",
    "get_global_default_model_name",
    "get_model_instance",
    "list_available_models",
    "list_embedding_models",
    "list_model_identifiers",
    "message_to_unimessage",
    "pipeline_chat",
    "register_llm_configs",
    "search",
    "search_multimodal",
    "set_global_default_model_name",
    "tool_registry",
    "unimsg_to_llm_parts",
]