zhenxun_bot/zhenxun/builtin_plugins/llm_manager/data_source.py
Rumio 7c153721f0
♻️ refactor!: Restructure the LLM service architecture and unify Pydantic compatibility handling (#2002)

* ♻️ refactor(pydantic): Extract the Pydantic compatibility functions into a standalone module

* ♻️ refactor!(llm): Rework the LLM service around a modernized tool and executor architecture

🏗️ **Architecture changes**
- Introduce the ToolProvider/ToolExecutable protocols, replacing ToolRegistry
- Add LLMToolExecutor to separate tool-invocation logic
- Add the BaseMemory abstraction to decouple session-state management

🔄 **API refactor**
- Removed: analyze, analyze_multimodal, pipeline_chat
- Added: generate_structured, run_with_tools
- Reworked: chat, search, and code are now stateless calls

🛠️ **Tool system**
- Add the @function_tool decorator
- Unify tool definitions under the ToolExecutable protocol
- Remove the MCP tool system and mcp_tools.json

---------

Co-authored-by: webjoin111 <455457521@qq.com>
2025-08-04 23:36:12 +08:00
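
The tool-calling surface described in the commit notes centers on the @function_tool decorator and the new run_with_tools entry point. As a rough illustration of how those pieces are presumably meant to compose — the names function_tool, run_with_tools, ToolExecutable, and LLMToolExecutor come from the commit message, LLMMessage from the file below, but the import paths and call signatures here are assumptions rather than the project's confirmed API:

from zhenxun.services.llm import run_with_tools  # name from the commit notes; import path assumed
from zhenxun.services.llm.tools import function_tool  # hypothetical import path
from zhenxun.services.llm.types import LLMMessage


@function_tool  # assumed usage: exposes an async function as a ToolExecutable
async def get_weather(city: str) -> str:
    """查询指定城市的天气(示例桩实现)。"""
    return f"{city}: 晴, 25°C"


async def demo() -> None:
    # Assumed call shape: a message list plus the decorated tools;
    # run_with_tools would presumably drive the tool-call loop via LLMToolExecutor.
    reply = await run_with_tools(
        [LLMMessage.user("北京今天天气怎么样?")],
        tools=[get_weather],
    )
    print(reply)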


import time
from typing import Any

from zhenxun.services.llm import (
    LLMException,
    get_global_default_model_name,
    get_model_instance,
    list_available_models,
    set_global_default_model_name,
)
from zhenxun.services.llm.core import KeyStatus
from zhenxun.services.llm.manager import (
    reset_key_status,
)
from zhenxun.services.llm.types import LLMMessage


class DataSource:
    """LLM管理插件的数据源和业务逻辑"""

    @staticmethod
    async def get_model_list(show_all: bool = False) -> list[dict[str, Any]]:
        """获取模型列表"""
        models = list_available_models()
        if show_all:
            return models
        return [m for m in models if m.get("is_available", True)]

    @staticmethod
    async def get_model_details(model_name_str: str) -> dict[str, Any] | None:
        """获取指定模型的详细信息"""
        try:
            model = await get_model_instance(model_name_str)
            return {
                "provider_config": model.provider_config,
                "model_detail": model.model_detail,
                "capabilities": model.capabilities,
            }
        except LLMException:
            return None

    @staticmethod
    async def get_default_model() -> str | None:
        """获取全局默认模型"""
        return get_global_default_model_name()

    @staticmethod
    async def set_default_model(model_name_str: str) -> tuple[bool, str]:
        """设置全局默认模型"""
        success = set_global_default_model_name(model_name_str)
        if success:
            return True, f"✅ 成功将默认模型设置为: {model_name_str}"
        else:
            return False, f"❌ 设置失败,模型 '{model_name_str}' 不存在或无效。"

    @staticmethod
    async def test_model_connectivity(model_name_str: str) -> tuple[bool, str]:
        """测试模型连通性"""
        start_time = time.monotonic()
        try:
            async with await get_model_instance(model_name_str) as model:
                await model.generate_response([LLMMessage.user("你好")])
            end_time = time.monotonic()
            latency = (end_time - start_time) * 1000
            return (
                True,
                f"✅ 模型 '{model_name_str}' 连接成功!\n响应延迟: {latency:.2f} ms",
            )
        except LLMException as e:
            return (
                False,
                f"❌ 模型 '{model_name_str}' 连接测试失败:\n"
                f"{e.user_friendly_message}\n错误码: {e.code.name}",
            )
        except Exception as e:
            return False, f"❌ 测试时发生未知错误: {e!s}"

    @staticmethod
    async def get_key_status(provider_name: str) -> list[dict[str, Any]] | None:
        """获取并排序指定提供商的API Key状态"""
        from zhenxun.services.llm.manager import get_key_usage_stats

        all_stats = await get_key_usage_stats()
        provider_stats = all_stats.get(provider_name)
        if not provider_stats or not provider_stats.get("key_stats"):
            return None

        key_stats_dict = provider_stats["key_stats"]
        stats_list = [
            {"key_id": key_id, **stats} for key_id, stats in key_stats_dict.items()
        ]

        def sort_key(item: dict[str, Any]):
            status_priority = item.get("status_enum", KeyStatus.UNUSED).value
            return (
                status_priority,
                100 - item.get("success_rate", 100.0),
                -item.get("total_calls", 0),
            )

        sorted_stats_list = sorted(stats_list, key=sort_key)
        return sorted_stats_list

    @staticmethod
    async def reset_key(provider_name: str, api_key: str | None) -> tuple[bool, str]:
        """重置API Key状态"""
        success = await reset_key_status(provider_name, api_key)
        if success:
            if api_key:
                if len(api_key) > 8:
                    target = f"API Key '{api_key[:4]}...{api_key[-4:]}'"
                else:
                    target = f"API Key '{api_key}'"
            else:
                target = "所有API Keys"
            return True, f"✅ 成功重置提供商 '{provider_name}' {target} 的状态。"
        else:
            return False, "❌ 重置失败,请检查提供商名称或API Key是否正确。"
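
Every method on DataSource is a static coroutine that returns either plain data or a (success, message) tuple, so the plugin's command handlers only need to await the call and relay the message. A minimal, hypothetical caller (the handler names and the way replies are delivered are illustrative, not taken from the plugin) could look like:

async def handle_set_default(model_name: str) -> str:
    # Hypothetical handler: forwards the ready-made ✅/❌ message to the user.
    _success, message = await DataSource.set_default_model(model_name)
    return message


async def handle_key_status(provider_name: str) -> str:
    # Hypothetical handler: renders the sorted per-key statistics, if any.
    stats = await DataSource.get_key_status(provider_name)
    if stats is None:
        return f"未找到提供商 '{provider_name}' 的Key统计信息。"
    return "\n".join(
        f"{item['key_id']}: calls={item.get('total_calls', 0)}, "
        f"success_rate={item.get('success_rate', 100.0)}%"
        for item in stats
    )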