2025-06-21 16:33:21 +08:00
|
|
|
|
"""
|
2025-08-04 23:36:12 +08:00
|
|
|
|
LLM 服务的高级 API 接口 - 便捷函数入口 (无状态)
|
2025-06-21 16:33:21 +08:00
|
|
|
|
"""
|
|
|
|
|
|
|
2025-10-01 18:41:46 +08:00
|
|
|
|
from pathlib import Path
|
|
|
|
|
|
from typing import Any, TypeVar, overload
|
2025-06-21 16:33:21 +08:00
|
|
|
|
|
|
|
|
|
|
from nonebot_plugin_alconna.uniseg import UniMessage
|
2025-08-04 23:36:12 +08:00
|
|
|
|
from pydantic import BaseModel
|
2025-06-21 16:33:21 +08:00
|
|
|
|
|
|
|
|
|
|
from zhenxun.services.log import logger
|
|
|
|
|
|
|
2025-08-04 23:36:12 +08:00
|
|
|
|
from .config import CommonOverrides
|
2025-10-01 18:41:46 +08:00
|
|
|
|
from .config.generation import LLMGenerationConfig, create_generation_config_from_kwargs
|
2025-07-14 22:39:17 +08:00
|
|
|
|
from .manager import get_model_instance
|
|
|
|
|
|
from .session import AI
|
2025-08-04 23:36:12 +08:00
|
|
|
|
from .tools.manager import tool_provider_manager
|
2025-06-21 16:33:21 +08:00
|
|
|
|
from .types import (
|
|
|
|
|
|
EmbeddingTaskType,
|
|
|
|
|
|
LLMContentPart,
|
|
|
|
|
|
LLMErrorCode,
|
|
|
|
|
|
LLMException,
|
|
|
|
|
|
LLMMessage,
|
|
|
|
|
|
LLMResponse,
|
|
|
|
|
|
ModelName,
|
|
|
|
|
|
)
|
2025-10-01 18:41:46 +08:00
|
|
|
|
from .utils import create_multimodal_message
|
2025-08-04 23:36:12 +08:00
|
|
|
|
|
|
|
|
|
|
T = TypeVar("T", bound=BaseModel)
|
2025-06-21 16:33:21 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def chat(
    message: str | UniMessage | LLMMessage | list[LLMContentPart],
    *,
    model: ModelName = None,
    instruction: str | None = None,
    tools: list[dict[str, Any] | str] | None = None,
    tool_choice: str | dict[str, Any] | None = None,
    **kwargs: Any,
) -> LLMResponse:
    """Stateless chat convenience function.

    Spins up a throwaway :class:`AI` session and delegates the request to it.

    Args:
        message: User input; accepts plain text, ``UniMessage``,
            ``LLMMessage`` or a list of content parts.
        model: Model name to use; ``None`` selects the default model.
        instruction: Optional system instruction steering the reply style.
        tools: Available tools, given as dict configs or string identifiers.
        tool_choice: Strategy controlling how the model selects tools.
        **kwargs: Extra generation parameters, converted into an
            ``LLMGenerationConfig``.

    Returns:
        LLMResponse: Complete response (text, usage info, tool calls).

    Raises:
        LLMException: If the underlying chat call fails.
    """
    try:
        if kwargs:
            generation_config = create_generation_config_from_kwargs(**kwargs)
        else:
            generation_config = None

        session = AI()
        return await session.chat(
            message,
            model=model,
            instruction=instruction,
            tools=tools,
            tool_choice=tool_choice,
            config=generation_config,
        )
    except LLMException:
        raise
    except Exception as e:
        logger.error(f"执行 chat 函数失败: {e}", e=e)
        raise LLMException(f"聊天执行失败: {e}", cause=e)
|
2025-06-21 16:33:21 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def code(
    prompt: str,
    *,
    model: ModelName = None,
    timeout: int | None = None,
    **kwargs: Any,
) -> LLMResponse:
    """Stateless code-execution convenience function (sandboxed execution).

    Args:
        prompt: Prompt describing the code task to run.
        model: Model name to use; defaults to ``Gemini/gemini-2.0-flash``.
        timeout: Code-execution timeout in seconds; guards against
            long-running code blocking the call.
        **kwargs: Extra generation parameters; they override the defaults
            from the code-execution preset.

    Returns:
        LLMResponse: Complete response containing the execution result.
    """
    resolved_model = model or "Gemini/gemini-2.0-flash"

    config = CommonOverrides.gemini_code_execution()
    # BUGFIX: use an explicit None check so a caller-supplied timeout of 0
    # is still forwarded instead of being silently dropped by truthiness.
    if timeout is not None:
        config.custom_params = config.custom_params or {}
        config.custom_params["code_execution_timeout"] = timeout

    # Caller kwargs take precedence over the preset's defaults.
    final_config = config.to_dict()
    final_config.update(kwargs)

    return await chat(prompt, model=resolved_model, **final_config)
|
2025-07-08 11:15:15 +08:00
|
|
|
|
|
2025-06-21 16:33:21 +08:00
|
|
|
|
|
2025-08-04 23:36:12 +08:00
|
|
|
|
async def search(
    query: str | UniMessage | LLMMessage | list[LLMContentPart],
    *,
    model: ModelName = None,
    instruction: str = (
        "你是一位强大的信息检索和整合专家。请利用可用的搜索工具,"
        "根据用户的查询找到最相关的信息,并进行总结和回答。"
    ),
    **kwargs: Any,
) -> LLMResponse:
    """Stateless search convenience function using grounding/search tools.

    Args:
        query: Search query; accepts the same input formats as ``chat``.
        model: Model name to use; ``None`` selects the default model.
        instruction: System instruction telling the model how to digest
            the search results.
        **kwargs: Extra generation parameters; they override the
            grounding preset's defaults.

    Returns:
        LLMResponse: Response combining search results and the model's
        synthesized answer.
    """
    logger.debug("执行无状态 'search' 任务...")

    # Start from the grounding preset, then let caller kwargs win.
    merged_config = CommonOverrides.gemini_grounding().to_dict()
    merged_config.update(kwargs)

    return await chat(query, model=model, instruction=instruction, **merged_config)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def embed(
    texts: list[str] | str,
    *,
    model: ModelName = None,
    task_type: EmbeddingTaskType | str = EmbeddingTaskType.RETRIEVAL_DOCUMENT,
    **kwargs: Any,
) -> list[list[float]]:
    """Stateless text-embedding convenience function.

    Args:
        texts: Text(s) to embed; a single string or a list of strings.
        model: Embedding model name; ``None`` selects the default model.
        task_type: Embedding task type, which tunes the vectors for a
            purpose (retrieval, classification, ...).
        **kwargs: Extra model configuration parameters.

    Returns:
        list[list[float]]: One embedding vector per input text.

    Raises:
        LLMException: If embedding generation fails.
    """
    batch = [texts] if isinstance(texts, str) else texts
    if not batch:
        return []

    try:
        async with await get_model_instance(model) as instance:
            return await instance.generate_embeddings(
                batch, task_type=task_type, **kwargs
            )
    except LLMException:
        raise
    except Exception as e:
        logger.error(f"文本嵌入失败: {e}", e=e)
        raise LLMException(
            f"文本嵌入失败: {e}", code=LLMErrorCode.EMBEDDING_FAILED, cause=e
        )
|
2025-07-08 11:15:15 +08:00
|
|
|
|
|
|
|
|
|
|
|
2025-08-04 23:36:12 +08:00
|
|
|
|
async def generate_structured(
    message: str | LLMMessage | list[LLMContentPart],
    response_model: type[T],
    *,
    model: ModelName = None,
    instruction: str | None = None,
    **kwargs: Any,
) -> T:
    """Statelessly generate a structured response parsed into a Pydantic model.

    Args:
        message: User input; accepts plain text, ``LLMMessage`` or a list
            of content parts.
        response_model: Pydantic model class used to parse and validate
            the response.
        model: Model name to use; ``None`` selects the default model.
        instruction: System instruction guiding the structured output.
        **kwargs: Extra generation parameters.

    Returns:
        T: Parsed instance of ``response_model``.

    Raises:
        LLMException: If structured generation or parsing fails.
    """
    try:
        if kwargs:
            generation_config = create_generation_config_from_kwargs(**kwargs)
        else:
            generation_config = None

        session = AI()
        return await session.generate_structured(
            message,
            response_model,
            model=model,
            instruction=instruction,
            config=generation_config,
        )
    except LLMException:
        raise
    except Exception as e:
        logger.error(f"生成结构化响应失败: {e}", e=e)
        raise LLMException(f"生成结构化响应失败: {e}", cause=e)
|
2025-07-14 22:39:17 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def generate(
    messages: list[LLMMessage],
    *,
    model: ModelName = None,
    tools: list[dict[str, Any] | str] | None = None,
    tool_choice: str | dict[str, Any] | None = None,
    **kwargs: Any,
) -> LLMResponse:
    """Stateless low-level one-shot generation from a full message list.

    Args:
        messages: Complete message history (system, user and assistant
            messages).
        model: Model name to use; ``None`` selects the default model.
        tools: Available tools, given as dict configs or string identifiers.
        tool_choice: Strategy controlling how the model selects tools.
        **kwargs: Extra generation parameters overriding the model's
            default configuration.

    Returns:
        LLMResponse: Complete response (text, usage info, tool calls).

    Raises:
        LLMException: If response generation fails.
    """
    try:
        model_ctx = await get_model_instance(model, override_config=kwargs)
        async with model_ctx as instance:
            return await instance.generate_response(
                messages,
                tools=tools,  # type: ignore
                tool_choice=tool_choice,
            )
    except LLMException:
        raise
    except Exception as e:
        logger.error(f"生成响应失败: {e}", e=e)
        raise LLMException(f"生成响应失败: {e}", cause=e)
|
2025-08-04 23:36:12 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def run_with_tools(
    message: str | UniMessage | LLMMessage | list[LLMContentPart],
    *,
    model: ModelName = None,
    instruction: str | None = None,
    tools: list[str],
    max_cycles: int = 5,
    **kwargs: Any,
) -> LLMResponse:
    """Statelessly run an LLM call loop with local Python function tools.

    Args:
        message: User input.
        model: Model to use.
        instruction: System instruction.
        tools: Names of local function tools to use (must already be
            registered via ``@function_tool``).
        max_cycles: Maximum number of tool-call loop iterations.
        **kwargs: Extra generation parameters.

    Returns:
        LLMResponse: Response object carrying the final reply.

    Raises:
        LLMException: If the tool loop produces no assistant reply.
    """
    from .executor import ExecutionConfig, LLMToolExecutor
    from .utils import normalize_to_llm_messages

    history = await normalize_to_llm_messages(message, instruction)

    async with await get_model_instance(
        model, override_config=kwargs
    ) as model_instance:
        resolved_tools = await tool_provider_manager.get_function_tools(tools)
        if not resolved_tools:
            # No usable local tools: degrade gracefully to a plain chat call.
            logger.warning(
                "run_with_tools 未找到任何可用的本地函数工具,将作为普通聊天执行。"
            )
            return await model_instance.generate_response(history, tools=None)

        tool_executor = LLMToolExecutor(model_instance)
        exec_config = ExecutionConfig(max_cycles=max_cycles)
        final_history = await tool_executor.run(history, resolved_tools, exec_config)

        # The loop's outcome is the most recent assistant message, if any.
        last_assistant = next(
            (m for m in reversed(final_history) if m.role == "assistant"), None
        )
        if last_assistant is not None:
            content = last_assistant.content
            reply_text = content if isinstance(content, str) else str(content)
            return LLMResponse(text=reply_text, tool_calls=last_assistant.tool_calls)

        raise LLMException(
            "带工具的执行循环未能产生有效的助手回复。", code=LLMErrorCode.GENERATION_FAILED
        )
|
2025-10-01 18:41:46 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def _generate_image_from_message(
    message: UniMessage,
    model: ModelName = None,
    **kwargs: Any,
) -> LLMResponse:
    """[internal] Core helper that generates an image from a ``UniMessage``.

    Raises:
        LLMException: If the model cannot generate images or the call fails.
    """
    from .utils import normalize_to_llm_messages

    if kwargs:
        config = create_generation_config_from_kwargs(**kwargs)
    else:
        config = LLMGenerationConfig()

    # Force image output and require at least one image in the response.
    config.validation_policy = {"require_image": True}
    config.response_modalities = ["IMAGE", "TEXT"]

    try:
        normalized = await normalize_to_llm_messages(message)

        async with await get_model_instance(model) as model_instance:
            if not model_instance.can_generate_images():
                raise LLMException(
                    f"模型 '{model_instance.provider_name}/{model_instance.model_name}'"
                    f"不支持图片生成",
                    code=LLMErrorCode.CONFIGURATION_ERROR,
                )

            response = await model_instance.generate_response(
                normalized, config=config
            )

            if not response.images:
                # Model replied with text only; surface that for debugging
                # but still hand the response back to the caller.
                error_text = response.text or "模型未返回图片数据。"
                logger.warning(f"图片生成调用未返回图片,返回文本内容: {error_text}")

            return response
    except LLMException:
        raise
    except Exception as e:
        logger.error(f"执行图片生成时发生未知错误: {e}", e=e)
        raise LLMException(f"图片生成失败: {e}", cause=e)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@overload
async def create_image(
    prompt: str | UniMessage,
    *,
    images: None = None,
    model: ModelName = None,
    **kwargs: Any,
) -> LLMResponse:
    """Generate a brand-new image from a text prompt (no input images)."""
    ...
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@overload
async def create_image(
    prompt: str | UniMessage,
    *,
    images: list[Path | bytes | str] | Path | bytes | str,
    model: ModelName = None,
    **kwargs: Any,
) -> LLMResponse:
    """Edit or regenerate based on the given image(s) and a text prompt."""
    ...
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def create_image(
    prompt: str | UniMessage,
    *,
    images: list[Path | bytes | str] | Path | bytes | str | None = None,
    model: ModelName = None,
    **kwargs: Any,
) -> LLMResponse:
    """Smart image generation/editing entry point.

    - With ``images=None``, performs pure text-to-image generation.
    - With ``images`` supplied, performs image+text generation and accepts
      one or many input images.
    """
    if isinstance(prompt, UniMessage):
        text_prompt = prompt.extract_plain_text()
    else:
        text_prompt = str(prompt)

    # Normalize `images` (single item or list) into a flat list.
    image_list: list[Path | bytes | str] = []
    if images:
        image_list = list(images) if isinstance(images, list) else [images]

    multimodal = create_multimodal_message(text=text_prompt, images=image_list)
    return await _generate_image_from_message(multimodal, model=model, **kwargs)
|