diff --git a/.env.dev b/.env.dev
index 3e1059c2..62f9d1bd 100644
--- a/.env.dev
+++ b/.env.dev
@@ -27,6 +27,16 @@ QBOT_ID_DATA = '{
 # Example: "sqlite:data/db/zhenxun.db" creates the db folder under the data directory
 DB_URL = ""

+# REDIS configuration: use REDIS in place of the in-memory Cache
+# REDIS host
+# REDIS_HOST = "127.0.0.1"
+# REDIS port
+# REDIS_PORT = 6379
+# REDIS password
+# REDIS_PASSWORD = ""
+# REDIS expiration time
+# REDIS_EXPIRE = 600
+
 # System proxy
 # SYSTEM_PROXY = "http://127.0.0.1:7890"

diff --git a/README.md b/README.md
index b8dec874..83987f0d 100644
--- a/README.md
+++ b/README.md
@@ -150,7 +150,7 @@ poetry run python bot.py

 1. Fill in your bot configuration items in the .env.dev file

-2. Modify the plugin configuration items you need in the data/config.yaml file
+2. Modify the plugin configuration items you need in the configs/config.yaml file
数据库地址(DB_URL)配置说明 diff --git a/poetry.lock b/poetry.lock index 21748cd0..d2ddf30d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "aiocache" @@ -6,12 +6,14 @@ version = "0.12.3" description = "multi backend asyncio cache" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "aiocache-0.12.3-py2.py3-none-any.whl", hash = "sha256:889086fc24710f431937b87ad3720a289f7fc31c4fd8b68e9f918b9bacd8270d"}, {file = "aiocache-0.12.3.tar.gz", hash = "sha256:f528b27bf4d436b497a1d0d1a8f59a542c153ab1e37c3621713cb376d44c4713"}, ] +[package.dependencies] +redis = {version = ">=4.2.0", optional = true, markers = "extra == \"redis\""} + [package.extras] memcached = ["aiomcache (>=0.5.2)"] msgpack = ["msgpack (>=0.5.5)"] @@ -28,7 +30,6 @@ version = "23.2.1" description = "File support for asyncio." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "aiofiles-23.2.1-py3-none-any.whl", hash = "sha256:19297512c647d4b27a2cf7c34caa7e405c0d60b5560618a29a9fe027b18b0107"}, {file = "aiofiles-23.2.1.tar.gz", hash = "sha256:84ec2218d8419404abcb9f0c02df3f34c6e0a68ed41072acfb1cef5cbc29051a"}, @@ -45,7 +46,6 @@ version = "0.17.0" description = "asyncio bridge to the standard sqlite3 module" optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "aiosqlite-0.17.0-py3-none-any.whl", hash = "sha256:6c49dc6d3405929b1d08eeccc72306d3677503cc5e5e43771efc1e00232e8231"}, {file = "aiosqlite-0.17.0.tar.gz", hash = "sha256:f0e6acc24bc4864149267ac82fb46dfb3be4455f99fe21df82609cc6e6baee51"}, @@ -65,7 +65,6 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -82,7 +81,6 @@ version = "4.9.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, @@ -96,7 +94,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] trio = ["trio (>=0.26.1)"] [package.source] @@ -110,7 +108,6 @@ version = "3.11.0" description = "In-process task 
scheduler with Cron-like capabilities" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "APScheduler-3.11.0-py3-none-any.whl", hash = "sha256:fc134ca32e50f5eadcc4938e3a4545ab19131435e851abb40b34d63d5141c6da"}, {file = "apscheduler-3.11.0.tar.gz", hash = "sha256:4c622d250b0955a65d5d0eb91c33e6d43fd879834bf541e0a18661ae60460133"}, @@ -127,7 +124,7 @@ mongodb = ["pymongo (>=3.0)"] redis = ["redis (>=3.0)"] rethinkdb = ["rethinkdb (>=2.4.0)"] sqlalchemy = ["sqlalchemy (>=1.4)"] -test = ["APScheduler[etcd,mongodb,redis,rethinkdb,sqlalchemy,tornado,zookeeper]", "PySide6 ; platform_python_implementation == \"CPython\" and python_version < \"3.14\"", "anyio (>=4.5.2)", "gevent ; python_version < \"3.14\"", "pytest", "pytz", "twisted ; python_version < \"3.14\""] +test = ["APScheduler[etcd,mongodb,redis,rethinkdb,sqlalchemy,tornado,zookeeper]", "PySide6", "anyio (>=4.5.2)", "gevent", "pytest", "pytz", "twisted"] tornado = ["tornado (>=4.3)"] twisted = ["twisted"] zookeeper = ["kazoo"] @@ -143,7 +140,6 @@ version = "1.8.36" description = "A High-performance, Generality, Humane Command Line Arguments Parser Library." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "arclet_alconna-1.8.36-py3-none-any.whl", hash = "sha256:05912c6639a07959096ce4c6c3c1730b82343e154ceac72f8676661f1c5594fc"}, {file = "arclet_alconna-1.8.36.tar.gz", hash = "sha256:7d50b12e936ff7db37939921fc47a9bd68b90d8eee3ce0cfbbb5825248f14e70"}, @@ -168,7 +164,6 @@ version = "0.7.10" description = "Builtin Tools for Alconna" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "arclet_alconna_tools-0.7.10-py3-none-any.whl", hash = "sha256:50e8b2f433fbc612dc8b99f4f5410006dcb1ef406c971c795071117a4eab8e20"}, {file = "arclet_alconna_tools-0.7.10.tar.gz", hash = "sha256:446a63a9c56886c23fb44548bb9a18655e0ba5b5dd80cc87915b858dfb02554c"}, @@ -189,7 +184,6 @@ version = "1.3.0" description = "Better dates & times for Python" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, @@ -214,7 +208,6 @@ version = "3.8.1" description = "ASGI specs, helper code, and adapters" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47"}, {file = "asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"}, @@ -237,7 +230,6 @@ version = "1.4.11" description = "Async client for testing ASGI web applications" optional = false python-versions = "*" -groups = ["dev"] files = [ {file = "async-asgi-testclient-1.4.11.tar.gz", hash = "sha256:4449ac85d512d661998ec61f91c9ae01851639611d748d81ae7f816736551792"}, ] @@ -257,8 +249,6 @@ version = "5.0.1" description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "python_version == \"3.10\"" files = [ {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, @@ -273,9 +263,8 @@ reference = "aliyun" name = "asyncpg" version = "0.30.0" description = 
"An asyncio PostgreSQL driver" -optional = false +optional = true python-versions = ">=3.8.0" -groups = ["main"] files = [ {file = "asyncpg-0.30.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bfb4dd5ae0699bad2b233672c8fc5ccbd9ad24b89afded02341786887e37927e"}, {file = "asyncpg-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc1f62c792752a49f88b7e6f774c26077091b44caceb1983509edc18a2222ec0"}, @@ -333,8 +322,8 @@ async-timeout = {version = ">=4.0.3", markers = "python_version < \"3.11.0\""} [package.extras] docs = ["Sphinx (>=8.1.3,<8.2.0)", "sphinx-rtd-theme (>=1.2.2)"] -gssauth = ["gssapi ; platform_system != \"Windows\"", "sspilib ; platform_system == \"Windows\""] -test = ["distro (>=1.9.0,<1.10.0)", "flake8 (>=6.1,<7.0)", "flake8-pyi (>=24.1.0,<24.2.0)", "gssapi ; platform_system == \"Linux\"", "k5test ; platform_system == \"Linux\"", "mypy (>=1.8.0,<1.9.0)", "sspilib ; platform_system == \"Windows\"", "uvloop (>=0.15.3) ; platform_system != \"Windows\" and python_version < \"3.14.0\""] +gssauth = ["gssapi", "sspilib"] +test = ["distro (>=1.9.0,<1.10.0)", "flake8 (>=6.1,<7.0)", "flake8-pyi (>=24.1.0,<24.2.0)", "gssapi", "k5test", "mypy (>=1.8.0,<1.9.0)", "sspilib", "uvloop (>=0.15.3)"] [package.source] type = "legacy" @@ -347,19 +336,18 @@ version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, ] [package.extras] -benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest 
(>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [package.source] type = "legacy" @@ -372,7 +360,6 @@ version = "4.13.3" description = "Screen-scraping library" optional = false python-versions = ">=3.7.0" -groups = ["main"] files = [ {file = "beautifulsoup4-4.13.3-py3-none-any.whl", hash = "sha256:99045d7d3f08f91f0d656bc9b7efbae189426cd913d830294a15eefa0ea4df16"}, {file = "beautifulsoup4-4.13.3.tar.gz", hash = "sha256:1bd32405dacc920b42b83ba01644747ed77456a65760e285fbc47633ceddaf8b"}, @@ -400,7 +387,6 @@ version = "0.2.3.post0" description = "" optional = false python-versions = ">=3.7,<4.0" -groups = ["main"] files = [ {file = "bilireq-0.2.3.post0-py3-none-any.whl", hash = "sha256:8d1f98bb8fb59c0ce1dec226329353ce51e2efaad0a6b4d240437b6132648322"}, {file = "bilireq-0.2.3.post0.tar.gz", hash = "sha256:3185c3952a2becc7d31b0c01a12fda463fa477253504a68f81ea871594887ab4"}, @@ -426,7 +412,6 @@ version = "0.4.4" description = "Ultra-lightweight pure Python package to check if a file is binary or text." optional = false python-versions = "*" -groups = ["main"] files = [ {file = "binaryornot-0.4.4-py2.py3-none-any.whl", hash = "sha256:b8b71173c917bddcd2c16070412e369c3ed7f0528926f70cac18a6c97fd563e4"}, {file = "binaryornot-0.4.4.tar.gz", hash = "sha256:359501dfc9d40632edc9fac890e19542db1a287bbcfa58175b66658392018061"}, @@ -446,7 +431,6 @@ version = "7.4.0" description = "cache tools with async power" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "cashews-7.4.0-py3-none-any.whl", hash = "sha256:e881cc9b4be05ac9ce2c448784bca2864776b1c13ee262658d7c0ebf0d3d257a"}, {file = "cashews-7.4.0.tar.gz", hash = "sha256:c9d22b9b9da567788f232374a5de3b30ceed1e5c24085c96d304b696df0dcbd8"}, @@ -471,7 +455,6 @@ version = "23.2.3" description = "Composable complex class support for attrs and dataclasses." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "cattrs-23.2.3-py3-none-any.whl", hash = "sha256:0341994d94971052e9ee70662542699a3162ea1e0c62f7ce1b4a57f563685108"}, {file = "cattrs-23.2.3.tar.gz", hash = "sha256:a934090d95abaa9e911dac357e3a8699e0b4b14f8529bcc7d2b1ad9d51672b9f"}, @@ -486,7 +469,7 @@ typing-extensions = {version = ">=4.1.0,<4.6.3 || >4.6.3", markers = "python_ver bson = ["pymongo (>=4.4.0)"] cbor2 = ["cbor2 (>=5.4.6)"] msgpack = ["msgpack (>=1.0.5)"] -orjson = ["orjson (>=3.9.2) ; implementation_name == \"cpython\""] +orjson = ["orjson (>=3.9.2)"] pyyaml = ["pyyaml (>=6.0)"] tomlkit = ["tomlkit (>=0.11.8)"] ujson = ["ujson (>=5.7.0)"] @@ -502,7 +485,6 @@ version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" -groups = ["main", "dev"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, @@ -519,8 +501,6 @@ version = "1.17.1" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "platform_python_implementation != \"PyPy\"" files = [ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, @@ -605,7 +585,6 @@ version = "3.4.0" description = "Validate configuration and produce human readable error messages." optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, @@ -622,7 +601,6 @@ version = "5.2.0" description = "Universal encoding detector for Python 3" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, @@ -639,7 +617,6 @@ version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" -groups = ["main", "dev"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -746,7 +723,6 @@ version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, @@ -766,7 +742,6 @@ version = "0.5.23" description = "Convert Chinese numerals and Arabic numerals." optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "cn2an-0.5.23-py3-none-any.whl", hash = "sha256:b19ab3c53676765c038ccdab51f69b7efa4f0b888139c34088935769241f1cbf"}, {file = "cn2an-0.5.23.tar.gz", hash = "sha256:eda06a63e5eff4a64488d9f22e5f2a4ceca6eaa63416e4f771e67edecb1a5bdb"}, @@ -786,12 +761,10 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "sys_platform == \"win32\" or platform_system == \"Windows\"", dev = "sys_platform == \"win32\""} [package.source] type = "legacy" @@ -804,7 +777,6 @@ version = "2.6.0" description = "A command-line utility that creates projects from project templates, e.g. creating a Python package project from a Python package project template." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "cookiecutter-2.6.0-py3-none-any.whl", hash = "sha256:a54a8e37995e4ed963b3e82831072d1ad4b005af736bb17b99c2cbd9d41b6e2d"}, {file = "cookiecutter-2.6.0.tar.gz", hash = "sha256:db21f8169ea4f4fdc2408d48ca44859349de2647fbe494a9d6c3edfc0542c21c"}, @@ -831,7 +803,6 @@ version = "7.8.0" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "coverage-7.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2931f66991175369859b5fd58529cd4b73582461877ecfd859b6549869287ffe"}, {file = "coverage-7.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:52a523153c568d2c0ef8826f6cc23031dc86cffb8c6aeab92c4ff776e7951b28"}, @@ -902,7 +873,7 @@ files = [ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] -toml = ["tomli ; python_full_version <= \"3.11.0a6\""] +toml = ["tomli"] [package.source] type = "legacy" @@ -915,7 +886,6 @@ version = "44.0.2" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7, !=3.9.0, !=3.9.1" -groups = ["main"] files = [ {file = "cryptography-44.0.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:efcfe97d1b3c79e486554efddeb8f6f53a4cdd4cf6086642784fa31fc384e1d7"}, {file = "cryptography-44.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29ecec49f3ba3f3849362854b7253a9f59799e3763b0c9d0826259a88efa02f1"}, @@ -958,10 +928,10 @@ files = [ cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} [package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=3.0.0) ; python_version >= \"3.8\""] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=3.0.0)"] docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] -nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_version >= \"3.8\""] -pep8test = ["check-sdist ; python_version >= \"3.8\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] +nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2)"] +pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] test = ["certifi (>=2024)", "cryptography-vectors (==44.0.2)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] @@ -978,7 +948,6 @@ version = "1.2.1" description = "Date parsing library designed to parse dates from HTML pages" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "dateparser-1.2.1-py3-none-any.whl", hash = "sha256:bdcac262a467e6260030040748ad7c10d6bacd4f3b9cdb4cfd2251939174508c"}, {file = "dateparser-1.2.1.tar.gz", hash = "sha256:7e4919aeb48481dbfc01ac9683c8e20bfe95bb715a38c1e9f6af889f4f30ccc3"}, @@ -1006,7 +975,6 @@ version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" -groups = ["main", "dev"] files = [ {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, @@ -1023,7 +991,6 @@ version = "0.19.1" description = "ECDSA cryptographic signature library (pure python)" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -groups = ["main"] files = [ {file = "ecdsa-0.19.1-py2.py3-none-any.whl", hash = "sha256:30638e27cf77b7e15c4c4cc1973720149e1033827cfd00661ca5c8cc0cdb24c3"}, {file = "ecdsa-0.19.1.tar.gz", hash = "sha256:478cba7b62555866fcb3bb3fe985e06decbdb68ef55713c4e5ab98c57d508e61"}, @@ -1047,7 +1014,6 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -1067,7 +1033,6 @@ version = "2.1.1" description = "execnet: rapid multi-Python deployment" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, @@ -1087,7 +1052,6 @@ version = "0.115.12" description = "FastAPI 
framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "fastapi-0.115.12-py3-none-any.whl", hash = "sha256:e94613d6c05e27be7ffebdd6ea5f388112e5e430c8f7d6494a9d1d88d43e814d"}, {file = "fastapi-0.115.12.tar.gz", hash = "sha256:1e2c2a2646905f9e83d32f04a3f86aff4a286669c6c950ca95b5fd68c2602681"}, @@ -1113,7 +1077,6 @@ version = "6.0.11" description = "Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds" optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "feedparser-6.0.11-py3-none-any.whl", hash = "sha256:0be7ee7b395572b19ebeb1d6aafb0028dee11169f1c934e0ed67d54992f4ad45"}, {file = "feedparser-6.0.11.tar.gz", hash = "sha256:c9d0407b64c6f2a065d0ebb292c2b35c01050cc0dc33757461aaabdc4c4184d5"}, @@ -1133,7 +1096,6 @@ version = "3.18.0" description = "A platform independent file lock." optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, @@ -1142,7 +1104,7 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] +typing = ["typing-extensions (>=4.12.2)"] [package.source] type = "legacy" @@ -1155,7 +1117,6 @@ version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, @@ -1247,7 +1208,6 @@ version = "1.71.0" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "grpcio-1.71.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:c200cb6f2393468142eb50ab19613229dcc7829b5ccee8b658a36005f6669fdd"}, {file = "grpcio-1.71.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b2266862c5ad664a380fbbcdbdb8289d71464c42a8c29053820ee78ba0119e5d"}, @@ -1316,7 +1276,6 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -1333,7 +1292,6 @@ version = "0.16.3" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.7" -groups = ["main", "dev"] files = [ {file = "httpcore-0.16.3-py3-none-any.whl", hash = "sha256:da1fb708784a938aa084bde4feb8317056c55037247c787bd7e19eb2c2949dc0"}, {file = "httpcore-0.16.3.tar.gz", hash = "sha256:c5d6f04e2fc530f39e0c077e6a30caa53f1451096120f1f38b954afd0b17c0cb"}, @@ -1360,7 +1318,6 @@ version = "0.6.4" description = "A collection of framework independent HTTP protocol utils." optional = false python-versions = ">=3.8.0" -groups = ["main"] files = [ {file = "httptools-0.6.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c73ce323711a6ffb0d247dcd5a550b8babf0f757e86a52558fe5b86d6fefcc0"}, {file = "httptools-0.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345c288418f0944a6fe67be8e6afa9262b18c7626c3ef3c28adc5eabc06a68da"}, @@ -1421,7 +1378,6 @@ version = "0.23.3" description = "The next generation HTTP client." optional = false python-versions = ">=3.7" -groups = ["main", "dev"] files = [ {file = "httpx-0.23.3-py3-none-any.whl", hash = "sha256:a211fcce9b1254ea24f0cd6af9869b3d29aba40154e947d2a07bb499b3e310d6"}, {file = "httpx-0.23.3.tar.gz", hash = "sha256:9818458eb565bb54898ccb9b8b251a28785dd4a55afbc23d0eb410754fe7d0f9"}, @@ -1434,7 +1390,7 @@ rfc3986 = {version = ">=1.3,<2", extras = ["idna2008"]} sniffio = "*" [package.extras] -brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<13)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -1450,7 +1406,6 @@ version = "2.6.9" description = "File identification library for Python" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "identify-2.6.9-py2.py3-none-any.whl", hash = "sha256:c98b4322da415a8e5a70ff6e51fbc2d2932c015532d77e9f8537b4ba7813b150"}, {file = "identify-2.6.9.tar.gz", hash = "sha256:d40dfe3142a1421d8518e3d3985ef5ac42890683e32306ad614a29490abeb6bf"}, @@ -1470,7 +1425,6 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main", "dev"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -1490,7 +1444,6 @@ version = "4.3.2" description = "Image Hashing library" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "ImageHash-4.3.2-py2.py3-none-any.whl", hash = "sha256:02b0f965f8c77cd813f61d7d39031ea27d4780e7ebcad56c6cd6a709acc06e5f"}, {file = "ImageHash-4.3.2.tar.gz", hash = "sha256:e54a79805afb82a34acde4746a16540503a9636fd1ffb31d8e099b29bbbf8156"}, @@ -1513,7 +1466,6 @@ version = "8.6.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"}, {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"}, @@ -1523,12 +1475,12 @@ files = [ zipp = ">=3.20" [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", 
"jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] [package.source] @@ -1542,7 +1494,6 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -1559,7 +1510,6 @@ version = "1.1.0" description = "Simple module to parse ISO 8601 dates" optional = false python-versions = ">=3.6.2,<4.0" -groups = ["main"] files = [ {file = "iso8601-1.1.0-py3-none-any.whl", hash = "sha256:8400e90141bf792bce2634df533dc57e3bee19ea120a87bebcd3da89a58ad73f"}, {file = "iso8601-1.1.0.tar.gz", hash = "sha256:32811e7b81deee2063ea6d2e94f8819a86d1f3811e49d23623a41fa832bef03f"}, @@ -1576,7 +1526,6 @@ version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -1599,7 +1548,6 @@ version = "0.7.3" description = "Python logging made (stupidly) simple" optional = false python-versions = ">=3.5,<4.0" -groups = ["main", "dev"] files = [ {file = "loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c"}, {file = "loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6"}, @@ -1610,7 +1558,7 @@ colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""} win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} [package.extras] -dev = ["Sphinx (==8.1.3) ; python_version >= \"3.11\"", "build (==1.2.2) ; python_version >= \"3.11\"", "colorama (==0.4.5) ; python_version < \"3.8\"", "colorama (==0.4.6) ; python_version >= \"3.8\"", "exceptiongroup (==1.1.3) ; python_version >= \"3.7\" and python_version < \"3.11\"", "freezegun (==1.1.0) ; python_version < \"3.8\"", "freezegun (==1.5.0) ; python_version >= \"3.8\"", "mypy (==v0.910) ; python_version < \"3.6\"", "mypy (==v0.971) ; python_version == \"3.6\"", "mypy (==v1.13.0) ; python_version >= \"3.8\"", "mypy (==v1.4.1) ; python_version == \"3.7\"", "myst-parser (==4.0.0) ; python_version >= \"3.11\"", "pre-commit (==4.0.1) ; python_version >= \"3.9\"", "pytest (==6.1.2) ; python_version < \"3.8\"", "pytest (==8.3.2) ; python_version >= \"3.8\"", "pytest-cov (==2.12.1) ; python_version < \"3.8\"", "pytest-cov (==5.0.0) ; python_version == \"3.8\"", "pytest-cov (==6.0.0) ; python_version >= \"3.9\"", "pytest-mypy-plugins (==1.9.3) ; python_version >= \"3.6\" and python_version < \"3.8\"", "pytest-mypy-plugins (==3.1.0) ; python_version >= \"3.8\"", "sphinx-rtd-theme (==3.0.2) ; python_version >= \"3.11\"", "tox (==3.27.1) ; python_version < \"3.8\"", "tox (==4.23.2) 
; python_version >= \"3.8\"", "twine (==6.0.1) ; python_version >= \"3.11\""] +dev = ["Sphinx (==8.1.3)", "build (==1.2.2)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.5.0)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.13.0)", "mypy (==v1.4.1)", "myst-parser (==4.0.0)", "pre-commit (==4.0.1)", "pytest (==6.1.2)", "pytest (==8.3.2)", "pytest-cov (==2.12.1)", "pytest-cov (==5.0.0)", "pytest-cov (==6.0.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.1.0)", "sphinx-rtd-theme (==3.0.2)", "tox (==3.27.1)", "tox (==4.23.2)", "twine (==6.0.1)"] [package.source] type = "legacy" @@ -1619,150 +1567,149 @@ reference = "aliyun" [[package]] name = "lxml" -version = "5.3.1" +version = "5.3.2" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." optional = false python-versions = ">=3.6" -groups = ["main"] files = [ - {file = "lxml-5.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a4058f16cee694577f7e4dd410263cd0ef75644b43802a689c2b3c2a7e69453b"}, - {file = "lxml-5.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:364de8f57d6eda0c16dcfb999af902da31396949efa0e583e12675d09709881b"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:528f3a0498a8edc69af0559bdcf8a9f5a8bf7c00051a6ef3141fdcf27017bbf5"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db4743e30d6f5f92b6d2b7c86b3ad250e0bad8dee4b7ad8a0c44bfb276af89a3"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17b5d7f8acf809465086d498d62a981fa6a56d2718135bb0e4aa48c502055f5c"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:928e75a7200a4c09e6efc7482a1337919cc61fe1ba289f297827a5b76d8969c2"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a997b784a639e05b9d4053ef3b20c7e447ea80814a762f25b8ed5a89d261eac"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:7b82e67c5feb682dbb559c3e6b78355f234943053af61606af126df2183b9ef9"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:f1de541a9893cf8a1b1db9bf0bf670a2decab42e3e82233d36a74eda7822b4c9"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:de1fc314c3ad6bc2f6bd5b5a5b9357b8c6896333d27fdbb7049aea8bd5af2d79"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7c0536bd9178f754b277a3e53f90f9c9454a3bd108b1531ffff720e082d824f2"}, - {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:68018c4c67d7e89951a91fbd371e2e34cd8cfc71f0bb43b5332db38497025d51"}, - {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa826340a609d0c954ba52fd831f0fba2a4165659ab0ee1a15e4aac21f302406"}, - {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:796520afa499732191e39fc95b56a3b07f95256f2d22b1c26e217fb69a9db5b5"}, - {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3effe081b3135237da6e4c4530ff2a868d3f80be0bda027e118a5971285d42d0"}, - {file = "lxml-5.3.1-cp310-cp310-win32.whl", hash = "sha256:a22f66270bd6d0804b02cd49dae2b33d4341015545d17f8426f2c4e22f557a23"}, - {file = "lxml-5.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:0bcfadea3cdc68e678d2b20cb16a16716887dd00a881e16f7d806c2138b8ff0c"}, - {file = 
"lxml-5.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e220f7b3e8656ab063d2eb0cd536fafef396829cafe04cb314e734f87649058f"}, - {file = "lxml-5.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f2cfae0688fd01f7056a17367e3b84f37c545fb447d7282cf2c242b16262607"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67d2f8ad9dcc3a9e826bdc7802ed541a44e124c29b7d95a679eeb58c1c14ade8"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db0c742aad702fd5d0c6611a73f9602f20aec2007c102630c06d7633d9c8f09a"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:198bb4b4dd888e8390afa4f170d4fa28467a7eaf857f1952589f16cfbb67af27"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2a3e412ce1849be34b45922bfef03df32d1410a06d1cdeb793a343c2f1fd666"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8969dbc8d09d9cd2ae06362c3bad27d03f433252601ef658a49bd9f2b22d79"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5be8f5e4044146a69c96077c7e08f0709c13a314aa5315981185c1f00235fe65"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:133f3493253a00db2c870d3740bc458ebb7d937bd0a6a4f9328373e0db305709"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:52d82b0d436edd6a1d22d94a344b9a58abd6c68c357ed44f22d4ba8179b37629"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b6f92e35e2658a5ed51c6634ceb5ddae32053182851d8cad2a5bc102a359b33"}, - {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:203b1d3eaebd34277be06a3eb880050f18a4e4d60861efba4fb946e31071a295"}, - {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:155e1a5693cf4b55af652f5c0f78ef36596c7f680ff3ec6eb4d7d85367259b2c"}, - {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:22ec2b3c191f43ed21f9545e9df94c37c6b49a5af0a874008ddc9132d49a2d9c"}, - {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7eda194dd46e40ec745bf76795a7cccb02a6a41f445ad49d3cf66518b0bd9cff"}, - {file = "lxml-5.3.1-cp311-cp311-win32.whl", hash = "sha256:fb7c61d4be18e930f75948705e9718618862e6fc2ed0d7159b2262be73f167a2"}, - {file = "lxml-5.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:c809eef167bf4a57af4b03007004896f5c60bd38dc3852fcd97a26eae3d4c9e6"}, - {file = "lxml-5.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e69add9b6b7b08c60d7ff0152c7c9a6c45b4a71a919be5abde6f98f1ea16421c"}, - {file = "lxml-5.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4e52e1b148867b01c05e21837586ee307a01e793b94072d7c7b91d2c2da02ffe"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4b382e0e636ed54cd278791d93fe2c4f370772743f02bcbe431a160089025c9"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2e49dc23a10a1296b04ca9db200c44d3eb32c8d8ec532e8c1fd24792276522a"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4399b4226c4785575fb20998dc571bc48125dc92c367ce2602d0d70e0c455eb0"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5412500e0dc5481b1ee9cf6b38bb3b473f6e411eb62b83dc9b62699c3b7b79f7"}, 
- {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c93ed3c998ea8472be98fb55aed65b5198740bfceaec07b2eba551e55b7b9ae"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:63d57fc94eb0bbb4735e45517afc21ef262991d8758a8f2f05dd6e4174944519"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:b450d7cabcd49aa7ab46a3c6aa3ac7e1593600a1a0605ba536ec0f1b99a04322"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:4df0ec814b50275ad6a99bc82a38b59f90e10e47714ac9871e1b223895825468"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d184f85ad2bb1f261eac55cddfcf62a70dee89982c978e92b9a74a1bfef2e367"}, - {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b725e70d15906d24615201e650d5b0388b08a5187a55f119f25874d0103f90dd"}, - {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a31fa7536ec1fb7155a0cd3a4e3d956c835ad0a43e3610ca32384d01f079ea1c"}, - {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3c3c8b55c7fc7b7e8877b9366568cc73d68b82da7fe33d8b98527b73857a225f"}, - {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d61ec60945d694df806a9aec88e8f29a27293c6e424f8ff91c80416e3c617645"}, - {file = "lxml-5.3.1-cp312-cp312-win32.whl", hash = "sha256:f4eac0584cdc3285ef2e74eee1513a6001681fd9753b259e8159421ed28a72e5"}, - {file = "lxml-5.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:29bfc8d3d88e56ea0a27e7c4897b642706840247f59f4377d81be8f32aa0cfbf"}, - {file = "lxml-5.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c093c7088b40d8266f57ed71d93112bd64c6724d31f0794c1e52cc4857c28e0e"}, - {file = "lxml-5.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b0884e3f22d87c30694e625b1e62e6f30d39782c806287450d9dc2fdf07692fd"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1637fa31ec682cd5760092adfabe86d9b718a75d43e65e211d5931809bc111e7"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a364e8e944d92dcbf33b6b494d4e0fb3499dcc3bd9485beb701aa4b4201fa414"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:779e851fd0e19795ccc8a9bb4d705d6baa0ef475329fe44a13cf1e962f18ff1e"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c4393600915c308e546dc7003d74371744234e8444a28622d76fe19b98fa59d1"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:673b9d8e780f455091200bba8534d5f4f465944cbdd61f31dc832d70e29064a5"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2e4a570f6a99e96c457f7bec5ad459c9c420ee80b99eb04cbfcfe3fc18ec6423"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:71f31eda4e370f46af42fc9f264fafa1b09f46ba07bdbee98f25689a04b81c20"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:42978a68d3825eaac55399eb37a4d52012a205c0c6262199b8b44fcc6fd686e8"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:8b1942b3e4ed9ed551ed3083a2e6e0772de1e5e3aca872d955e2e86385fb7ff9"}, - {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:85c4f11be9cf08917ac2a5a8b6e1ef63b2f8e3799cec194417e76826e5f1de9c"}, - {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:231cf4d140b22a923b1d0a0a4e0b4f972e5893efcdec188934cc65888fd0227b"}, - {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5865b270b420eda7b68928d70bb517ccbe045e53b1a428129bb44372bf3d7dd5"}, - {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dbf7bebc2275016cddf3c997bf8a0f7044160714c64a9b83975670a04e6d2252"}, - {file = "lxml-5.3.1-cp313-cp313-win32.whl", hash = "sha256:d0751528b97d2b19a388b302be2a0ee05817097bab46ff0ed76feeec24951f78"}, - {file = "lxml-5.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:91fb6a43d72b4f8863d21f347a9163eecbf36e76e2f51068d59cd004c506f332"}, - {file = "lxml-5.3.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:016b96c58e9a4528219bb563acf1aaaa8bc5452e7651004894a973f03b84ba81"}, - {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82a4bb10b0beef1434fb23a09f001ab5ca87895596b4581fd53f1e5145a8934a"}, - {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d68eeef7b4d08a25e51897dac29bcb62aba830e9ac6c4e3297ee7c6a0cf6439"}, - {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:f12582b8d3b4c6be1d298c49cb7ae64a3a73efaf4c2ab4e37db182e3545815ac"}, - {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2df7ed5edeb6bd5590914cd61df76eb6cce9d590ed04ec7c183cf5509f73530d"}, - {file = "lxml-5.3.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:585c4dc429deebc4307187d2b71ebe914843185ae16a4d582ee030e6cfbb4d8a"}, - {file = "lxml-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:06a20d607a86fccab2fc15a77aa445f2bdef7b49ec0520a842c5c5afd8381576"}, - {file = "lxml-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:057e30d0012439bc54ca427a83d458752ccda725c1c161cc283db07bcad43cf9"}, - {file = "lxml-5.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4867361c049761a56bd21de507cab2c2a608c55102311d142ade7dab67b34f32"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3dddf0fb832486cc1ea71d189cb92eb887826e8deebe128884e15020bb6e3f61"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bcc211542f7af6f2dfb705f5f8b74e865592778e6cafdfd19c792c244ccce19"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaca5a812f050ab55426c32177091130b1e49329b3f002a32934cd0245571307"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:236610b77589faf462337b3305a1be91756c8abc5a45ff7ca8f245a71c5dab70"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:aed57b541b589fa05ac248f4cb1c46cbb432ab82cbd467d1c4f6a2bdc18aecf9"}, - {file = "lxml-5.3.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:75fa3d6946d317ffc7016a6fcc44f42db6d514b7fdb8b4b28cbe058303cb6e53"}, - {file = "lxml-5.3.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:96eef5b9f336f623ffc555ab47a775495e7e8846dde88de5f941e2906453a1ce"}, - {file = "lxml-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:ef45f31aec9be01379fc6c10f1d9c677f032f2bac9383c827d44f620e8a88407"}, - {file = "lxml-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0611da6b07dd3720f492db1b463a4d1175b096b49438761cc9f35f0d9eaaef5"}, - {file = "lxml-5.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2aca14c235c7a08558fe0a4786a1a05873a01e86b474dfa8f6df49101853a4e"}, - {file = 
"lxml-5.3.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82fce1d964f065c32c9517309f0c7be588772352d2f40b1574a214bd6e6098"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7aae7a3d63b935babfdc6864b31196afd5145878ddd22f5200729006366bc4d5"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8e0d177b1fe251c3b1b914ab64135475c5273c8cfd2857964b2e3bb0fe196a7"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:6c4dd3bfd0c82400060896717dd261137398edb7e524527438c54a8c34f736bf"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f1208c1c67ec9e151d78aa3435aa9b08a488b53d9cfac9b699f15255a3461ef2"}, - {file = "lxml-5.3.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c6aacf00d05b38a5069826e50ae72751cb5bc27bdc4d5746203988e429b385bb"}, - {file = "lxml-5.3.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5881aaa4bf3a2d086c5f20371d3a5856199a0d8ac72dd8d0dbd7a2ecfc26ab73"}, - {file = "lxml-5.3.1-cp38-cp38-win32.whl", hash = "sha256:45fbb70ccbc8683f2fb58bea89498a7274af1d9ec7995e9f4af5604e028233fc"}, - {file = "lxml-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:7512b4d0fc5339d5abbb14d1843f70499cab90d0b864f790e73f780f041615d7"}, - {file = "lxml-5.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5885bc586f1edb48e5d68e7a4b4757b5feb2a496b64f462b4d65950f5af3364f"}, - {file = "lxml-5.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1b92fe86e04f680b848fff594a908edfa72b31bfc3499ef7433790c11d4c8cd8"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a091026c3bf7519ab1e64655a3f52a59ad4a4e019a6f830c24d6430695b1cf6a"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ffb141361108e864ab5f1813f66e4e1164181227f9b1f105b042729b6c15125"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3715cdf0dd31b836433af9ee9197af10e3df41d273c19bb249230043667a5dfd"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88b72eb7222d918c967202024812c2bfb4048deeb69ca328363fb8e15254c549"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa59974880ab5ad8ef3afaa26f9bda148c5f39e06b11a8ada4660ecc9fb2feb3"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3bb8149840daf2c3f97cebf00e4ed4a65a0baff888bf2605a8d0135ff5cf764e"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:0d6b2fa86becfa81f0a0271ccb9eb127ad45fb597733a77b92e8a35e53414914"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:136bf638d92848a939fd8f0e06fcf92d9f2e4b57969d94faae27c55f3d85c05b"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:89934f9f791566e54c1d92cdc8f8fd0009447a5ecdb1ec6b810d5f8c4955f6be"}, - {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8ade0363f776f87f982572c2860cc43c65ace208db49c76df0a21dde4ddd16e"}, - {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:bfbbab9316330cf81656fed435311386610f78b6c93cc5db4bebbce8dd146675"}, - {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:172d65f7c72a35a6879217bcdb4bb11bc88d55fb4879e7569f55616062d387c2"}, - {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash 
= "sha256:e3c623923967f3e5961d272718655946e5322b8d058e094764180cdee7bab1af"}, - {file = "lxml-5.3.1-cp39-cp39-win32.whl", hash = "sha256:ce0930a963ff593e8bb6fda49a503911accc67dee7e5445eec972668e672a0f0"}, - {file = "lxml-5.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:f7b64fcd670bca8800bc10ced36620c6bbb321e7bc1214b9c0c0df269c1dddc2"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:afa578b6524ff85fb365f454cf61683771d0170470c48ad9d170c48075f86725"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f5e80adf0aafc7b5454f2c1cb0cde920c9b1f2cbd0485f07cc1d0497c35c5d"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dd0b80ac2d8f13ffc906123a6f20b459cb50a99222d0da492360512f3e50f84"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:422c179022ecdedbe58b0e242607198580804253da220e9454ffe848daa1cfd2"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:524ccfded8989a6595dbdda80d779fb977dbc9a7bc458864fc9a0c2fc15dc877"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48fd46bf7155def2e15287c6f2b133a2f78e2d22cdf55647269977b873c65499"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:05123fad495a429f123307ac6d8fd6f977b71e9a0b6d9aeeb8f80c017cb17131"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a243132767150a44e6a93cd1dde41010036e1cbc63cc3e9fe1712b277d926ce3"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c92ea6d9dd84a750b2bae72ff5e8cf5fdd13e58dda79c33e057862c29a8d5b50"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2f1be45d4c15f237209bbf123a0e05b5d630c8717c42f59f31ea9eae2ad89394"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a83d3adea1e0ee36dac34627f78ddd7f093bb9cfc0a8e97f1572a949b695cb98"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:3edbb9c9130bac05d8c3fe150c51c337a471cc7fdb6d2a0a7d3a88e88a829314"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2f23cf50eccb3255b6e913188291af0150d89dab44137a69e14e4dcb7be981f1"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df7e5edac4778127f2bf452e0721a58a1cfa4d1d9eac63bdd650535eb8543615"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:094b28ed8a8a072b9e9e2113a81fda668d2053f2ca9f2d202c2c8c7c2d6516b1"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:514fe78fc4b87e7a7601c92492210b20a1b0c6ab20e71e81307d9c2e377c64de"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8fffc08de02071c37865a155e5ea5fce0282e1546fd5bde7f6149fcaa32558ac"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4b0d5cdba1b655d5b18042ac9c9ff50bda33568eb80feaaca4fc237b9c4fbfde"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3031e4c16b59424e8d78522c69b062d301d951dc55ad8685736c3335a97fc270"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb659702a45136c743bc130760c6f137870d4df3a9e14386478b8a0511abcfca"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:5a11b16a33656ffc43c92a5343a28dc71eefe460bcc2a4923a96f292692709f6"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5ae125276f254b01daa73e2c103363d3e99e3e10505686ac7d9d2442dd4627a"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c76722b5ed4a31ba103e0dc77ab869222ec36efe1a614e42e9bcea88a36186fe"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:33e06717c00c788ab4e79bc4726ecc50c54b9bfb55355eae21473c145d83c2d2"}, - {file = "lxml-5.3.1.tar.gz", hash = "sha256:106b7b5d2977b339f1e97efe2778e2ab20e99994cbb0ec5e55771ed0795920c8"}, + {file = "lxml-5.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c4b84d6b580a9625dfa47269bf1fd7fbba7ad69e08b16366a46acb005959c395"}, + {file = "lxml-5.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4c08ecb26e4270a62f81f81899dfff91623d349e433b126931c9c4577169666"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef926e9f11e307b5a7c97b17c5c609a93fb59ffa8337afac8f89e6fe54eb0b37"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:017ceeabe739100379fe6ed38b033cd244ce2da4e7f6f07903421f57da3a19a2"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dae97d9435dc90590f119d056d233c33006b2fd235dd990d5564992261ee7ae8"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:910f39425c6798ce63c93976ae5af5fff6949e2cb446acbd44d6d892103eaea8"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9780de781a0d62a7c3680d07963db3048b919fc9e3726d9cfd97296a65ffce1"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:1a06b0c6ba2e3ca45a009a78a4eb4d6b63831830c0a83dcdc495c13b9ca97d3e"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:4c62d0a34d1110769a1bbaf77871a4b711a6f59c4846064ccb78bc9735978644"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:8f961a4e82f411b14538fe5efc3e6b953e17f5e809c463f0756a0d0e8039b700"}, + {file = "lxml-5.3.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:3dfc78f5f9251b6b8ad37c47d4d0bfe63ceb073a916e5b50a3bf5fd67a703335"}, + {file = "lxml-5.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:10e690bc03214d3537270c88e492b8612d5e41b884f232df2b069b25b09e6711"}, + {file = "lxml-5.3.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa837e6ee9534de8d63bc4c1249e83882a7ac22bd24523f83fad68e6ffdf41ae"}, + {file = "lxml-5.3.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:da4c9223319400b97a2acdfb10926b807e51b69eb7eb80aad4942c0516934858"}, + {file = "lxml-5.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dc0e9bdb3aa4d1de703a437576007d366b54f52c9897cae1a3716bb44fc1fc85"}, + {file = "lxml-5.3.2-cp310-cp310-win32.win32.whl", hash = "sha256:dd755a0a78dd0b2c43f972e7b51a43be518ebc130c9f1a7c4480cf08b4385486"}, + {file = "lxml-5.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:d64ea1686474074b38da13ae218d9fde0d1dc6525266976808f41ac98d9d7980"}, + {file = "lxml-5.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9d61a7d0d208ace43986a92b111e035881c4ed45b1f5b7a270070acae8b0bfb4"}, + {file = "lxml-5.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:856dfd7eda0b75c29ac80a31a6411ca12209183e866c33faf46e77ace3ce8a79"}, + {file = 
"lxml-5.3.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a01679e4aad0727bedd4c9407d4d65978e920f0200107ceeffd4b019bd48529"}, + {file = "lxml-5.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6b37b4c3acb8472d191816d4582379f64d81cecbdce1a668601745c963ca5cc"}, + {file = "lxml-5.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3df5a54e7b7c31755383f126d3a84e12a4e0333db4679462ef1165d702517477"}, + {file = "lxml-5.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c09a40f28dcded933dc16217d6a092be0cc49ae25811d3b8e937c8060647c353"}, + {file = "lxml-5.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1ef20f1851ccfbe6c5a04c67ec1ce49da16ba993fdbabdce87a92926e505412"}, + {file = "lxml-5.3.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:f79a63289dbaba964eb29ed3c103b7911f2dce28c36fe87c36a114e6bd21d7ad"}, + {file = "lxml-5.3.2-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:75a72697d95f27ae00e75086aed629f117e816387b74a2f2da6ef382b460b710"}, + {file = "lxml-5.3.2-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:b9b00c9ee1cc3a76f1f16e94a23c344e0b6e5c10bec7f94cf2d820ce303b8c01"}, + {file = "lxml-5.3.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:77cbcab50cbe8c857c6ba5f37f9a3976499c60eada1bf6d38f88311373d7b4bc"}, + {file = "lxml-5.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29424058f072a24622a0a15357bca63d796954758248a72da6d512f9bd9a4493"}, + {file = "lxml-5.3.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7d82737a8afe69a7c80ef31d7626075cc7d6e2267f16bf68af2c764b45ed68ab"}, + {file = "lxml-5.3.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:95473d1d50a5d9fcdb9321fdc0ca6e1edc164dce4c7da13616247d27f3d21e31"}, + {file = "lxml-5.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2162068f6da83613f8b2a32ca105e37a564afd0d7009b0b25834d47693ce3538"}, + {file = "lxml-5.3.2-cp311-cp311-win32.whl", hash = "sha256:f8695752cf5d639b4e981afe6c99e060621362c416058effd5c704bede9cb5d1"}, + {file = "lxml-5.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:d1a94cbb4ee64af3ab386c2d63d6d9e9cf2e256ac0fd30f33ef0a3c88f575174"}, + {file = "lxml-5.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:16b3897691ec0316a1aa3c6585f61c8b7978475587c5b16fc1d2c28d283dc1b0"}, + {file = "lxml-5.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a8d4b34a0eeaf6e73169dcfd653c8d47f25f09d806c010daf074fba2db5e2d3f"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9cd7a959396da425022e1e4214895b5cfe7de7035a043bcc2d11303792b67554"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cac5eaeec3549c5df7f8f97a5a6db6963b91639389cdd735d5a806370847732b"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29b5f7d77334877c2146e7bb8b94e4df980325fab0a8af4d524e5d43cd6f789d"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13f3495cfec24e3d63fffd342cc8141355d1d26ee766ad388775f5c8c5ec3932"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e70ad4c9658beeff99856926fd3ee5fde8b519b92c693f856007177c36eb2e30"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = 
"sha256:507085365783abd7879fa0a6fa55eddf4bdd06591b17a2418403bb3aff8a267d"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:5bb304f67cbf5dfa07edad904732782cbf693286b9cd85af27059c5779131050"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:3d84f5c093645c21c29a4e972b84cb7cf682f707f8706484a5a0c7ff13d7a988"}, + {file = "lxml-5.3.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:bdc13911db524bd63f37b0103af014b7161427ada41f1b0b3c9b5b5a9c1ca927"}, + {file = "lxml-5.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ec944539543f66ebc060ae180d47e86aca0188bda9cbfadff47d86b0dc057dc"}, + {file = "lxml-5.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:59d437cc8a7f838282df5a199cf26f97ef08f1c0fbec6e84bd6f5cc2b7913f6e"}, + {file = "lxml-5.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e275961adbd32e15672e14e0cc976a982075208224ce06d149c92cb43db5b93"}, + {file = "lxml-5.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:038aeb6937aa404480c2966b7f26f1440a14005cb0702078c173c028eca72c31"}, + {file = "lxml-5.3.2-cp312-cp312-win32.whl", hash = "sha256:3c2c8d0fa3277147bff180e3590be67597e17d365ce94beb2efa3138a2131f71"}, + {file = "lxml-5.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:77809fcd97dfda3f399102db1794f7280737b69830cd5c961ac87b3c5c05662d"}, + {file = "lxml-5.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:77626571fb5270ceb36134765f25b665b896243529eefe840974269b083e090d"}, + {file = "lxml-5.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:78a533375dc7aa16d0da44af3cf6e96035e484c8c6b2b2445541a5d4d3d289ee"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6f62b2404b3f3f0744bbcabb0381c5fe186fa2a9a67ecca3603480f4846c585"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ea918da00091194526d40c30c4996971f09dacab032607581f8d8872db34fbf"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c35326f94702a7264aa0eea826a79547d3396a41ae87a70511b9f6e9667ad31c"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3bef90af21d31c4544bc917f51e04f94ae11b43156356aff243cdd84802cbf2"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52fa7ba11a495b7cbce51573c73f638f1dcff7b3ee23697467dc063f75352a69"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:ad131e2c4d2c3803e736bb69063382334e03648de2a6b8f56a878d700d4b557d"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:00a4463ca409ceacd20490a893a7e08deec7870840eff33dc3093067b559ce3e"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:87e8d78205331cace2b73ac8249294c24ae3cba98220687b5b8ec5971a2267f1"}, + {file = "lxml-5.3.2-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bf6389133bb255e530a4f2f553f41c4dd795b1fbb6f797aea1eff308f1e11606"}, + {file = "lxml-5.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b3709fc752b42fb6b6ffa2ba0a5b9871646d97d011d8f08f4d5b3ee61c7f3b2b"}, + {file = "lxml-5.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:abc795703d0de5d83943a4badd770fbe3d1ca16ee4ff3783d7caffc252f309ae"}, + {file = "lxml-5.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:98050830bb6510159f65d9ad1b8aca27f07c01bb3884ba95f17319ccedc4bcf9"}, + {file = 
"lxml-5.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6ba465a91acc419c5682f8b06bcc84a424a7aa5c91c220241c6fd31de2a72bc6"}, + {file = "lxml-5.3.2-cp313-cp313-win32.whl", hash = "sha256:56a1d56d60ea1ec940f949d7a309e0bff05243f9bd337f585721605670abb1c1"}, + {file = "lxml-5.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:1a580dc232c33d2ad87d02c8a3069d47abbcdce974b9c9cc82a79ff603065dbe"}, + {file = "lxml-5.3.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1a59f7fe888d0ec1916d0ad69364c5400cfa2f885ae0576d909f342e94d26bc9"}, + {file = "lxml-5.3.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d67b50abc2df68502a26ed2ccea60c1a7054c289fb7fc31c12e5e55e4eec66bd"}, + {file = "lxml-5.3.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2cb08d2cb047c98d6fbbb2e77d6edd132ad6e3fa5aa826ffa9ea0c9b1bc74a84"}, + {file = "lxml-5.3.2-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:495ddb7e10911fb4d673d8aa8edd98d1eadafb3b56e8c1b5f427fd33cadc455b"}, + {file = "lxml-5.3.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:884d9308ac7d581b705a3371185282e1b8eebefd68ccf288e00a2d47f077cc51"}, + {file = "lxml-5.3.2-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:37f3d7cf7f2dd2520df6cc8a13df4c3e3f913c8e0a1f9a875e44f9e5f98d7fee"}, + {file = "lxml-5.3.2-cp36-cp36m-win32.whl", hash = "sha256:e885a1bf98a76dff0a0648850c3083b99d9358ef91ba8fa307c681e8e0732503"}, + {file = "lxml-5.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b45f505d0d85f4cdd440cd7500689b8e95110371eaa09da0c0b1103e9a05030f"}, + {file = "lxml-5.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b53cd668facd60b4f0dfcf092e01bbfefd88271b5b4e7b08eca3184dd006cb30"}, + {file = "lxml-5.3.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5dea998c891f082fe204dec6565dbc2f9304478f2fc97bd4d7a940fec16c873"}, + {file = "lxml-5.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d46bc3e58b01e4f38d75e0d7f745a46875b7a282df145aca9d1479c65ff11561"}, + {file = "lxml-5.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:661feadde89159fd5f7d7639a81ccae36eec46974c4a4d5ccce533e2488949c8"}, + {file = "lxml-5.3.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:43af2a69af2cacc2039024da08a90174e85f3af53483e6b2e3485ced1bf37151"}, + {file = "lxml-5.3.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:1539f962d82436f3d386eb9f29b2a29bb42b80199c74a695dff51b367a61ec0a"}, + {file = "lxml-5.3.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:6673920bf976421b5fac4f29b937702eef4555ee42329546a5fc68bae6178a48"}, + {file = "lxml-5.3.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:9fa722a9cd8845594593cce399a49aa6bfc13b6c83a7ee05e2ab346d9253d52f"}, + {file = "lxml-5.3.2-cp37-cp37m-win32.whl", hash = "sha256:2eadd4efa487f4710755415aed3d6ae9ac8b4327ea45226ffccb239766c8c610"}, + {file = "lxml-5.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:83d8707b1b08cd02c04d3056230ec3b771b18c566ec35e723e60cdf037064e08"}, + {file = "lxml-5.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc6e8678bfa5ccba370103976ccfcf776c85c83da9220ead41ea6fd15d2277b4"}, + {file = "lxml-5.3.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0bed509662f67f719119ad56006cd4a38efa68cfa74383060612044915e5f7ad"}, + {file = "lxml-5.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4e3925975fadd6fd72a6d80541a6ec75dfbad54044a03aa37282dafcb80fbdfa"}, + {file = "lxml-5.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83c0462dedc5213ac586164c6d7227da9d4d578cf45dd7fbab2ac49b63a008eb"}, + {file = "lxml-5.3.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:53e3f9ca72858834688afa17278649d62aa768a4b2018344be00c399c4d29e95"}, + {file = "lxml-5.3.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:32ba634ef3f1b20f781019a91d78599224dc45745dd572f951adbf1c0c9b0d75"}, + {file = "lxml-5.3.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:1b16504c53f41da5fcf04868a80ac40a39d3eec5329caf761114caec6e844ad1"}, + {file = "lxml-5.3.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:1f9682786138549da44ca4c49b20e7144d063b75f2b2ba611f4cff9b83db1062"}, + {file = "lxml-5.3.2-cp38-cp38-win32.whl", hash = "sha256:d8f74ef8aacdf6ee5c07566a597634bb8535f6b53dc89790db43412498cf6026"}, + {file = "lxml-5.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:49f1cee0fa27e1ee02589c696a9bdf4027e7427f184fa98e6bef0c6613f6f0fa"}, + {file = "lxml-5.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:741c126bcf9aa939e950e64e5e0a89c8e01eda7a5f5ffdfc67073f2ed849caea"}, + {file = "lxml-5.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ab6e9e6aca1fd7d725ffa132286e70dee5b9a4561c5ed291e836440b82888f89"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58e8c9b9ed3c15c2d96943c14efc324b69be6352fe5585733a7db2bf94d97841"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7811828ddfb8c23f4f1fbf35e7a7b2edec2f2e4c793dee7c52014f28c4b35238"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72968623efb1e12e950cbdcd1d0f28eb14c8535bf4be153f1bfffa818b1cf189"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebfceaa2ea588b54efb6160e3520983663d45aed8a3895bb2031ada080fb5f04"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d685d458505b2bfd2e28c812749fe9194a2b0ce285a83537e4309a187ffa270b"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:334e0e414dab1f5366ead8ca34ec3148415f236d5660e175f1d640b11d645847"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02e56f7de72fa82561eae69628a7d6febd7891d72248c7ff7d3e7814d4031017"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:638d06b4e1d34d1a074fa87deed5fb55c18485fa0dab97abc5604aad84c12031"}, + {file = "lxml-5.3.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:354dab7206d22d7a796fa27c4c5bffddd2393da2ad61835355a4759d435beb47"}, + {file = "lxml-5.3.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d9d9f82ff2c3bf9bb777cb355149f7f3a98ec58f16b7428369dc27ea89556a4c"}, + {file = "lxml-5.3.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:95ad58340e3b7d2b828efc370d1791856613c5cb62ae267158d96e47b3c978c9"}, + {file = "lxml-5.3.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:30fe05f4b7f6e9eb32862745512e7cbd021070ad0f289a7f48d14a0d3fc1d8a9"}, + {file = "lxml-5.3.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:34c688fef86f73dbca0798e0a61bada114677006afa524a8ce97d9e5fabf42e6"}, + {file = "lxml-5.3.2-cp39-cp39-win32.whl", hash = "sha256:4d6d3d1436d57f41984920667ec5ef04bcb158f80df89ac4d0d3f775a2ac0c87"}, + {file = "lxml-5.3.2-cp39-cp39-win_amd64.whl", hash = 
"sha256:2996e1116bbb3ae2a1fbb2ba4da8f92742290b4011e7e5bce2bd33bbc9d9485a"}, + {file = "lxml-5.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:521ab9c80b98c30b2d987001c3ede2e647e92eeb2ca02e8cb66ef5122d792b24"}, + {file = "lxml-5.3.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f1231b0f9810289d41df1eacc4ebb859c63e4ceee29908a0217403cddce38d0"}, + {file = "lxml-5.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271f1a4d5d2b383c36ad8b9b489da5ea9c04eca795a215bae61ed6a57cf083cd"}, + {file = "lxml-5.3.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:6fca8a5a13906ba2677a5252752832beb0f483a22f6c86c71a2bb320fba04f61"}, + {file = "lxml-5.3.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ea0c3b7922209160faef194a5b6995bfe7fa05ff7dda6c423ba17646b7b9de10"}, + {file = "lxml-5.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0a006390834603e5952a2ff74b9a31a6007c7cc74282a087aa6467afb4eea987"}, + {file = "lxml-5.3.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:eae4136a3b8c4cf76f69461fc8f9410d55d34ea48e1185338848a888d71b9675"}, + {file = "lxml-5.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d48e06be8d8c58e7feaedd8a37897a6122637efb1637d7ce00ddf5f11f9a92ad"}, + {file = "lxml-5.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4b83aed409134093d90e114007034d2c1ebcd92e501b71fd9ec70e612c8b2eb"}, + {file = "lxml-5.3.2-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7a0e77edfe26d3703f954d46bed52c3ec55f58586f18f4b7f581fc56954f1d84"}, + {file = "lxml-5.3.2-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:19f6fcfd15b82036b4d235749d78785eb9c991c7812012dc084e0d8853b4c1c0"}, + {file = "lxml-5.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d49919c95d31ee06eefd43d8c6f69a3cc9bdf0a9b979cc234c4071f0eb5cb173"}, + {file = "lxml-5.3.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2d0a60841410123c533990f392819804a8448853f06daf412c0f383443925e89"}, + {file = "lxml-5.3.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b7f729e03090eb4e3981f10efaee35e6004b548636b1a062b8b9a525e752abc"}, + {file = "lxml-5.3.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:579df6e20d8acce3bcbc9fb8389e6ae00c19562e929753f534ba4c29cfe0be4b"}, + {file = "lxml-5.3.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2abcf3f3b8367d6400b908d00d4cd279fc0b8efa287e9043820525762d383699"}, + {file = "lxml-5.3.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:348c06cb2e3176ce98bee8c397ecc89181681afd13d85870df46167f140a305f"}, + {file = "lxml-5.3.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:617ecaccd565cbf1ac82ffcaa410e7da5bd3a4b892bb3543fb2fe19bd1c4467d"}, + {file = "lxml-5.3.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c3eb4278dcdb9d86265ed2c20b9ecac45f2d6072e3904542e591e382c87a9c00"}, + {file = "lxml-5.3.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258b6b53458c5cbd2a88795557ff7e0db99f73a96601b70bc039114cd4ee9e02"}, + {file = "lxml-5.3.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a9d8d25ed2f2183e8471c97d512a31153e123ac5807f61396158ef2793cb6e"}, + {file = "lxml-5.3.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73bcb635a848c18a3e422ea0ab0092f2e4ef3b02d8ebe87ab49748ebc8ec03d8"}, + {file = 
"lxml-5.3.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1545de0a69a16ced5767bae8cca1801b842e6e49e96f5e4a8a5acbef023d970b"}, + {file = "lxml-5.3.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:165fcdc2f40fc0fe88a3c3c06c9c2a097388a90bda6a16e6f7c9199c903c9b8e"}, + {file = "lxml-5.3.2.tar.gz", hash = "sha256:773947d0ed809ddad824b7b14467e1a481b8976e87278ac4a730c2f7c7fcddc1"}, ] [package.extras] @@ -1779,18 +1726,17 @@ reference = "aliyun" [[package]] name = "markdown" -version = "3.7" +version = "3.8" description = "Python implementation of John Gruber's Markdown." optional = false -python-versions = ">=3.8" -groups = ["main"] +python-versions = ">=3.9" files = [ - {file = "Markdown-3.7-py3-none-any.whl", hash = "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803"}, - {file = "markdown-3.7.tar.gz", hash = "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2"}, + {file = "markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc"}, + {file = "markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f"}, ] [package.extras] -docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] testing = ["coverage", "pyyaml"] [package.source] @@ -1804,7 +1750,6 @@ version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -1834,7 +1779,6 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -1910,7 +1854,6 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -1927,7 +1870,6 @@ version = "1.1.0" description = "MessagePack serializer" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"}, {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"}, @@ -2002,104 +1944,115 @@ reference = "aliyun" [[package]] name = "multidict" -version = "6.2.0" +version = "6.4.3" description = "multidict implementation" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ - {file = "multidict-6.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b9f6392d98c0bd70676ae41474e2eecf4c7150cb419237a41f8f96043fcb81d1"}, - {file = "multidict-6.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3501621d5e86f1a88521ea65d5cad0a0834c77b26f193747615b7c911e5422d2"}, - {file = "multidict-6.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32ed748ff9ac682eae7859790d3044b50e3076c7d80e17a44239683769ff485e"}, - {file = "multidict-6.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc826b9a8176e686b67aa60fd6c6a7047b0461cae5591ea1dc73d28f72332a8a"}, - {file = "multidict-6.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:214207dcc7a6221d9942f23797fe89144128a71c03632bf713d918db99bd36de"}, - {file = "multidict-6.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:05fefbc3cddc4e36da209a5e49f1094bbece9a581faa7f3589201fd95df40e5d"}, - {file = "multidict-6.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e851e6363d0dbe515d8de81fd544a2c956fdec6f8a049739562286727d4a00c3"}, - {file = "multidict-6.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32c9b4878f48be3e75808ea7e499d6223b1eea6d54c487a66bc10a1871e3dc6a"}, - {file = "multidict-6.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7243c5a6523c5cfeca76e063efa5f6a656d1d74c8b1fc64b2cd1e84e507f7e2a"}, - {file = "multidict-6.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0e5a644e50ef9fb87878d4d57907f03a12410d2aa3b93b3acdf90a741df52c49"}, - {file = "multidict-6.2.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0dc25a3293c50744796e87048de5e68996104d86d940bb24bc3ec31df281b191"}, - {file = "multidict-6.2.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:a49994481b99cd7dedde07f2e7e93b1d86c01c0fca1c32aded18f10695ae17eb"}, - {file = "multidict-6.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:641cf2e3447c9ecff2f7aa6e9eee9eaa286ea65d57b014543a4911ff2799d08a"}, - {file = 
"multidict-6.2.0-cp310-cp310-win32.whl", hash = "sha256:0c383d28857f66f5aebe3e91d6cf498da73af75fbd51cedbe1adfb85e90c0460"}, - {file = "multidict-6.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:a33273a541f1e1a8219b2a4ed2de355848ecc0254264915b9290c8d2de1c74e1"}, - {file = "multidict-6.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:84e87a7d75fa36839a3a432286d719975362d230c70ebfa0948549cc38bd5b46"}, - {file = "multidict-6.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8de4d42dffd5ced9117af2ce66ba8722402541a3aa98ffdf78dde92badb68932"}, - {file = "multidict-6.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7d91a230c7f8af86c904a5a992b8c064b66330544693fd6759c3d6162382ecf"}, - {file = "multidict-6.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f6cad071960ba1914fa231677d21b1b4a3acdcce463cee41ea30bc82e6040cf"}, - {file = "multidict-6.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f74f2fc51555f4b037ef278efc29a870d327053aba5cb7d86ae572426c7cccc"}, - {file = "multidict-6.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14ed9ed1bfedd72a877807c71113deac292bf485159a29025dfdc524c326f3e1"}, - {file = "multidict-6.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ac3fcf9a2d369bd075b2c2965544036a27ccd277fc3c04f708338cc57533081"}, - {file = "multidict-6.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fc6af8e39f7496047c7876314f4317736eac82bf85b54c7c76cf1a6f8e35d98"}, - {file = "multidict-6.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f8cb1329f42fadfb40d6211e5ff568d71ab49be36e759345f91c69d1033d633"}, - {file = "multidict-6.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5389445f0173c197f4a3613713b5fb3f3879df1ded2a1a2e4bc4b5b9c5441b7e"}, - {file = "multidict-6.2.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:94a7bb972178a8bfc4055db80c51efd24baefaced5e51c59b0d598a004e8305d"}, - {file = "multidict-6.2.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da51d8928ad8b4244926fe862ba1795f0b6e68ed8c42cd2f822d435db9c2a8f4"}, - {file = "multidict-6.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:063be88bd684782a0715641de853e1e58a2f25b76388538bd62d974777ce9bc2"}, - {file = "multidict-6.2.0-cp311-cp311-win32.whl", hash = "sha256:52b05e21ff05729fbea9bc20b3a791c3c11da61649ff64cce8257c82a020466d"}, - {file = "multidict-6.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1e2a2193d3aa5cbf5758f6d5680a52aa848e0cf611da324f71e5e48a9695cc86"}, - {file = "multidict-6.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:437c33561edb6eb504b5a30203daf81d4a9b727e167e78b0854d9a4e18e8950b"}, - {file = "multidict-6.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9f49585f4abadd2283034fc605961f40c638635bc60f5162276fec075f2e37a4"}, - {file = "multidict-6.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5dd7106d064d05896ce28c97da3f46caa442fe5a43bc26dfb258e90853b39b44"}, - {file = "multidict-6.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e25b11a0417475f093d0f0809a149aff3943c2c56da50fdf2c3c88d57fe3dfbd"}, - {file = "multidict-6.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac380cacdd3b183338ba63a144a34e9044520a6fb30c58aa14077157a033c13e"}, - {file = "multidict-6.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:61d5541f27533f803a941d3a3f8a3d10ed48c12cf918f557efcbf3cd04ef265c"}, - {file = "multidict-6.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:facaf11f21f3a4c51b62931feb13310e6fe3475f85e20d9c9fdce0d2ea561b87"}, - {file = "multidict-6.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:095a2eabe8c43041d3e6c2cb8287a257b5f1801c2d6ebd1dd877424f1e89cf29"}, - {file = "multidict-6.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0cc398350ef31167e03f3ca7c19313d4e40a662adcb98a88755e4e861170bdd"}, - {file = "multidict-6.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7c611345bbe7cb44aabb877cb94b63e86f2d0db03e382667dbd037866d44b4f8"}, - {file = "multidict-6.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8cd1a0644ccaf27e9d2f6d9c9474faabee21f0578fe85225cc5af9a61e1653df"}, - {file = "multidict-6.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:89b3857652183b8206a891168af47bac10b970d275bba1f6ee46565a758c078d"}, - {file = "multidict-6.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:125dd82b40f8c06d08d87b3510beaccb88afac94e9ed4a6f6c71362dc7dbb04b"}, - {file = "multidict-6.2.0-cp312-cp312-win32.whl", hash = "sha256:76b34c12b013d813e6cb325e6bd4f9c984db27758b16085926bbe7ceeaace626"}, - {file = "multidict-6.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:0b183a959fb88ad1be201de2c4bdf52fa8e46e6c185d76201286a97b6f5ee65c"}, - {file = "multidict-6.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5c5e7d2e300d5cb3b2693b6d60d3e8c8e7dd4ebe27cd17c9cb57020cac0acb80"}, - {file = "multidict-6.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:256d431fe4583c5f1e0f2e9c4d9c22f3a04ae96009b8cfa096da3a8723db0a16"}, - {file = "multidict-6.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a3c0ff89fe40a152e77b191b83282c9664357dce3004032d42e68c514ceff27e"}, - {file = "multidict-6.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef7d48207926edbf8b16b336f779c557dd8f5a33035a85db9c4b0febb0706817"}, - {file = "multidict-6.2.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3c099d3899b14e1ce52262eb82a5f5cb92157bb5106bf627b618c090a0eadc"}, - {file = "multidict-6.2.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e16e7297f29a544f49340012d6fc08cf14de0ab361c9eb7529f6a57a30cbfda1"}, - {file = "multidict-6.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:042028348dc5a1f2be6c666437042a98a5d24cee50380f4c0902215e5ec41844"}, - {file = "multidict-6.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08549895e6a799bd551cf276f6e59820aa084f0f90665c0f03dd3a50db5d3c48"}, - {file = "multidict-6.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4ccfd74957ef53fa7380aaa1c961f523d582cd5e85a620880ffabd407f8202c0"}, - {file = "multidict-6.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:83b78c680d4b15d33042d330c2fa31813ca3974197bddb3836a5c635a5fd013f"}, - {file = "multidict-6.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b4c153863dd6569f6511845922c53e39c8d61f6e81f228ad5443e690fca403de"}, - {file = "multidict-6.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:98aa8325c7f47183b45588af9c434533196e241be0a4e4ae2190b06d17675c02"}, - {file = "multidict-6.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9e658d1373c424457ddf6d55ec1db93c280b8579276bebd1f72f113072df8a5d"}, - {file = 
"multidict-6.2.0-cp313-cp313-win32.whl", hash = "sha256:3157126b028c074951839233647bd0e30df77ef1fedd801b48bdcad242a60f4e"}, - {file = "multidict-6.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:2e87f1926e91855ae61769ba3e3f7315120788c099677e0842e697b0bfb659f2"}, - {file = "multidict-6.2.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:2529ddbdaa424b2c6c2eb668ea684dd6b75b839d0ad4b21aad60c168269478d7"}, - {file = "multidict-6.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:13551d0e2d7201f0959725a6a769b6f7b9019a168ed96006479c9ac33fe4096b"}, - {file = "multidict-6.2.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d1996ee1330e245cd3aeda0887b4409e3930524c27642b046e4fae88ffa66c5e"}, - {file = "multidict-6.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c537da54ce4ff7c15e78ab1292e5799d0d43a2108e006578a57f531866f64025"}, - {file = "multidict-6.2.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f249badb360b0b4d694307ad40f811f83df4da8cef7b68e429e4eea939e49dd"}, - {file = "multidict-6.2.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48d39b1824b8d6ea7de878ef6226efbe0773f9c64333e1125e0efcfdd18a24c7"}, - {file = "multidict-6.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b99aac6bb2c37db336fa03a39b40ed4ef2818bf2dfb9441458165ebe88b793af"}, - {file = "multidict-6.2.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07bfa8bc649783e703263f783f73e27fef8cd37baaad4389816cf6a133141331"}, - {file = "multidict-6.2.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b2c00ad31fbc2cbac85d7d0fcf90853b2ca2e69d825a2d3f3edb842ef1544a2c"}, - {file = "multidict-6.2.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0d57a01a2a9fa00234aace434d8c131f0ac6e0ac6ef131eda5962d7e79edfb5b"}, - {file = "multidict-6.2.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:abf5b17bc0cf626a8a497d89ac691308dbd825d2ac372aa990b1ca114e470151"}, - {file = "multidict-6.2.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:f7716f7e7138252d88607228ce40be22660d6608d20fd365d596e7ca0738e019"}, - {file = "multidict-6.2.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d5a36953389f35f0a4e88dc796048829a2f467c9197265504593f0e420571547"}, - {file = "multidict-6.2.0-cp313-cp313t-win32.whl", hash = "sha256:e653d36b1bf48fa78c7fcebb5fa679342e025121ace8c87ab05c1cefd33b34fc"}, - {file = "multidict-6.2.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ca23db5fb195b5ef4fd1f77ce26cadefdf13dba71dab14dadd29b34d457d7c44"}, - {file = "multidict-6.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b4f3d66dd0354b79761481fc15bdafaba0b9d9076f1f42cc9ce10d7fcbda205a"}, - {file = "multidict-6.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e2a2d6749e1ff2c9c76a72c6530d5baa601205b14e441e6d98011000f47a7ac"}, - {file = "multidict-6.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cca83a629f77402cfadd58352e394d79a61c8015f1694b83ab72237ec3941f88"}, - {file = "multidict-6.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:781b5dd1db18c9e9eacc419027b0acb5073bdec9de1675c0be25ceb10e2ad133"}, - {file = "multidict-6.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cf8d370b2fea27fb300825ec3984334f7dd54a581bde6456799ba3776915a656"}, - {file = "multidict-6.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:25bb96338512e2f46f615a2bb7c6012fe92a4a5ebd353e5020836a7e33120349"}, - {file = "multidict-6.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19e2819b0b468174de25c0ceed766606a07cedeab132383f1e83b9a4e96ccb4f"}, - {file = "multidict-6.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6aed763b6a1b28c46c055692836879328f0b334a6d61572ee4113a5d0c859872"}, - {file = "multidict-6.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a1133414b771619aa3c3000701c11b2e4624a7f492f12f256aedde97c28331a2"}, - {file = "multidict-6.2.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:639556758c36093b35e2e368ca485dada6afc2bd6a1b1207d85ea6dfc3deab27"}, - {file = "multidict-6.2.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:163f4604e76639f728d127293d24c3e208b445b463168af3d031b92b0998bb90"}, - {file = "multidict-6.2.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2325105e16d434749e1be8022f942876a936f9bece4ec41ae244e3d7fae42aaf"}, - {file = "multidict-6.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e4371591e621579cb6da8401e4ea405b33ff25a755874a3567c4075ca63d56e2"}, - {file = "multidict-6.2.0-cp39-cp39-win32.whl", hash = "sha256:d1175b0e0d6037fab207f05774a176d71210ebd40b1c51f480a04b65ec5c786d"}, - {file = "multidict-6.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:ad81012b24b88aad4c70b2cbc2dad84018783221b7f923e926f4690ff8569da3"}, - {file = "multidict-6.2.0-py3-none-any.whl", hash = "sha256:5d26547423e5e71dcc562c4acdc134b900640a39abd9066d7326a7cc2324c530"}, - {file = "multidict-6.2.0.tar.gz", hash = "sha256:0085b0afb2446e57050140240a8595846ed64d1cbd26cef936bfab3192c673b8"}, + {file = "multidict-6.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:32a998bd8a64ca48616eac5a8c1cc4fa38fb244a3facf2eeb14abe186e0f6cc5"}, + {file = "multidict-6.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a54ec568f1fc7f3c313c2f3b16e5db346bf3660e1309746e7fccbbfded856188"}, + {file = "multidict-6.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a7be07e5df178430621c716a63151165684d3e9958f2bbfcb644246162007ab7"}, + {file = "multidict-6.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b128dbf1c939674a50dd0b28f12c244d90e5015e751a4f339a96c54f7275e291"}, + {file = "multidict-6.4.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b9cb19dfd83d35b6ff24a4022376ea6e45a2beba8ef3f0836b8a4b288b6ad685"}, + {file = "multidict-6.4.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3cf62f8e447ea2c1395afa289b332e49e13d07435369b6f4e41f887db65b40bf"}, + {file = "multidict-6.4.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:909f7d43ff8f13d1adccb6a397094adc369d4da794407f8dd592c51cf0eae4b1"}, + {file = "multidict-6.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0bb8f8302fbc7122033df959e25777b0b7659b1fd6bcb9cb6bed76b5de67afef"}, + {file = "multidict-6.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:224b79471b4f21169ea25ebc37ed6f058040c578e50ade532e2066562597b8a9"}, + {file = "multidict-6.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a7bd27f7ab3204f16967a6f899b3e8e9eb3362c0ab91f2ee659e0345445e0078"}, + {file = "multidict-6.4.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:99592bd3162e9c664671fd14e578a33bfdba487ea64bcb41d281286d3c870ad7"}, + {file = 
"multidict-6.4.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a62d78a1c9072949018cdb05d3c533924ef8ac9bcb06cbf96f6d14772c5cd451"}, + {file = "multidict-6.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ccdde001578347e877ca4f629450973c510e88e8865d5aefbcb89b852ccc666"}, + {file = "multidict-6.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:eccb67b0e78aa2e38a04c5ecc13bab325a43e5159a181a9d1a6723db913cbb3c"}, + {file = "multidict-6.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8b6fcf6054fc4114a27aa865f8840ef3d675f9316e81868e0ad5866184a6cba5"}, + {file = "multidict-6.4.3-cp310-cp310-win32.whl", hash = "sha256:f92c7f62d59373cd93bc9969d2da9b4b21f78283b1379ba012f7ee8127b3152e"}, + {file = "multidict-6.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:b57e28dbc031d13916b946719f213c494a517b442d7b48b29443e79610acd887"}, + {file = "multidict-6.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f6f19170197cc29baccd33ccc5b5d6a331058796485857cf34f7635aa25fb0cd"}, + {file = "multidict-6.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f2882bf27037eb687e49591690e5d491e677272964f9ec7bc2abbe09108bdfb8"}, + {file = "multidict-6.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fbf226ac85f7d6b6b9ba77db4ec0704fde88463dc17717aec78ec3c8546c70ad"}, + {file = "multidict-6.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e329114f82ad4b9dd291bef614ea8971ec119ecd0f54795109976de75c9a852"}, + {file = "multidict-6.4.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:1f4e0334d7a555c63f5c8952c57ab6f1c7b4f8c7f3442df689fc9f03df315c08"}, + {file = "multidict-6.4.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:740915eb776617b57142ce0bb13b7596933496e2f798d3d15a20614adf30d229"}, + {file = "multidict-6.4.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255dac25134d2b141c944b59a0d2f7211ca12a6d4779f7586a98b4b03ea80508"}, + {file = "multidict-6.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4e8535bd4d741039b5aad4285ecd9b902ef9e224711f0b6afda6e38d7ac02c7"}, + {file = "multidict-6.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c433a33be000dd968f5750722eaa0991037be0be4a9d453eba121774985bc8"}, + {file = "multidict-6.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4eb33b0bdc50acd538f45041f5f19945a1f32b909b76d7b117c0c25d8063df56"}, + {file = "multidict-6.4.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:75482f43465edefd8a5d72724887ccdcd0c83778ded8f0cb1e0594bf71736cc0"}, + {file = "multidict-6.4.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ce5b3082e86aee80b3925ab4928198450d8e5b6466e11501fe03ad2191c6d777"}, + {file = "multidict-6.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e413152e3212c4d39f82cf83c6f91be44bec9ddea950ce17af87fbf4e32ca6b2"}, + {file = "multidict-6.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:8aac2eeff69b71f229a405c0a4b61b54bade8e10163bc7b44fcd257949620618"}, + {file = "multidict-6.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ab583ac203af1d09034be41458feeab7863c0635c650a16f15771e1386abf2d7"}, + {file = "multidict-6.4.3-cp311-cp311-win32.whl", hash = "sha256:1b2019317726f41e81154df636a897de1bfe9228c3724a433894e44cd2512378"}, + {file = "multidict-6.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:43173924fa93c7486402217fab99b60baf78d33806af299c56133a3755f69589"}, + {file 
= "multidict-6.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1f1c2f58f08b36f8475f3ec6f5aeb95270921d418bf18f90dffd6be5c7b0e676"}, + {file = "multidict-6.4.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:26ae9ad364fc61b936fb7bf4c9d8bd53f3a5b4417142cd0be5c509d6f767e2f1"}, + {file = "multidict-6.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:659318c6c8a85f6ecfc06b4e57529e5a78dfdd697260cc81f683492ad7e9435a"}, + {file = "multidict-6.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1eb72c741fd24d5a28242ce72bb61bc91f8451877131fa3fe930edb195f7054"}, + {file = "multidict-6.4.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3cd06d88cb7398252284ee75c8db8e680aa0d321451132d0dba12bc995f0adcc"}, + {file = "multidict-6.4.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4543d8dc6470a82fde92b035a92529317191ce993533c3c0c68f56811164ed07"}, + {file = "multidict-6.4.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:30a3ebdc068c27e9d6081fca0e2c33fdf132ecea703a72ea216b81a66860adde"}, + {file = "multidict-6.4.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b038f10e23f277153f86f95c777ba1958bcd5993194fda26a1d06fae98b2f00c"}, + {file = "multidict-6.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c605a2b2dc14282b580454b9b5d14ebe0668381a3a26d0ac39daa0ca115eb2ae"}, + {file = "multidict-6.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8bd2b875f4ca2bb527fe23e318ddd509b7df163407b0fb717df229041c6df5d3"}, + {file = "multidict-6.4.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c2e98c840c9c8e65c0e04b40c6c5066c8632678cd50c8721fdbcd2e09f21a507"}, + {file = "multidict-6.4.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:66eb80dd0ab36dbd559635e62fba3083a48a252633164857a1d1684f14326427"}, + {file = "multidict-6.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c23831bdee0a2a3cf21be057b5e5326292f60472fb6c6f86392bbf0de70ba731"}, + {file = "multidict-6.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1535cec6443bfd80d028052e9d17ba6ff8a5a3534c51d285ba56c18af97e9713"}, + {file = "multidict-6.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3b73e7227681f85d19dec46e5b881827cd354aabe46049e1a61d2f9aaa4e285a"}, + {file = "multidict-6.4.3-cp312-cp312-win32.whl", hash = "sha256:8eac0c49df91b88bf91f818e0a24c1c46f3622978e2c27035bfdca98e0e18124"}, + {file = "multidict-6.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:11990b5c757d956cd1db7cb140be50a63216af32cd6506329c2c59d732d802db"}, + {file = "multidict-6.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a76534263d03ae0cfa721fea40fd2b5b9d17a6f85e98025931d41dc49504474"}, + {file = "multidict-6.4.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:805031c2f599eee62ac579843555ed1ce389ae00c7e9f74c2a1b45e0564a88dd"}, + {file = "multidict-6.4.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c56c179839d5dcf51d565132185409d1d5dd8e614ba501eb79023a6cab25576b"}, + {file = "multidict-6.4.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c64f4ddb3886dd8ab71b68a7431ad4aa01a8fa5be5b11543b29674f29ca0ba3"}, + {file = "multidict-6.4.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3002a856367c0b41cad6784f5b8d3ab008eda194ed7864aaa58f65312e2abcac"}, + {file = 
"multidict-6.4.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d75e621e7d887d539d6e1d789f0c64271c250276c333480a9e1de089611f790"}, + {file = "multidict-6.4.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:995015cf4a3c0d72cbf453b10a999b92c5629eaf3a0c3e1efb4b5c1f602253bb"}, + {file = "multidict-6.4.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b0fabae7939d09d7d16a711468c385272fa1b9b7fb0d37e51143585d8e72e0"}, + {file = "multidict-6.4.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:61ed4d82f8a1e67eb9eb04f8587970d78fe7cddb4e4d6230b77eda23d27938f9"}, + {file = "multidict-6.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:062428944a8dc69df9fdc5d5fc6279421e5f9c75a9ee3f586f274ba7b05ab3c8"}, + {file = "multidict-6.4.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:b90e27b4674e6c405ad6c64e515a505c6d113b832df52fdacb6b1ffd1fa9a1d1"}, + {file = "multidict-6.4.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7d50d4abf6729921e9613d98344b74241572b751c6b37feed75fb0c37bd5a817"}, + {file = "multidict-6.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:43fe10524fb0a0514be3954be53258e61d87341008ce4914f8e8b92bee6f875d"}, + {file = "multidict-6.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:236966ca6c472ea4e2d3f02f6673ebfd36ba3f23159c323f5a496869bc8e47c9"}, + {file = "multidict-6.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:422a5ec315018e606473ba1f5431e064cf8b2a7468019233dcf8082fabad64c8"}, + {file = "multidict-6.4.3-cp313-cp313-win32.whl", hash = "sha256:f901a5aace8e8c25d78960dcc24c870c8d356660d3b49b93a78bf38eb682aac3"}, + {file = "multidict-6.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:1c152c49e42277bc9a2f7b78bd5fa10b13e88d1b0328221e7aef89d5c60a99a5"}, + {file = "multidict-6.4.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:be8751869e28b9c0d368d94f5afcb4234db66fe8496144547b4b6d6a0645cfc6"}, + {file = "multidict-6.4.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0d4b31f8a68dccbcd2c0ea04f0e014f1defc6b78f0eb8b35f2265e8716a6df0c"}, + {file = "multidict-6.4.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:032efeab3049e37eef2ff91271884303becc9e54d740b492a93b7e7266e23756"}, + {file = "multidict-6.4.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e78006af1a7c8a8007e4f56629d7252668344442f66982368ac06522445e375"}, + {file = "multidict-6.4.3-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:daeac9dd30cda8703c417e4fddccd7c4dc0c73421a0b54a7da2713be125846be"}, + {file = "multidict-6.4.3-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f6f90700881438953eae443a9c6f8a509808bc3b185246992c4233ccee37fea"}, + {file = "multidict-6.4.3-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f84627997008390dd15762128dcf73c3365f4ec0106739cde6c20a07ed198ec8"}, + {file = "multidict-6.4.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3307b48cd156153b117c0ea54890a3bdbf858a5b296ddd40dc3852e5f16e9b02"}, + {file = "multidict-6.4.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ead46b0fa1dcf5af503a46e9f1c2e80b5d95c6011526352fa5f42ea201526124"}, + {file = "multidict-6.4.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:1748cb2743bedc339d63eb1bca314061568793acd603a6e37b09a326334c9f44"}, + {file = "multidict-6.4.3-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:acc9fa606f76fc111b4569348cc23a771cb52c61516dcc6bcef46d612edb483b"}, + {file = "multidict-6.4.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:31469d5832b5885adeb70982e531ce86f8c992334edd2f2254a10fa3182ac504"}, + {file = "multidict-6.4.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:ba46b51b6e51b4ef7bfb84b82f5db0dc5e300fb222a8a13b8cd4111898a869cf"}, + {file = "multidict-6.4.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:389cfefb599edf3fcfd5f64c0410da686f90f5f5e2c4d84e14f6797a5a337af4"}, + {file = "multidict-6.4.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:64bc2bbc5fba7b9db5c2c8d750824f41c6994e3882e6d73c903c2afa78d091e4"}, + {file = "multidict-6.4.3-cp313-cp313t-win32.whl", hash = "sha256:0ecdc12ea44bab2807d6b4a7e5eef25109ab1c82a8240d86d3c1fc9f3b72efd5"}, + {file = "multidict-6.4.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7146a8742ea71b5d7d955bffcef58a9e6e04efba704b52a460134fefd10a8208"}, + {file = "multidict-6.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5427a2679e95a642b7f8b0f761e660c845c8e6fe3141cddd6b62005bd133fc21"}, + {file = "multidict-6.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24a8caa26521b9ad09732972927d7b45b66453e6ebd91a3c6a46d811eeb7349b"}, + {file = "multidict-6.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6b5a272bc7c36a2cd1b56ddc6bff02e9ce499f9f14ee4a45c45434ef083f2459"}, + {file = "multidict-6.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf74dc5e212b8c75165b435c43eb0d5e81b6b300a938a4eb82827119115e840"}, + {file = "multidict-6.4.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9f35de41aec4b323c71f54b0ca461ebf694fb48bec62f65221f52e0017955b39"}, + {file = "multidict-6.4.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae93e0ff43b6f6892999af64097b18561691ffd835e21a8348a441e256592e1f"}, + {file = "multidict-6.4.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e3929269e9d7eff905d6971d8b8c85e7dbc72c18fb99c8eae6fe0a152f2e343"}, + {file = "multidict-6.4.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb6214fe1750adc2a1b801a199d64b5a67671bf76ebf24c730b157846d0e90d2"}, + {file = "multidict-6.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d79cf5c0c6284e90f72123f4a3e4add52d6c6ebb4a9054e88df15b8d08444c6"}, + {file = "multidict-6.4.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2427370f4a255262928cd14533a70d9738dfacadb7563bc3b7f704cc2360fc4e"}, + {file = "multidict-6.4.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:fbd8d737867912b6c5f99f56782b8cb81f978a97b4437a1c476de90a3e41c9a1"}, + {file = "multidict-6.4.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0ee1bf613c448997f73fc4efb4ecebebb1c02268028dd4f11f011f02300cf1e8"}, + {file = "multidict-6.4.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:578568c4ba5f2b8abd956baf8b23790dbfdc953e87d5b110bce343b4a54fc9e7"}, + {file = "multidict-6.4.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a059ad6b80de5b84b9fa02a39400319e62edd39d210b4e4f8c4f1243bdac4752"}, + {file = "multidict-6.4.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dd53893675b729a965088aaadd6a1f326a72b83742b056c1065bdd2e2a42b4df"}, + {file = "multidict-6.4.3-cp39-cp39-win32.whl", hash = 
"sha256:abcfed2c4c139f25c2355e180bcc077a7cae91eefbb8b3927bb3f836c9586f1f"}, + {file = "multidict-6.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:b1b389ae17296dd739015d5ddb222ee99fd66adeae910de21ac950e00979d897"}, + {file = "multidict-6.4.3-py3-none-any.whl", hash = "sha256:59fe01ee8e2a1e8ceb3f6dbb216b09c8d9f4ef1c22c4fc825d045a147fa2ebc9"}, + {file = "multidict-6.4.3.tar.gz", hash = "sha256:3ada0b058c9f213c5f95ba301f922d402ac234f1111a7d8fd70f1b99f3c281ec"}, ] [package.dependencies] @@ -2116,7 +2069,6 @@ version = "1.4.2" description = "CLI for nonebot2" optional = false python-versions = "<4.0,>=3.9" -groups = ["main"] files = [ {file = "nb_cli-1.4.2-py3-none-any.whl", hash = "sha256:8348480a988fb8632130e14925977ad117d4a0c76c971f91ad813f91a7592263"}, {file = "nb_cli-1.4.2.tar.gz", hash = "sha256:1d97b2d51569c7f7c7371744b9ed4b73361bc1853111bde2ddf1e990a1e19fef"}, @@ -2149,7 +2101,6 @@ version = "0.7.7" description = "a complex pattern, support typing" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "nepattern-0.7.7-py3-none-any.whl", hash = "sha256:2d66f964333f42df7971390da4fb98dfed1e8b769236f305c28a83c0bcda849a"}, {file = "nepattern-0.7.7.tar.gz", hash = "sha256:6667f888457e78937998f9412eb70ad16d220464d2d77850dd2b05e9ecfb3207"}, @@ -2170,7 +2121,6 @@ version = "1.9.1" description = "Node.js virtual environment builder" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" -groups = ["dev"] files = [ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, @@ -2187,7 +2137,6 @@ version = "2.4.6" description = "OneBot(CQHTTP) adapter for nonebot2" optional = false python-versions = ">=3.9,<4.0" -groups = ["main"] files = [ {file = "nonebot_adapter_onebot-2.4.6-py3-none-any.whl", hash = "sha256:b1ec7023fd83d731f63b513217327a57d12893a261944934b9195f79173791ad"}, {file = "nonebot_adapter_onebot-2.4.6.tar.gz", hash = "sha256:e33c93649ad11b320d8e9ff213635f29b23b4d0413c9158bd031c513c2f8f701"}, @@ -2210,7 +2159,6 @@ version = "0.54.2" description = "Alconna Adapter for Nonebot" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "nonebot_plugin_alconna-0.54.2-py3-none-any.whl", hash = "sha256:ab9a1a5f0f8c9a30ba57a49bed5d3e9c3f761ea5954cbafb15bcd2aa9c7d5507"}, {file = "nonebot_plugin_alconna-0.54.2.tar.gz", hash = "sha256:0216da3bc2e5f8b4c4c44c2701f8f0a536d35ea0db79e708cc2ecd002b57ace6"}, @@ -2236,7 +2184,6 @@ version = "0.5.0" description = "APScheduler Support for NoneBot2" optional = false python-versions = ">=3.9,<4.0" -groups = ["main"] files = [ {file = "nonebot_plugin_apscheduler-0.5.0-py3-none-any.whl", hash = "sha256:8b99b5ee60c4bc195d4df2fd27dab3d6963691e3332f6cee31a06eb4277c307f"}, {file = "nonebot_plugin_apscheduler-0.5.0.tar.gz", hash = "sha256:6c0230e99765f275dc83d6639ff33bd6f71203fa10cd1b8a204b0f95530cda86"}, @@ -2258,7 +2205,6 @@ version = "0.6.3" description = "通过浏览器渲染图片" optional = false python-versions = "<4.0,>=3.9" -groups = ["main"] files = [ {file = "nonebot_plugin_htmlrender-0.6.3-py3-none-any.whl", hash = "sha256:bc9ce830a4652ff1a6501c7e335114921584a9528a8e7f53df2dddee0b2410b5"}, {file = "nonebot_plugin_htmlrender-0.6.3.tar.gz", hash = "sha256:212beb78f776416b0fe5536d799d59c09a39b7d663d3cf815dff5ebcb56cfb45"}, @@ -2285,7 +2231,6 @@ version = "0.2.3" description = "Nonebot2 会话信息提取与会话id定义" 
optional = false python-versions = ">=3.8,<4.0" -groups = ["main"] files = [ {file = "nonebot_plugin_session-0.2.3-py3-none-any.whl", hash = "sha256:5f652a0c082231c1cea72deb994a81e50f77ba532e14d30fdec09772f69079fd"}, {file = "nonebot_plugin_session-0.2.3.tar.gz", hash = "sha256:33af37400f5005927c4ff861e593774bedc314fba00cfe06f482e582d9f447b7"}, @@ -2306,7 +2251,6 @@ version = "0.7.2" description = "Universal Information Model for Nonebot2" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "nonebot_plugin_uninfo-0.7.2-py3-none-any.whl", hash = "sha256:0fe133b7a0ab1babe740c8bfe64ad365a60a694f6ef08369f9a79666cd744957"}, {file = "nonebot_plugin_uninfo-0.7.2.tar.gz", hash = "sha256:623cfbf81806d8b0314be0b731b74fb3f16b414b9febb52c60643f7117c414a8"}, @@ -2327,7 +2271,6 @@ version = "0.8.1" description = "An alternative for got-and-reject in Nonebot" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "nonebot_plugin_waiter-0.8.1-py3-none-any.whl", hash = "sha256:3e1afc8f134496d3a4ecefd9c3a2a98d6ef28a5318268cb22b99a0ef61a44080"}, {file = "nonebot_plugin_waiter-0.8.1.tar.gz", hash = "sha256:5e54213dfea1fd8a1e20dbe6d93b7881f35cbeedf80005148cdc39c1fd2ccc0f"}, @@ -2350,7 +2293,6 @@ version = "2.4.2" description = "An asynchronous python bot framework." optional = false python-versions = ">=3.9,<4.0" -groups = ["main", "dev"] files = [ {file = "nonebot2-2.4.2-py3-none-any.whl", hash = "sha256:ed3e970cdb6c885fb23349b65a045c08cf3ac7f43e28564ae0c72d3671ecda74"}, {file = "nonebot2-2.4.2.tar.gz", hash = "sha256:cf72d5920503ff373ba1d7963f3ddf573db913eb504e3b68ee347efb937db27d"}, @@ -2388,7 +2330,6 @@ version = "0.4.3" description = "nonebot2 test framework" optional = false python-versions = ">=3.9,<4.0" -groups = ["dev"] files = [ {file = "nonebug-0.4.3-py3-none-any.whl", hash = "sha256:eb9b2c8ab3d45459a4f00ebdaae90729e9e9628575c0685fca4c871dd4cfd425"}, {file = "nonebug-0.4.3.tar.gz", hash = "sha256:e9592d2c7a42b76f4a336f98726cba92e1300f6bab155c8822e865919786f10c"}, @@ -2412,7 +2353,6 @@ version = "0.1.9" description = "Prompt toolkit for console interaction" optional = false python-versions = ">=3.8,<4.0" -groups = ["main"] files = [ {file = "noneprompt-0.1.9-py3-none-any.whl", hash = "sha256:a54f1e6a19a3da2dedf7f365f80420e9ae49326a0ffe60a8a9c7afdee6b6eeb3"}, {file = "noneprompt-0.1.9.tar.gz", hash = "sha256:338b8bb89a8d22ef35f1dedb3aa7c1b228cf139973bdc43c5ffc3eef64457db9"}, @@ -2432,7 +2372,6 @@ version = "2.2.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "numpy-2.2.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8146f3550d627252269ac42ae660281d673eb6f8b32f113538e0cc2a9aed42b9"}, {file = "numpy-2.2.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e642d86b8f956098b564a45e6f6ce68a22c2c97a04f5acd3f221f57b8cb850ae"}, @@ -2502,7 +2441,6 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -2519,7 +2457,6 @@ version = "10.4.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = 
"sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, @@ -2608,7 +2545,7 @@ docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] -typing = ["typing-extensions ; python_version < \"3.10\""] +typing = ["typing-extensions"] xmp = ["defusedxml"] [package.source] @@ -2622,7 +2559,6 @@ version = "4.3.7" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ {file = "platformdirs-4.3.7-py3-none-any.whl", hash = "sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94"}, {file = "platformdirs-4.3.7.tar.gz", hash = "sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351"}, @@ -2644,7 +2580,6 @@ version = "1.51.0" description = "A high-level API to automate web browsers" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "playwright-1.51.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:bcaaa3d5d73bda659bfb9ff2a288b51e85a91bd89eda86eaf8186550973e416a"}, {file = "playwright-1.51.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2e0ae6eb44297b24738e1a6d9c580ca4243b4e21b7e65cf936a71492c08dd0d4"}, @@ -2670,7 +2605,6 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -2691,7 +2625,6 @@ version = "4.2.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "pre_commit-4.2.0-py2.py3-none-any.whl", hash = "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd"}, {file = "pre_commit-4.2.0.tar.gz", hash = "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146"}, @@ -2715,7 +2648,6 @@ version = "0.1.7" description = "text preprocess." 
optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "proces-0.1.7-py3-none-any.whl", hash = "sha256:308325bbc96877263f06e57e5e9c760c4b42cc722887ad60be6b18fc37d68762"}, {file = "proces-0.1.7.tar.gz", hash = "sha256:70a05d9e973dd685f7a9092c58be695a8181a411d63796c213232fd3fdc43775"}, @@ -2732,7 +2664,6 @@ version = "3.0.50" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.8.0" -groups = ["main"] files = [ {file = "prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198"}, {file = "prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab"}, @@ -2752,7 +2683,6 @@ version = "0.3.1" description = "Accelerated property cache" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ {file = "propcache-0.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f27785888d2fdd918bc36de8b8739f2d6c791399552333721b58193f68ea3e98"}, {file = "propcache-0.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4e89cde74154c7b5957f87a355bb9c8ec929c167b59c83d90654ea36aeb6180"}, @@ -2865,7 +2795,6 @@ version = "4.25.6" description = "" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "protobuf-4.25.6-cp310-abi3-win32.whl", hash = "sha256:61df6b5786e2b49fc0055f636c1e8f0aff263808bb724b95b164685ac1bcc13a"}, {file = "protobuf-4.25.6-cp310-abi3-win_amd64.whl", hash = "sha256:b8f837bfb77513fe0e2f263250f423217a173b6d85135be4d81e96a4653bcd3c"}, @@ -2891,7 +2820,6 @@ version = "5.9.8" description = "Cross-platform lib for process and system monitoring in Python." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -groups = ["main"] files = [ {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"}, {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"}, @@ -2912,7 +2840,7 @@ files = [ ] [package.extras] -test = ["enum34 ; python_version <= \"3.4\"", "ipaddress ; python_version < \"3.0\"", "mock ; python_version < \"3.0\"", "pywin32 ; sys_platform == \"win32\"", "wmi ; sys_platform == \"win32\""] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [package.source] type = "legacy" @@ -2925,7 +2853,6 @@ version = "9.0.0" description = "Get CPU info with pure Python" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, @@ -2942,7 +2869,6 @@ version = "0.4.8" description = "ASN.1 types and codecs" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, @@ -2959,8 +2885,6 @@ version = "2.22" description = "C parser in Python" optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "platform_python_implementation != \"PyPy\"" files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = 
"sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, @@ -2973,14 +2897,13 @@ reference = "aliyun" [[package]] name = "pydantic" -version = "2.11.2" +version = "2.11.3" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ - {file = "pydantic-2.11.2-py3-none-any.whl", hash = "sha256:7f17d25846bcdf89b670a86cdfe7b29a9f1c9ca23dee154221c9aa81845cfca7"}, - {file = "pydantic-2.11.2.tar.gz", hash = "sha256:2138628e050bd7a1e70b91d4bf4a91167f4ad76fdb83209b107c8d84b854917e"}, + {file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"}, + {file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"}, ] [package.dependencies] @@ -2991,7 +2914,7 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] +timezone = ["tzdata"] [package.source] type = "legacy" @@ -3004,7 +2927,6 @@ version = "2.33.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ {file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"}, {file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"}, @@ -3121,7 +3043,6 @@ version = "12.1.1" description = "A rough port of Node.js's EventEmitter to Python with a few tricks of its own" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pyee-12.1.1-py3-none-any.whl", hash = "sha256:18a19c650556bb6b32b406d7f017c8f513aceed1ef7ca618fb65de7bd2d347ef"}, {file = "pyee-12.1.1.tar.gz", hash = "sha256:bbc33c09e2ff827f74191e3e5bbc6be7da02f627b7ec30d86f5ce1a6fb2424a3"}, @@ -3131,7 +3052,7 @@ files = [ typing-extensions = "*" [package.extras] -dev = ["black", "build", "flake8", "flake8-black", "isort", "jupyter-console", "mkdocs", "mkdocs-include-markdown-plugin", "mkdocstrings[python]", "pytest", "pytest-asyncio ; python_version >= \"3.4\"", "pytest-trio ; python_version >= \"3.7\"", "sphinx", "toml", "tox", "trio", "trio ; python_version > \"3.6\"", "trio-typing ; python_version > \"3.6\"", "twine", "twisted", "validate-pyproject[all]"] +dev = ["black", "build", "flake8", "flake8-black", "isort", "jupyter-console", "mkdocs", "mkdocs-include-markdown-plugin", "mkdocstrings[python]", "pytest", "pytest-asyncio", "pytest-trio", "sphinx", "toml", "tox", "trio", "trio", "trio-typing", "twine", "twisted", "validate-pyproject[all]"] [package.source] type = "legacy" @@ -3144,7 +3065,6 @@ version = "1.0.2" description = "Pure-python FIGlet implementation" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "pyfiglet-1.0.2-py3-none-any.whl", hash = "sha256:889b351d79c99e50a3f619c8f8e6ffdb27fd8c939fc43ecbd7559bd57d5f93ea"}, {file = "pyfiglet-1.0.2.tar.gz", hash = "sha256:758788018ab8faaddc0984e1ea05ff330d3c64be663c513cc1f105f6a3066dab"}, @@ -3161,7 +3081,6 @@ version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, @@ -3181,7 +3100,6 @@ version = "2.5.0" description = "A pure Python trie data structure implementation." optional = false python-versions = "*" -groups = ["main", "dev"] files = [ {file = "pygtrie-2.5.0-py3-none-any.whl", hash = "sha256:8795cda8105493d5ae159a5bef313ff13156c5d4d72feddefacaad59f8c8ce16"}, {file = "pygtrie-2.5.0.tar.gz", hash = "sha256:203514ad826eb403dab1d2e2ddd034e0d1534bbe4dbe0213bb0593f66beba4e2"}, @@ -3198,7 +3116,6 @@ version = "10.14.3" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pymdown_extensions-10.14.3-py3-none-any.whl", hash = "sha256:05e0bee73d64b9c71a4ae17c72abc2f700e8bc8403755a00580b49a4e9f189e9"}, {file = "pymdown_extensions-10.14.3.tar.gz", hash = "sha256:41e576ce3f5d650be59e900e4ceff231e0aed2a88cf30acaee41e02f063a061b"}, @@ -3222,7 +3139,6 @@ version = "0.1.6" description = "Forked from pypika and streamline just for tortoise-orm" optional = false python-versions = ">=3.7,<4.0" -groups = ["main"] files = [ {file = "pypika-tortoise-0.1.6.tar.gz", hash = "sha256:d802868f479a708e3263724c7b5719a26ad79399b2a70cea065f4a4cadbebf36"}, {file = "pypika_tortoise-0.1.6-py3-none-any.whl", hash = "sha256:2d68bbb7e377673743cff42aa1059f3a80228d411fbcae591e4465e173109fd8"}, @@ -3239,7 +3155,6 @@ version = "0.51.0" description = "汉字拼音转换模块/工具." optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, <4" -groups = ["main"] files = [ {file = "pypinyin-0.51.0-py2.py3-none-any.whl", hash = "sha256:ae8878f08fee15d0c5c11053a737e68a4158c22c63dc632b4de060af5c95bf84"}, {file = "pypinyin-0.51.0.tar.gz", hash = "sha256:cede34fc35a79ef6c799f161e2c280e7b6755ee072fb741cae5ce2a60c4ae0c5"}, @@ -3256,7 +3171,6 @@ version = "8.3.5" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, @@ -3284,7 +3198,6 @@ version = "0.25.3" description = "Pytest support for asyncio" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"}, {file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"}, @@ -3308,7 +3221,6 @@ version = "5.0.0" description = "Pytest plugin for measuring coverage." 
optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, @@ -3332,7 +3244,6 @@ version = "3.14.0" description = "Thin-wrapper around the mock package for easier use with pytest" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, @@ -3355,7 +3266,6 @@ version = "3.6.1" description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, @@ -3381,7 +3291,6 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -3401,7 +3310,6 @@ version = "1.1.0" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, @@ -3421,7 +3329,6 @@ version = "3.4.0" description = "JOSE implementation in Python" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "python-jose-3.4.0.tar.gz", hash = "sha256:9a9a40f418ced8ecaf7e3b28d69887ceaa76adad3bcaa6dae0d9e596fec1d680"}, {file = "python_jose-3.4.0-py2.py3-none-any.whl", hash = "sha256:9c9f616819652d109bd889ecd1e15e9a162b9b94d682534c9c2146092945b78f"}, @@ -3446,14 +3353,13 @@ reference = "aliyun" [[package]] name = "python-markdown-math" -version = "0.8" +version = "0.9" description = "Math extension for Python-Markdown" optional = false -python-versions = ">=3.6" -groups = ["main"] +python-versions = ">=3.9" files = [ - {file = "python-markdown-math-0.8.tar.gz", hash = "sha256:8564212af679fc18d53f38681f16080fcd3d186073f23825c7ce86fadd3e3635"}, - {file = "python_markdown_math-0.8-py3-none-any.whl", hash = "sha256:c685249d84b5b697e9114d7beb352bd8ca2e07fd268fd4057ffca888c14641e5"}, + {file = "python_markdown_math-0.9-py3-none-any.whl", hash = "sha256:ac9932df517a5c0f6d01c56e7a44d065eca4a420893ac45f7a6937c67cb41e86"}, + {file = "python_markdown_math-0.9.tar.gz", hash = "sha256:567395553dc4941e79b3789a1096dcabb3fda9539d150d558ef3507948b264a3"}, ] [package.dependencies] @@ -3470,7 +3376,6 @@ version = "0.0.9" description = "A streaming multipart parser for Python" optional = false python-versions = ">=3.8" -groups = ["main"] 
files = [ {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, @@ -3490,7 +3395,6 @@ version = "8.0.4" description = "A Python slugify application that also handles Unicode" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "python-slugify-8.0.4.tar.gz", hash = "sha256:59202371d1d05b54a9e7720c5e038f928f45daaffe41dd10822f3907b937c856"}, {file = "python_slugify-8.0.4-py2.py3-none-any.whl", hash = "sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8"}, @@ -3513,7 +3417,6 @@ version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -3530,7 +3433,6 @@ version = "1.8.0" description = "PyWavelets, wavelet transform module" optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "pywavelets-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f5c86fcb203c8e61d1f3d4afbfc08d626c64e4e3708207315577264c724632bf"}, {file = "pywavelets-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fafb5fa126277e1690c3d6329287122fc08e4d25a262ce126e3d81b1f5709308"}, @@ -3589,7 +3491,6 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -3651,13 +3552,35 @@ type = "legacy" url = "https://mirrors.aliyun.com/pypi/simple" reference = "aliyun" +[[package]] +name = "redis" +version = "5.2.1" +description = "Python client for Redis database and key-value store" +optional = false +python-versions = ">=3.8" +files = [ + {file = "redis-5.2.1-py3-none-any.whl", hash = "sha256:ee7e1056b9aea0f04c6c2ed59452947f34c4940ee025f5dd83e6a6418b6989e4"}, + {file = "redis-5.2.1.tar.gz", hash = "sha256:16f2e22dff21d5125e8481515e386711a34cbec50f0e44413dd7d9c060a54e0f"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""} + +[package.extras] +hiredis = ["hiredis (>=3.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"] + +[package.source] +type = "legacy" +url = "https://mirrors.aliyun.com/pypi/simple" +reference = "aliyun" + [[package]] name = "regex" version = "2024.11.6" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -3766,7 +3689,6 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" -groups = ["main", "dev"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -3793,7 +3715,6 @@ version = "0.21.1" description = "A utility for mocking out the Python HTTPX and HTTP Core libraries." optional = false python-versions = ">=3.7" -groups = ["dev"] files = [ {file = "respx-0.21.1-py2.py3-none-any.whl", hash = "sha256:05f45de23f0c785862a2c92a3e173916e8ca88e4caad715dd5f68584d6053c20"}, {file = "respx-0.21.1.tar.gz", hash = "sha256:0bd7fe21bfaa52106caa1223ce61224cf30786985f17c63c5d71eff0307ee8af"}, @@ -3813,7 +3734,6 @@ version = "1.3.4" description = "Retrying" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "retrying-1.3.4-py3-none-any.whl", hash = "sha256:8cc4d43cb8e1125e0ff3344e9de678fefd85db3b750b81b2240dc0183af37b35"}, {file = "retrying-1.3.4.tar.gz", hash = "sha256:345da8c5765bd982b1d1915deb9102fd3d1f7ad16bd84a9700b85f64d24e8f3e"}, @@ -3833,7 +3753,6 @@ version = "1.5.0" description = "Validating URI References per RFC 3986" optional = false python-versions = "*" -groups = ["main", "dev"] files = [ {file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"}, {file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"}, @@ -3856,7 +3775,6 @@ version = "14.0.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" -groups = ["main"] files = [ {file = "rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0"}, {file = "rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725"}, @@ -3881,7 +3799,6 @@ version = "4.9" description = "Pure-Python RSA implementation" optional = false python-versions = ">=3.6,<4" -groups = ["main"] files = [ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, @@ -3901,7 +3818,6 @@ version = "0.18.10" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "ruamel.yaml-0.18.10-py3-none-any.whl", hash = "sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1"}, {file = "ruamel.yaml-0.18.10.tar.gz", hash = "sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58"}, @@ -3925,8 +3841,6 @@ version = "0.2.12" description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "platform_python_implementation == \"CPython\" and python_version < \"3.13\"" files = [ {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5"}, {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969"}, @@ -3934,6 +3848,7 @@ files = [ {file = 
"ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"}, {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"}, {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a"}, {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"}, {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"}, {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"}, @@ -3942,6 +3857,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"}, {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"}, {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3"}, {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"}, {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"}, {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"}, @@ -3950,6 +3866,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"}, {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"}, {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d"}, {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"}, {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"}, {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"}, @@ -3958,6 +3875,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"}, {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"}, {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01"}, {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"}, {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"}, {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"}, @@ -3966,6 +3884,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"}, {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"}, {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"}, + {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7"}, {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"}, {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"}, {file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"}, @@ -3982,7 +3901,6 @@ version = "0.8.6" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" -groups = ["dev"] files = [ {file = "ruff-0.8.6-py3-none-linux_armv6l.whl", hash = "sha256:defed167955d42c68b407e8f2e6f56ba52520e790aba4ca707a9c88619e580e3"}, {file = "ruff-0.8.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:54799ca3d67ae5e0b7a7ac234baa657a9c1784b48ec954a094da7c206e0365b1"}, @@ -4015,7 +3933,6 @@ version = "1.15.2" description = "Fundamental algorithms for scientific computing in Python" optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "scipy-1.15.2-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a2ec871edaa863e8213ea5df811cd600734f6400b4af272e1c011e69401218e9"}, {file = "scipy-1.15.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:6f223753c6ea76983af380787611ae1291e3ceb23917393079dcc746ba60cfb5"}, @@ -4071,7 +3988,7 @@ numpy = ">=1.23.5,<2.5" [package.extras] dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.16.5)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] -test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [package.source] type = "legacy" @@ -4084,7 +4001,6 @@ version = "1.0.0" description = "Py3k port of sgmllib." optional = false python-versions = "*" -groups = ["main"] files = [ {file = "sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9"}, ] @@ -4100,7 +4016,6 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -4117,7 +4032,6 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -4134,7 +4048,6 @@ version = "2.6" description = "A modern CSS selector implementation for Beautiful Soup." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, @@ -4151,7 +4064,6 @@ version = "0.46.1" description = "The little ASGI library that shines." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "starlette-0.46.1-py3-none-any.whl", hash = "sha256:77c74ed9d2720138b25875133f3a2dae6d854af2ec37dceb56aef370c1d8a227"}, {file = "starlette-0.46.1.tar.gz", hash = "sha256:3c88d58ee4bd1bb807c0d1acb381838afc7752f9ddaec81bbe4383611d833230"}, @@ -4174,7 +4086,6 @@ version = "0.4.15" description = "An Enum that inherits from str." optional = false python-versions = "*" -groups = ["main"] files = [ {file = "StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659"}, {file = "StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff"}, @@ -4196,7 +4107,6 @@ version = "0.6.8" description = "A collection of common utils for Arclet" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "tarina-0.6.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a2f7b7e61912a020d6ba3c591c4edbc31bb468544640bd814470c69a07dcc4cd"}, {file = "tarina-0.6.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1cac7cbd49317b8e63eba7d0ce0ba11e1218ab51c9d6ee9df8404b5e226db15b"}, @@ -4294,7 +4204,6 @@ version = "9.1.2" description = "Retry code until it succeeds" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138"}, {file = "tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb"}, @@ -4315,7 +4224,6 @@ version = "1.3" description = "The most basic Text::Unidecode port" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, @@ -4332,7 +4240,6 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -4367,7 +4274,6 @@ files = [ {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] -markers = {main = "python_version == \"3.10\"", dev = "python_full_version <= \"3.11.0a6\""} [package.source] type = "legacy" @@ -4380,7 +4286,6 @@ version = "0.13.2" description = "Style preserving TOML library" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, @@ -4397,7 +4302,6 @@ version = "0.20.1" description = "Easy async ORM for python, built with relations in mind" optional = false python-versions = ">=3.8,<4.0" -groups = ["main"] files = [ {file = "tortoise_orm-0.20.1-py3-none-any.whl", hash = "sha256:bf88bc1ba7495a8827565c071efba0a89c4b5f83ff1c16be3c837a4e6b672c21"}, {file = 
"tortoise_orm-0.20.1.tar.gz", hash = "sha256:c896c90a90d1213b822ac0d607b61659ad5fcd5ff72698a8ba2d9efbad9932f3"}, @@ -4405,14 +4309,13 @@ files = [ [package.dependencies] aiosqlite = ">=0.16.0,<0.18.0" -asyncpg = {version = "*", optional = true, markers = "extra == \"asyncpg\""} iso8601 = ">=1.0.2,<2.0.0" pydantic = ">=2.0,<2.7.0 || >2.7.0,<3.0" pypika-tortoise = ">=0.1.6,<0.2.0" pytz = "*" [package.extras] -accel = ["ciso8601 ; sys_platform != \"win32\" and implementation_name == \"cpython\"", "orjson", "uvloop ; sys_platform != \"win32\" and implementation_name == \"cpython\""] +accel = ["ciso8601", "orjson", "uvloop"] aiomysql = ["aiomysql"] asyncmy = ["asyncmy (>=0.2.8,<0.3.0)"] asyncodbc = ["asyncodbc (>=0.1.1,<0.2.0)"] @@ -4430,7 +4333,6 @@ version = "2.9.0.20241206" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"}, {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"}, @@ -4443,14 +4345,13 @@ reference = "aliyun" [[package]] name = "typing-extensions" -version = "4.13.1" +version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] files = [ - {file = "typing_extensions-4.13.1-py3-none-any.whl", hash = "sha256:4b6cf02909eb5495cfbc3f6e8fd49217e6cc7944e145cdda8caa3734777f9e69"}, - {file = "typing_extensions-4.13.1.tar.gz", hash = "sha256:98795af00fb9640edec5b8e31fc647597b4691f099ad75f469a2616be1a76dff"}, + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, ] [package.source] @@ -4464,7 +4365,6 @@ version = "0.4.0" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, @@ -4484,8 +4384,6 @@ version = "2025.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" -groups = ["main"] -markers = "platform_system == \"Windows\"" files = [ {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, @@ -4502,7 +4400,6 @@ version = "5.3.1" description = "tzinfo object for the local timezone" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d"}, {file = "tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd"}, @@ -4525,7 +4422,6 @@ version = "5.10.0" description = "Ultra fast JSON encoder and decoder for Python" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "ujson-5.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:2601aa9ecdbee1118a1c2065323bda35e2c5a2cf0797ef4522d485f9d3ef65bd"}, {file = "ujson-5.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:348898dd702fc1c4f1051bc3aacbf894caa0927fe2c53e68679c073375f732cf"}, @@ -4614,18 +4510,17 @@ reference = "aliyun" [[package]] name = "urllib3" -version = "2.3.0" +version = "2.4.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ - {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, - {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, + {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, + {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, ] [package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -4641,7 +4536,6 @@ version = "0.34.0" description = "The lightning-fast ASGI server." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, @@ -4655,12 +4549,12 @@ httptools = {version = ">=0.6.3", optional = true, markers = "extra == \"standar python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} -uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} [package.extras] -standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] +standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] [package.source] type = "legacy" @@ -4673,8 +4567,6 @@ version = "0.21.0" description = "Fast implementation of asyncio event loop on top of libuv" optional = false python-versions = ">=3.8.0" -groups = ["main"] -markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"" files = 
[ {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, @@ -4731,7 +4623,6 @@ version = "20.30.0" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] files = [ {file = "virtualenv-20.30.0-py3-none-any.whl", hash = "sha256:e34302959180fca3af42d1800df014b35019490b119eba981af27f2fa486e5d6"}, {file = "virtualenv-20.30.0.tar.gz", hash = "sha256:800863162bcaa5450a6e4d721049730e7f2dae07720e0902b0e4040bd6f9ada8"}, @@ -4744,7 +4635,7 @@ platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] [package.source] type = "legacy" @@ -4757,7 +4648,6 @@ version = "0.24.0" description = "Simple, modern and high performance file watching and code reload in python." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0"}, {file = "watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c"}, @@ -4858,7 +4748,6 @@ version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, @@ -4875,7 +4764,6 @@ version = "15.0.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, @@ -4959,15 +4847,13 @@ version = "1.2.0" description = "A small Python utility to set file creation time on Windows" optional = false python-versions = ">=3.5" -groups = ["main", "dev"] -markers = "sys_platform == \"win32\"" files = [ {file = "win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390"}, {file = "win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0"}, ] [package.extras] -dev = ["black (>=19.3b0) ; python_version >= \"3.6\"", "pytest (>=4.6.2)"] +dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] [package.source] type = "legacy" @@ -4976,100 +4862,104 @@ reference = "aliyun" [[package]] name = "yarl" -version = "1.18.3" +version = "1.19.0" description = "Yet another URL library" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] files = [ - {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34"}, - {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7"}, - {file = "yarl-1.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602d98f2c2d929f8e697ed274fbadc09902c4025c5a9963bf4e9edfc3ab6f7ed"}, - {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c654d5207c78e0bd6d749f6dae1dcbbfde3403ad3a4b11f3c5544d9906969dde"}, - {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5094d9206c64181d0f6e76ebd8fb2f8fe274950a63890ee9e0ebfd58bf9d787b"}, - {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35098b24e0327fc4ebdc8ffe336cee0a87a700c24ffed13161af80124b7dc8e5"}, - {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3236da9272872443f81fedc389bace88408f64f89f75d1bdb2256069a8730ccc"}, - {file = "yarl-1.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c08cc9b16f4f4bc522771d96734c7901e7ebef70c6c5c35dd0f10845270bcd"}, - {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash 
= "sha256:80316a8bd5109320d38eef8833ccf5f89608c9107d02d2a7f985f98ed6876990"}, - {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c1e1cc06da1491e6734f0ea1e6294ce00792193c463350626571c287c9a704db"}, - {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fea09ca13323376a2fdfb353a5fa2e59f90cd18d7ca4eaa1fd31f0a8b4f91e62"}, - {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e3b9fd71836999aad54084906f8663dffcd2a7fb5cdafd6c37713b2e72be1760"}, - {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:757e81cae69244257d125ff31663249b3013b5dc0a8520d73694aed497fb195b"}, - {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b1771de9944d875f1b98a745bc547e684b863abf8f8287da8466cf470ef52690"}, - {file = "yarl-1.18.3-cp310-cp310-win32.whl", hash = "sha256:8874027a53e3aea659a6d62751800cf6e63314c160fd607489ba5c2edd753cf6"}, - {file = "yarl-1.18.3-cp310-cp310-win_amd64.whl", hash = "sha256:93b2e109287f93db79210f86deb6b9bbb81ac32fc97236b16f7433db7fc437d8"}, - {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8503ad47387b8ebd39cbbbdf0bf113e17330ffd339ba1144074da24c545f0069"}, - {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02ddb6756f8f4517a2d5e99d8b2f272488e18dd0bfbc802f31c16c6c20f22193"}, - {file = "yarl-1.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67a283dd2882ac98cc6318384f565bffc751ab564605959df4752d42483ad889"}, - {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d980e0325b6eddc81331d3f4551e2a333999fb176fd153e075c6d1c2530aa8a8"}, - {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b643562c12680b01e17239be267bc306bbc6aac1f34f6444d1bded0c5ce438ca"}, - {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c017a3b6df3a1bd45b9fa49a0f54005e53fbcad16633870104b66fa1a30a29d8"}, - {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75674776d96d7b851b6498f17824ba17849d790a44d282929c42dbb77d4f17ae"}, - {file = "yarl-1.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccaa3a4b521b780a7e771cc336a2dba389a0861592bbce09a476190bb0c8b4b3"}, - {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d06d3005e668744e11ed80812e61efd77d70bb7f03e33c1598c301eea20efbb"}, - {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:9d41beda9dc97ca9ab0b9888cb71f7539124bc05df02c0cff6e5acc5a19dcc6e"}, - {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ba23302c0c61a9999784e73809427c9dbedd79f66a13d84ad1b1943802eaaf59"}, - {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6748dbf9bfa5ba1afcc7556b71cda0d7ce5f24768043a02a58846e4a443d808d"}, - {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0b0cad37311123211dc91eadcb322ef4d4a66008d3e1bdc404808992260e1a0e"}, - {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fb2171a4486bb075316ee754c6d8382ea6eb8b399d4ec62fde2b591f879778a"}, - {file = "yarl-1.18.3-cp311-cp311-win32.whl", hash = "sha256:61b1a825a13bef4a5f10b1885245377d3cd0bf87cba068e1d9a88c2ae36880e1"}, - {file = "yarl-1.18.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9d60031cf568c627d028239693fd718025719c02c9f55df0a53e587aab951b5"}, - {file = 
"yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50"}, - {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576"}, - {file = "yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640"}, - {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2"}, - {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75"}, - {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512"}, - {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba"}, - {file = "yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb"}, - {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272"}, - {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6"}, - {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e"}, - {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb"}, - {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393"}, - {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285"}, - {file = "yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2"}, - {file = "yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477"}, - {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb"}, - {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa"}, - {file = "yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782"}, - {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0"}, - {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482"}, - {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186"}, - {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58"}, - {file = 
"yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53"}, - {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2"}, - {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8"}, - {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1"}, - {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a"}, - {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10"}, - {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8"}, - {file = "yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d"}, - {file = "yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c"}, - {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:61e5e68cb65ac8f547f6b5ef933f510134a6bf31bb178be428994b0cb46c2a04"}, - {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe57328fbc1bfd0bd0514470ac692630f3901c0ee39052ae47acd1d90a436719"}, - {file = "yarl-1.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a440a2a624683108a1b454705ecd7afc1c3438a08e890a1513d468671d90a04e"}, - {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c7907c8548bcd6ab860e5f513e727c53b4a714f459b084f6580b49fa1b9cee"}, - {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4f6450109834af88cb4cc5ecddfc5380ebb9c228695afc11915a0bf82116789"}, - {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9ca04806f3be0ac6d558fffc2fdf8fcef767e0489d2684a21912cc4ed0cd1b8"}, - {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77a6e85b90a7641d2e07184df5557132a337f136250caafc9ccaa4a2a998ca2c"}, - {file = "yarl-1.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6333c5a377c8e2f5fae35e7b8f145c617b02c939d04110c76f29ee3676b5f9a5"}, - {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0b3c92fa08759dbf12b3a59579a4096ba9af8dd344d9a813fc7f5070d86bbab1"}, - {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ac515b860c36becb81bb84b667466885096b5fc85596948548b667da3bf9f24"}, - {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:045b8482ce9483ada4f3f23b3774f4e1bf4f23a2d5c912ed5170f68efb053318"}, - {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a4bb030cf46a434ec0225bddbebd4b89e6471814ca851abb8696170adb163985"}, - {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:54d6921f07555713b9300bee9c50fb46e57e2e639027089b1d795ecd9f7fa910"}, - {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1d407181cfa6e70077df3377938c08012d18893f9f20e92f7d2f314a437c30b1"}, - {file = "yarl-1.18.3-cp39-cp39-win32.whl", hash = 
"sha256:ac36703a585e0929b032fbaab0707b75dc12703766d0b53486eabd5139ebadd5"}, - {file = "yarl-1.18.3-cp39-cp39-win_amd64.whl", hash = "sha256:ba87babd629f8af77f557b61e49e7c7cac36f22f871156b91e10a6e9d4f829e9"}, - {file = "yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b"}, - {file = "yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1"}, + {file = "yarl-1.19.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0bae32f8ebd35c04d6528cedb4a26b8bf25339d3616b04613b97347f919b76d3"}, + {file = "yarl-1.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8015a076daf77823e7ebdcba474156587391dab4e70c732822960368c01251e6"}, + {file = "yarl-1.19.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9973ac95327f5d699eb620286c39365990b240031672b5c436a4cd00539596c5"}, + {file = "yarl-1.19.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd4b5fbd7b9dde785cfeb486b8cca211a0b138d4f3a7da27db89a25b3c482e5c"}, + {file = "yarl-1.19.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:75460740005de5a912b19f657848aef419387426a40f581b1dc9fac0eb9addb5"}, + {file = "yarl-1.19.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57abd66ca913f2cfbb51eb3dbbbac3648f1f6983f614a4446e0802e241441d2a"}, + {file = "yarl-1.19.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46ade37911b7c99ce28a959147cb28bffbd14cea9e7dd91021e06a8d2359a5aa"}, + {file = "yarl-1.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8346ec72ada749a6b5d82bff7be72578eab056ad7ec38c04f668a685abde6af0"}, + {file = "yarl-1.19.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e4cb14a6ee5b6649ccf1c6d648b4da9220e8277d4d4380593c03cc08d8fe937"}, + {file = "yarl-1.19.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:66fc1c2926a73a2fb46e4b92e3a6c03904d9bc3a0b65e01cb7d2b84146a8bd3b"}, + {file = "yarl-1.19.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:5a70201dd1e0a4304849b6445a9891d7210604c27e67da59091d5412bc19e51c"}, + {file = "yarl-1.19.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e4807aab1bdeab6ae6f296be46337a260ae4b1f3a8c2fcd373e236b4b2b46efd"}, + {file = "yarl-1.19.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ae584afe81a1de4c1bb06672481050f0d001cad13163e3c019477409f638f9b7"}, + {file = "yarl-1.19.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:30eaf4459df6e91f21b2999d1ee18f891bcd51e3cbe1de301b4858c84385895b"}, + {file = "yarl-1.19.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0e617d45d03c8dec0dfce6f51f3e1b8a31aa81aaf4a4d1442fdb232bcf0c6d8c"}, + {file = "yarl-1.19.0-cp310-cp310-win32.whl", hash = "sha256:32ba32d0fa23893fd8ea8d05bdb05de6eb19d7f2106787024fd969f4ba5466cb"}, + {file = "yarl-1.19.0-cp310-cp310-win_amd64.whl", hash = "sha256:545575ecfcd465891b51546c2bcafdde0acd2c62c2097d8d71902050b20e4922"}, + {file = "yarl-1.19.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:163ff326680de5f6d4966954cf9e3fe1bf980f5fee2255e46e89b8cf0f3418b5"}, + {file = "yarl-1.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a626c4d9cca298d1be8625cff4b17004a9066330ac82d132bbda64a4c17c18d3"}, + {file = "yarl-1.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:961c3e401ea7f13d02b8bb7cb0c709152a632a6e14cdc8119e9c6ee5596cd45d"}, + {file = 
"yarl-1.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a39d7b807ab58e633ed760f80195cbd145b58ba265436af35f9080f1810dfe64"}, + {file = "yarl-1.19.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c4228978fb59c6b10f60124ba8e311c26151e176df364e996f3f8ff8b93971b5"}, + {file = "yarl-1.19.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ba536b17ecf3c74a94239ec1137a3ad3caea8c0e4deb8c8d2ffe847d870a8c5"}, + {file = "yarl-1.19.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a251e00e445d2e9df7b827c9843c0b87f58a3254aaa3f162fb610747491fe00f"}, + {file = "yarl-1.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9b92431d8b4d4ca5ccbfdbac95b05a3a6cd70cd73aa62f32f9627acfde7549c"}, + {file = "yarl-1.19.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec2f56edaf476f70b5831bbd59700b53d9dd011b1f77cd4846b5ab5c5eafdb3f"}, + {file = "yarl-1.19.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:acf9b92c4245ac8b59bc7ec66a38d3dcb8d1f97fac934672529562bb824ecadb"}, + {file = "yarl-1.19.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:57711f1465c06fee8825b95c0b83e82991e6d9425f9a042c3c19070a70ac92bf"}, + {file = "yarl-1.19.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:528e86f5b1de0ad8dd758ddef4e0ed24f5d946d4a1cef80ffb2d4fca4e10f122"}, + {file = "yarl-1.19.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:3b77173663e075d9e5a57e09d711e9da2f3266be729ecca0b8ae78190990d260"}, + {file = "yarl-1.19.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:d8717924cf0a825b62b1a96fc7d28aab7f55a81bf5338b8ef41d7a76ab9223e9"}, + {file = "yarl-1.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0df9f0221a78d858793f40cbea3915c29f969c11366646a92ca47e080a14f881"}, + {file = "yarl-1.19.0-cp311-cp311-win32.whl", hash = "sha256:8b3ade62678ee2c7c10dcd6be19045135e9badad53108f7d2ed14896ee396045"}, + {file = "yarl-1.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:0626ee31edb23ac36bdffe607231de2cca055ad3a5e2dc5da587ef8bc6a321bc"}, + {file = "yarl-1.19.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:7b687c334da3ff8eab848c9620c47a253d005e78335e9ce0d6868ed7e8fd170b"}, + {file = "yarl-1.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b0fe766febcf523a2930b819c87bb92407ae1368662c1bc267234e79b20ff894"}, + {file = "yarl-1.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:742ceffd3c7beeb2b20d47cdb92c513eef83c9ef88c46829f88d5b06be6734ee"}, + {file = "yarl-1.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2af682a1e97437382ee0791eacbf540318bd487a942e068e7e0a6c571fadbbd3"}, + {file = "yarl-1.19.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:63702f1a098d0eaaea755e9c9d63172be1acb9e2d4aeb28b187092bcc9ca2d17"}, + {file = "yarl-1.19.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3560dcba3c71ae7382975dc1e912ee76e50b4cd7c34b454ed620d55464f11876"}, + {file = "yarl-1.19.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68972df6a0cc47c8abaf77525a76ee5c5f6ea9bbdb79b9565b3234ded3c5e675"}, + {file = "yarl-1.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5684e7ff93ea74e47542232bd132f608df4d449f8968fde6b05aaf9e08a140f9"}, + {file = 
"yarl-1.19.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8182ad422bfacdebd4759ce3adc6055c0c79d4740aea1104e05652a81cd868c6"}, + {file = "yarl-1.19.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aee5b90a5a9b71ac57400a7bdd0feaa27c51e8f961decc8d412e720a004a1791"}, + {file = "yarl-1.19.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:8c0b2371858d5a814b08542d5d548adb03ff2d7ab32f23160e54e92250961a72"}, + {file = "yarl-1.19.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cd430c2b7df4ae92498da09e9b12cad5bdbb140d22d138f9e507de1aa3edfea3"}, + {file = "yarl-1.19.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a93208282c0ccdf73065fd76c6c129bd428dba5ff65d338ae7d2ab27169861a0"}, + {file = "yarl-1.19.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:b8179280cdeb4c36eb18d6534a328f9d40da60d2b96ac4a295c5f93e2799e9d9"}, + {file = "yarl-1.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eda3c2b42dc0c389b7cfda2c4df81c12eeb552019e0de28bde8f913fc3d1fcf3"}, + {file = "yarl-1.19.0-cp312-cp312-win32.whl", hash = "sha256:57f3fed859af367b9ca316ecc05ce79ce327d6466342734305aa5cc380e4d8be"}, + {file = "yarl-1.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:5507c1f7dd3d41251b67eecba331c8b2157cfd324849879bebf74676ce76aff7"}, + {file = "yarl-1.19.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:59281b9ed27bc410e0793833bcbe7fc149739d56ffa071d1e0fe70536a4f7b61"}, + {file = "yarl-1.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d27a6482ad5e05e8bafd47bf42866f8a1c0c3345abcb48d4511b3c29ecc197dc"}, + {file = "yarl-1.19.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7a8e19fd5a6fdf19a91f2409665c7a089ffe7b9b5394ab33c0eec04cbecdd01f"}, + {file = "yarl-1.19.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cda34ab19099c3a1685ad48fe45172536610c312b993310b5f1ca3eb83453b36"}, + {file = "yarl-1.19.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7908a25d33f94852b479910f9cae6cdb9e2a509894e8d5f416c8342c0253c397"}, + {file = "yarl-1.19.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e66c14d162bac94973e767b24de5d7e6c5153f7305a64ff4fcba701210bcd638"}, + {file = "yarl-1.19.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c03607bf932aa4cfae371e2dc9ca8b76faf031f106dac6a6ff1458418140c165"}, + {file = "yarl-1.19.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9931343d1c1f4e77421687b6b94bbebd8a15a64ab8279adf6fbb047eff47e536"}, + {file = "yarl-1.19.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:262087a8a0d73e1d169d45c2baf968126f93c97cf403e1af23a7d5455d52721f"}, + {file = "yarl-1.19.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:70f384921c24e703d249a6ccdabeb57dd6312b568b504c69e428a8dd3e8e68ca"}, + {file = "yarl-1.19.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:756b9ea5292a2c180d1fe782a377bc4159b3cfefaca7e41b5b0a00328ef62fa9"}, + {file = "yarl-1.19.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cbeb9c145d534c240a63b6ecc8a8dd451faeb67b3dc61d729ec197bb93e29497"}, + {file = "yarl-1.19.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:087ae8f8319848c18e0d114d0f56131a9c017f29200ab1413b0137ad7c83e2ae"}, + {file = "yarl-1.19.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362f5480ba527b6c26ff58cff1f229afe8b7fdd54ee5ffac2ab827c1a75fc71c"}, + {file = 
"yarl-1.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f408d4b4315e814e5c3668094e33d885f13c7809cbe831cbdc5b1bb8c7a448f4"}, + {file = "yarl-1.19.0-cp313-cp313-win32.whl", hash = "sha256:24e4c367ad69988a2283dd45ea88172561ca24b2326b9781e164eb46eea68345"}, + {file = "yarl-1.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:0110f91c57ab43d1538dfa92d61c45e33b84df9257bd08fcfcda90cce931cbc9"}, + {file = "yarl-1.19.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:85ac908cd5a97bbd3048cca9f1bf37b932ea26c3885099444f34b0bf5d5e9fa6"}, + {file = "yarl-1.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6ba0931b559f1345df48a78521c31cfe356585670e8be22af84a33a39f7b9221"}, + {file = "yarl-1.19.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5bc503e1c1fee1b86bcb58db67c032957a52cae39fe8ddd95441f414ffbab83e"}, + {file = "yarl-1.19.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d995122dcaf180fd4830a9aa425abddab7c0246107c21ecca2fa085611fa7ce9"}, + {file = "yarl-1.19.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:217f69e60a14da4eed454a030ea8283f8fbd01a7d6d81e57efb865856822489b"}, + {file = "yarl-1.19.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad67c8f13a4b79990082f72ef09c078a77de2b39899aabf3960a48069704973"}, + {file = "yarl-1.19.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dff065a1a8ed051d7e641369ba1ad030d5a707afac54cf4ede7069b959898835"}, + {file = "yarl-1.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada882e26b16ee651ab6544ce956f2f4beaed38261238f67c2a96db748e17741"}, + {file = "yarl-1.19.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67a56b1acc7093451ea2de0687aa3bd4e58d6b4ef6cbeeaad137b45203deaade"}, + {file = "yarl-1.19.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e97d2f0a06b39e231e59ebab0e6eec45c7683b339e8262299ac952707bdf7688"}, + {file = "yarl-1.19.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a5288adb7c59d0f54e4ad58d86fb06d4b26e08a59ed06d00a1aac978c0e32884"}, + {file = "yarl-1.19.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1efbf4d03e6eddf5da27752e0b67a8e70599053436e9344d0969532baa99df53"}, + {file = "yarl-1.19.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f228f42f29cc87db67020f7d71624102b2c837686e55317b16e1d3ef2747a993"}, + {file = "yarl-1.19.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c515f7dd60ca724e4c62b34aeaa603188964abed2eb66bb8e220f7f104d5a187"}, + {file = "yarl-1.19.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4815ec6d3d68a96557fa71bd36661b45ac773fb50e5cfa31a7e843edb098f060"}, + {file = "yarl-1.19.0-cp39-cp39-win32.whl", hash = "sha256:9fac2dd1c5ecb921359d9546bc23a6dcc18c6acd50c6d96f118188d68010f497"}, + {file = "yarl-1.19.0-cp39-cp39-win_amd64.whl", hash = "sha256:5864f539ce86b935053bfa18205fa08ce38e9a40ea4d51b19ce923345f0ed5db"}, + {file = "yarl-1.19.0-py3-none-any.whl", hash = "sha256:a727101eb27f66727576630d02985d8a065d09cd0b5fcbe38a5793f71b2a97ef"}, + {file = "yarl-1.19.0.tar.gz", hash = "sha256:01e02bb80ae0dbed44273c304095295106e1d9470460e773268a27d11e594892"}, ] [package.dependencies] idna = ">=2.0" multidict = ">=4.0" -propcache = ">=0.2.0" +propcache = ">=0.2.1" [package.source] type = "legacy" @@ -5082,18 +4972,17 @@ version = "3.21.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" -groups = ["main"] files = 
[ {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] type = ["pytest-mypy"] [package.source] @@ -5101,7 +4990,11 @@ type = "legacy" url = "https://mirrors.aliyun.com/pypi/simple" reference = "aliyun" +[extras] +postgresql = ["asyncpg"] +redis = ["redis"] + [metadata] -lock-version = "2.1" +lock-version = "2.0" python-versions = "^3.10" -content-hash = "48aa6fabc582a0c75b333f9bd3418264a1fd15a5c8c50220b456ba00d03cd35e" +content-hash = "ed42547d3e975f73e9e1fd1d4c4660d4363d70f40a47c5626cb1664508a9c156" diff --git a/pyproject.toml b/pyproject.toml index 621472fe..4dd31f16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ python = "^3.10" playwright = "^1.41.1" nonebot-adapter-onebot = "^2.3.1" nonebot-plugin-apscheduler = "^0.5" -tortoise-orm = { extras = ["asyncpg"], version = "^0.20.0" } +tortoise-orm = "^0.20.0" cattrs = "^23.2.3" ruamel-yaml = "^0.18.5" strenum = "^0.4.15" @@ -39,7 +39,7 @@ dateparser = "^1.2.0" bilireq = "0.2.3post0" python-jose = { extras = ["cryptography"], version = "^3.3.0" } python-multipart = "^0.0.9" -aiocache = "^0.12.2" +aiocache = {extras = ["redis"], version = "^0.12.3"} py-cpuinfo = "^9.0.0" nonebot-plugin-alconna = "^0.54.0" tenacity = "^9.0.0" @@ -47,6 +47,9 @@ nonebot-plugin-uninfo = ">0.4.1" nonebot-plugin-waiter = "^0.8.1" multidict = ">=6.0.0,!=6.3.2" +redis = { version = ">=5", optional = true } +asyncpg = { version = ">=0.20.0", optional = true } + [tool.poetry.group.dev.dependencies] nonebug = "^0.4" pytest-cov = "^5.0.0" @@ -57,6 +60,9 @@ respx = "^0.21.1" ruff = "^0.8.0" pre-commit = "^4.0.0" +[tool.poetry.extras] +redis = ["redis"] +postgresql = ["asyncpg"] [tool.nonebot] plugins = [ diff --git a/zhenxun/builtin_plugins/about.py b/zhenxun/builtin_plugins/about.py index 31c77bc7..faa0ba0e 100644 --- a/zhenxun/builtin_plugins/about.py +++ b/zhenxun/builtin_plugins/about.py @@ -26,21 +26,6 @@ __plugin_meta__ = PluginMetadata( _matcher = on_alconna(Alconna("关于"), priority=5, block=True, rule=to_me()) -QQ_INFO = """ -『绪山真寻Bot』 -版本:{version} -简介:基于Nonebot2开发,支持多平台,是一个非常可爱的Bot呀,希望与大家要好好相处 -""".strip() - -INFO = """ -『绪山真寻Bot』 -版本:{version} -简介:基于Nonebot2开发,支持多平台,是一个非常可爱的Bot呀,希望与大家要好好相处 -项目地址:https://github.com/zhenxun-org/zhenxun_bot -文档地址:https://zhenxun-org.github.io/zhenxun_bot/ -""".strip() - - @_matcher.handle() async def _(session: Uninfo, arparma: Arparma): ver_file = Path() / "__version__" @@ -50,11 +35,25 @@ async def _(session: Uninfo, arparma: Arparma): if text := await f.read(): version = text.split(":")[-1].strip() if PlatformUtils.is_qbot(session): - result: list[str | Path] = [QQ_INFO.format(version=version)] + info: list[str | Path] = [ + f""" 
+『绪山真寻Bot』 +版本:{version} +简介:基于Nonebot2开发,支持多平台,是一个非常可爱的Bot呀,希望与大家要好好相处 + """.strip() + ] path = DATA_PATH / "about.png" if path.exists(): - result.append(path) - await MessageUtils.build_message(result).send() # type: ignore + info.append(path) else: - await MessageUtils.build_message(INFO.format(version=version)).send() - logger.info("查看关于", arparma.header_result, session=session) + info = [ + f""" +『绪山真寻Bot』 +版本:{version} +简介:基于Nonebot2开发,支持多平台,是一个非常可爱的Bot呀,希望与大家要好好相处 +项目地址:https://github.com/HibiKier/zhenxun_bot +文档地址:https://hibikier.github.io/zhenxun_bot/ + """.strip() + ] + await MessageUtils.build_message(info).send() # type: ignore + logger.info("查看关于", arparma.header_result, session=session) diff --git a/zhenxun/builtin_plugins/admin/ban/__init__.py b/zhenxun/builtin_plugins/admin/ban/__init__.py index 91bbf2ba..32e97f2d 100644 --- a/zhenxun/builtin_plugins/admin/ban/__init__.py +++ b/zhenxun/builtin_plugins/admin/ban/__init__.py @@ -14,13 +14,19 @@ from nonebot_plugin_alconna import ( from nonebot_plugin_session import EventSession from zhenxun.configs.config import BotConfig, Config -from zhenxun.configs.utils import PluginExtraData, RegisterConfig +from zhenxun.configs.utils import ( + AICallableParam, + AICallableProperties, + AICallableTag, + PluginExtraData, + RegisterConfig, +) from zhenxun.services.log import logger from zhenxun.utils.enum import PluginType from zhenxun.utils.message import MessageUtils from zhenxun.utils.rules import admin_check -from ._data_source import BanManage +from ._data_source import BanManage, call_ban base_config = Config.get("ban") @@ -78,6 +84,22 @@ __plugin_meta__ = PluginMetadata( type=int, ) ], + smart_tools=[ + AICallableTag( + name="call_ban", + description="某人多次(至少三次)辱骂你,调用此方法进行封禁", + parameters=AICallableParam( + type="object", + properties={ + "user_id": AICallableProperties( + type="string", description="用户的id" + ), + }, + required=["user_id"], + ), + func=call_ban, + ) + ], ).to_dict(), ) diff --git a/zhenxun/builtin_plugins/admin/ban/_data_source.py b/zhenxun/builtin_plugins/admin/ban/_data_source.py index f38d2440..2d4dd6dc 100644 --- a/zhenxun/builtin_plugins/admin/ban/_data_source.py +++ b/zhenxun/builtin_plugins/admin/ban/_data_source.py @@ -5,8 +5,19 @@ from nonebot_plugin_session import EventSession from zhenxun.models.ban_console import BanConsole from zhenxun.models.level_user import LevelUser +from zhenxun.services.log import logger from zhenxun.utils.image_utils import BuildImage, ImageTemplate +async def call_ban(user_id: str): + """调用ban + + 参数: + user_id: 用户id + """ + await BanConsole.ban(user_id, None, 9, 60 * 12) + logger.info("辱骂次数过多,已将用户加入黑名单...", "ban", session=user_id) + + class BanManage: @classmethod diff --git a/zhenxun/builtin_plugins/chat_history/chat_message.py b/zhenxun/builtin_plugins/chat_history/chat_message.py index b3bebb4f..36ea4930 100644 --- a/zhenxun/builtin_plugins/chat_history/chat_message.py +++ b/zhenxun/builtin_plugins/chat_history/chat_message.py @@ -1,13 +1,15 @@ from nonebot import on_message from nonebot.plugin import PluginMetadata from nonebot_plugin_alconna import UniMsg -from nonebot_plugin_session import EventSession +from nonebot_plugin_apscheduler import scheduler +from nonebot_plugin_uninfo import Uninfo from zhenxun.configs.config import Config from zhenxun.configs.utils import PluginExtraData, RegisterConfig from zhenxun.models.chat_history import ChatHistory from zhenxun.services.log import logger from zhenxun.utils.enum import PluginType +from zhenxun.utils.utils import 
get_entity_ids __plugin_meta__ = PluginMetadata( name="消息存储", description="消息存储,将消息存入数据库", usage="", extra=PluginExtraData( author="HibiKier", version="0.1", plugin_type=PluginType.HIDDEN, ).to_dict(), ) def rule(message: UniMsg) -> bool: chat_history = on_message(rule=rule, priority=1, block=False) +TEMP_LIST = [] @chat_history.handle() -async def handle_message(message: UniMsg, session: EventSession): -    """处理消息存储""" -    try: -        await ChatHistory.create( -            user_id=session.id1, -            group_id=session.id2, +async def _(message: UniMsg, session: Uninfo): +    entity = get_entity_ids(session) +    TEMP_LIST.append( +        ChatHistory( +            user_id=entity.user_id, +            group_id=entity.group_id, text=str(message), plain_text=message.extract_plain_text(), -            bot_id=session.bot_id, +            bot_id=session.self_id, platform=session.platform, ) +    ) + + +@scheduler.scheduled_job( +    "interval", +    minutes=1, +) +async def _(): +    try: +        message_list = TEMP_LIST.copy() +        TEMP_LIST.clear() +        if message_list: +            await ChatHistory.bulk_create(message_list) +            logger.debug(f"批量添加聊天记录 {len(message_list)} 条", "定时任务") except Exception as e: logger.warning("存储聊天记录失败", "chat_history", e=e) diff --git a/zhenxun/builtin_plugins/chat_history/chat_message_handle.py b/zhenxun/builtin_plugins/chat_history/chat_message_handle.py index 10cfcf43..d9eae97f 100644 --- a/zhenxun/builtin_plugins/chat_history/chat_message_handle.py +++ b/zhenxun/builtin_plugins/chat_history/chat_message_handle.py @@ -1,4 +1,5 @@ from datetime import datetime, timedelta +from io import BytesIO from nonebot.plugin import PluginMetadata from nonebot_plugin_alconna import ( @@ -14,35 +15,38 @@ from nonebot_plugin_alconna import ( from nonebot_plugin_session import EventSession import pytz -from zhenxun.configs.utils import Command, PluginExtraData +from zhenxun.configs.config import Config +from zhenxun.configs.utils import Command, PluginExtraData, RegisterConfig from zhenxun.models.chat_history import ChatHistory from zhenxun.models.group_member_info import GroupInfoUser from zhenxun.services.log import logger from zhenxun.utils.enum import PluginType -from zhenxun.utils.image_utils import ImageTemplate +from zhenxun.utils.image_utils import BuildImage, ImageTemplate from zhenxun.utils.message import MessageUtils +from zhenxun.utils.platform import PlatformUtils __plugin_meta__ = PluginMetadata( name="消息统计", description="消息统计查询", usage=""" 格式: - 消息排行 ?[type [日,周,月,年]] ?[--des] + 消息排行 ?[type [日,周,月,季,年]] ?[--des] 快捷: - [日,周,月,年]消息排行 ?[数量] + [日,周,月,季,年]消息排行 ?[数量] 示例: 消息排行 : 所有记录排行 日消息排行 : 今日记录排行 - 周消息排行 : 今日记录排行 - 月消息排行 : 今日记录排行 - 年消息排行 : 今日记录排行 + 周消息排行 : 本周记录排行 + 月消息排行 : 本月记录排行 + 季消息排行 : 本季度记录排行 + 年消息排行 : 本年记录排行 消息排行 周 --des : 逆序周记录排行 """.strip(), extra=PluginExtraData( author="HibiKier", - version="0.1", + version="0.2", plugin_type=PluginType.NORMAL, menu_type="数据统计", commands=[ @@ -50,8 +54,19 @@ __plugin_meta__ = PluginMetadata( Command(command="日消息统计"), Command(command="周消息排行"), Command(command="月消息排行"), + Command(command="季消息排行"), Command(command="年消息排行"), ], + configs=[ + RegisterConfig( + module="chat_history", + key="SHOW_QUIT_MEMBER", + value=True, + help="是否在消息排行中显示已退群用户", + default_value=True, + type=bool, + ) + ], ).to_dict(), ) @@ -60,7 +75,7 @@ _matcher = on_alconna( Alconna( "消息排行", Option("--des", action=store_true, help_text="逆序"), - Args["type?", ["日", "周", "月", "年"]]["count?", int, 10], + Args["type?", ["日", "周", "月", "季", "年"]]["count?", int, 10], ), aliases={"消息统计"}, priority=5, ) _matcher.shortcut( - r"(?P<type>['日', '周', '月', '年'])?消息(排行|统计)\s?(?P<cnt>\d+)?", + r"(?P<type>['日', '周', '月', '季', '年'])?消息(排行|统计)\s?(?P<cnt>\d+)?", command="消息排行", 
arguments=["{type}", "{cnt}"], prefix=True, @@ -96,20 +111,57 @@ async def _( date_scope = (time_now - timedelta(days=7), time_now) elif date in ["月"]: date_scope = (time_now - timedelta(days=30), time_now) - column_name = ["名次", "昵称", "发言次数"] + elif date in ["季"]: + date_scope = (time_now - timedelta(days=90), time_now) + column_name = ["名次", "头像", "昵称", "发言次数"] + show_quit_member = Config.get_config("chat_history", "SHOW_QUIT_MEMBER", True) + + fetch_count = count.result + if not show_quit_member: + fetch_count = count.result * 2 + if rank_data := await ChatHistory.get_group_msg_rank( - group_id, count.result, "DES" if arparma.find("des") else "DESC", date_scope + group_id, fetch_count, "DES" if arparma.find("des") else "DESC", date_scope ): idx = 1 data_list = [] + for uid, num in rank_data: - if user := await GroupInfoUser.filter( + if len(data_list) >= count.result: + break + + user_in_group = await GroupInfoUser.filter( user_id=uid, group_id=group_id - ).first(): - user_name = user.user_name + ).first() + + if not user_in_group and not show_quit_member: + continue + + if user_in_group: + user_name = user_in_group.user_name else: - user_name = uid - data_list.append([idx, user_name, num]) + user_name = f"{uid}(已退群)" + + avatar_size = 40 + try: + avatar_bytes = await PlatformUtils.get_user_avatar(str(uid), "qq") + if avatar_bytes: + avatar_img = BuildImage( + avatar_size, avatar_size, background=BytesIO(avatar_bytes) + ) + await avatar_img.circle() + avatar_tuple = (avatar_img, avatar_size, avatar_size) + else: + avatar_img = BuildImage(avatar_size, avatar_size, color="#CCCCCC") + await avatar_img.circle() + avatar_tuple = (avatar_img, avatar_size, avatar_size) + except Exception as e: + logger.warning(f"获取用户头像失败: {e}", "chat_history") + avatar_img = BuildImage(avatar_size, avatar_size, color="#CCCCCC") + await avatar_img.circle() + avatar_tuple = (avatar_img, avatar_size, avatar_size) + + data_list.append([idx, avatar_tuple, user_name, num]) idx += 1 if not date_scope: if date_scope := await ChatHistory.get_group_first_msg_datetime(group_id): @@ -132,13 +184,3 @@ async def _( ) await MessageUtils.build_message(A).finish(reply_to=True) await MessageUtils.build_message("群组消息记录为空...").finish() - - -# # @test.handle() -# # async def _(event: MessageEvent): -# # print(await ChatHistory.get_user_msg(event.user_id, "private")) -# # print(await ChatHistory.get_user_msg_count(event.user_id, "private")) -# # print(await ChatHistory.get_user_msg(event.user_id, "group")) -# # print(await ChatHistory.get_user_msg_count(event.user_id, "group")) -# # print(await ChatHistory.get_group_msg(event.group_id)) -# # print(await ChatHistory.get_group_msg_count(event.group_id)) diff --git a/zhenxun/builtin_plugins/help/__init__.py b/zhenxun/builtin_plugins/help/__init__.py index 726d4d1e..17002f0c 100644 --- a/zhenxun/builtin_plugins/help/__init__.py +++ b/zhenxun/builtin_plugins/help/__init__.py @@ -37,8 +37,8 @@ __plugin_meta__ = PluginMetadata( configs=[ RegisterConfig( key="type", - value="normal", - help="帮助图片样式 ['normal', 'HTML', 'zhenxun']", + value="zhenxun", + help="帮助图片样式 [normal, HTML, zhenxun]", default_value="zhenxun", ) ], diff --git a/zhenxun/builtin_plugins/help/_data_source.py b/zhenxun/builtin_plugins/help/_data_source.py index cfaa4503..86f42536 100644 --- a/zhenxun/builtin_plugins/help/_data_source.py +++ b/zhenxun/builtin_plugins/help/_data_source.py @@ -40,7 +40,9 @@ async def create_help_img( match help_type: case "html": - result = BuildImage.open(await build_html_image(group_id, 
is_detail)) + result = BuildImage.open( + await build_html_image(session, group_id, is_detail) + ) case "zhenxun": result = BuildImage.open( await build_zhenxun_image(session, group_id, is_detail) diff --git a/zhenxun/builtin_plugins/help/_utils.py b/zhenxun/builtin_plugins/help/_utils.py index 6c382c7d..0554fc8d 100644 --- a/zhenxun/builtin_plugins/help/_utils.py +++ b/zhenxun/builtin_plugins/help/_utils.py @@ -1,5 +1,8 @@ from collections.abc import Callable +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.models.bot_console import BotConsole from zhenxun.models.group_console import GroupConsole from zhenxun.models.plugin_info import PluginInfo from zhenxun.utils.enum import PluginType @@ -27,13 +30,15 @@ async def sort_type() -> dict[str, list[PluginInfo]]: async def classify_plugin( - group_id: str | None, is_detail: bool, handle: Callable + session: Uninfo, group_id: str | None, is_detail: bool, handle: Callable ) -> dict[str, list]: """对插件进行分类并判断状态 参数: + session: Uninfo对象 group_id: 群组id is_detail: 是否详细帮助 + handle: 回调方法 返回: dict[str, list[Item]]: 分类插件数据 @@ -41,9 +46,10 @@ async def classify_plugin( sort_data = await sort_type() classify: dict[str, list] = {} group = await GroupConsole.get_or_none(group_id=group_id) if group_id else None + bot = await BotConsole.get_or_none(bot_id=session.self_id) for menu, value in sort_data.items(): for plugin in value: if not classify.get(menu): classify[menu] = [] - classify[menu].append(handle(plugin, group, is_detail)) + classify[menu].append(handle(bot, plugin, group, is_detail)) return classify diff --git a/zhenxun/builtin_plugins/help/html_help.py b/zhenxun/builtin_plugins/help/html_help.py index 1815b99a..7c552a0d 100644 --- a/zhenxun/builtin_plugins/help/html_help.py +++ b/zhenxun/builtin_plugins/help/html_help.py @@ -2,9 +2,11 @@ import os import random from nonebot_plugin_htmlrender import template_to_pic +from nonebot_plugin_uninfo import Uninfo from pydantic import BaseModel from zhenxun.configs.path_config import TEMPLATE_PATH +from zhenxun.models.bot_console import BotConsole from zhenxun.models.group_console import GroupConsole from zhenxun.models.plugin_info import PluginInfo from zhenxun.utils.enum import BlockType @@ -48,11 +50,12 @@ ICON2STR = { def __handle_item( - plugin: PluginInfo, group: GroupConsole | None, is_detail: bool + bot: BotConsole, plugin: PluginInfo, group: GroupConsole | None, is_detail: bool ) -> Item: """构造Item 参数: + bot: BotConsole plugin: PluginInfo group: 群组 is_detail: 是否详细 @@ -73,10 +76,13 @@ def __handle_item( ]: sta = 2 if group: - if f"{plugin.module}:super," in group.block_plugin: + if f"{plugin.module}," in group.superuser_block_plugin: sta = 2 if f"{plugin.module}," in group.block_plugin: sta = 1 + if bot: + if f"{plugin.module}," in bot.block_plugins: + sta = 2 return Item(plugin_name=plugin.name, sta=sta) @@ -119,14 +125,17 @@ def build_plugin_data(classify: dict[str, list[Item]]) -> list[dict[str, str]]: return plugin_list -async def build_html_image(group_id: str | None, is_detail: bool) -> bytes: +async def build_html_image( + session: Uninfo, group_id: str | None, is_detail: bool +) -> bytes: """构造HTML帮助图片 参数: + session: Uninfo group_id: 群号 is_detail: 是否详细帮助 """ - classify = await classify_plugin(group_id, is_detail, __handle_item) + classify = await classify_plugin(session, group_id, is_detail, __handle_item) plugin_list = build_plugin_data(classify) return await template_to_pic( template_path=str((TEMPLATE_PATH / "menu").absolute()), diff --git 
a/zhenxun/builtin_plugins/help/zhenxun_help.py b/zhenxun/builtin_plugins/help/zhenxun_help.py index f6d930e6..b96d3c59 100644 --- a/zhenxun/builtin_plugins/help/zhenxun_help.py +++ b/zhenxun/builtin_plugins/help/zhenxun_help.py @@ -6,6 +6,7 @@ from pydantic import BaseModel from zhenxun.configs.config import BotConfig from zhenxun.configs.path_config import TEMPLATE_PATH from zhenxun.configs.utils import PluginExtraData +from zhenxun.models.bot_console import BotConsole from zhenxun.models.group_console import GroupConsole from zhenxun.models.plugin_info import PluginInfo from zhenxun.utils.enum import BlockType @@ -21,12 +22,19 @@ class Item(BaseModel): """插件命令""" -def __handle_item(plugin: PluginInfo, group: GroupConsole | None, is_detail: bool): +def __handle_item( + bot: BotConsole | None, + plugin: PluginInfo, + group: GroupConsole | None, + is_detail: bool, +): """构造Item 参数: + bot: BotConsole plugin: PluginInfo group: 群组 + is_detail: 是否为详细 返回: Item: Item @@ -40,6 +48,8 @@ def __handle_item(plugin: PluginInfo, group: GroupConsole | None, is_detail: boo plugin.name = f"{plugin.name}(不可用)" elif group and f"{plugin.module}," in group.block_plugin: plugin.name = f"{plugin.name}(不可用)" + elif bot and f"{plugin.module}," in bot.block_plugins: + plugin.name = f"{plugin.name}(不可用)" commands = [] nb_plugin = nonebot.get_plugin_by_module_name(plugin.module_path) if is_detail and nb_plugin and nb_plugin.metadata and nb_plugin.metadata.extra: @@ -142,7 +152,7 @@ async def build_zhenxun_image( group_id: 群号 is_detail: 是否详细帮助 """ - classify = await classify_plugin(group_id, is_detail, __handle_item) + classify = await classify_plugin(session, group_id, is_detail, __handle_item) plugin_list = build_plugin_data(classify) platform = PlatformUtils.get_platform(session) bot_id = BotConfig.get_qbot_uid(session.self_id) or session.self_id diff --git a/zhenxun/builtin_plugins/hooks/__init__.py b/zhenxun/builtin_plugins/hooks/__init__.py index 3ad29d71..4136ca95 100644 --- a/zhenxun/builtin_plugins/hooks/__init__.py +++ b/zhenxun/builtin_plugins/hooks/__init__.py @@ -49,4 +49,14 @@ Config.add_plugin_config( type=bool, ) +Config.add_plugin_config( + "hook", + "RECORD_BOT_SENT_MESSAGES", + True, + help="记录bot消息校内", + default_value=True, + type=bool, +) + + nonebot.load_plugins(str(Path(__file__).parent.resolve())) diff --git a/zhenxun/builtin_plugins/hooks/_auth_checker.py b/zhenxun/builtin_plugins/hooks/_auth_checker.py deleted file mode 100644 index 3a990d89..00000000 --- a/zhenxun/builtin_plugins/hooks/_auth_checker.py +++ /dev/null @@ -1,597 +0,0 @@ -from typing import ClassVar - -from nonebot.adapters import Bot, Event -from nonebot.adapters.onebot.v11 import PokeNotifyEvent -from nonebot.exception import IgnoredException -from nonebot.matcher import Matcher -from nonebot_plugin_alconna import At, UniMsg -from nonebot_plugin_session import EventSession -from pydantic import BaseModel -from tortoise.exceptions import IntegrityError - -from zhenxun.configs.config import Config -from zhenxun.models.bot_console import BotConsole -from zhenxun.models.group_console import GroupConsole -from zhenxun.models.level_user import LevelUser -from zhenxun.models.plugin_info import PluginInfo -from zhenxun.models.plugin_limit import PluginLimit -from zhenxun.models.sign_user import SignUser -from zhenxun.models.user_console import UserConsole -from zhenxun.services.log import logger -from zhenxun.utils.enum import ( - BlockType, - GoldHandle, - LimitWatchType, - PluginLimitType, - PluginType, -) -from 
zhenxun.utils.exception import InsufficientGold -from zhenxun.utils.message import MessageUtils -from zhenxun.utils.utils import CountLimiter, FreqLimiter, UserBlockLimiter - -base_config = Config.get("hook") - - -class Limit(BaseModel): - limit: PluginLimit - limiter: FreqLimiter | UserBlockLimiter | CountLimiter - - class Config: - arbitrary_types_allowed = True - - -class LimitManage: - add_module: ClassVar[list] = [] - - cd_limit: ClassVar[dict[str, Limit]] = {} - block_limit: ClassVar[dict[str, Limit]] = {} - count_limit: ClassVar[dict[str, Limit]] = {} - - @classmethod - def add_limit(cls, limit: PluginLimit): - """添加限制 - - 参数: - limit: PluginLimit - """ - if limit.module not in cls.add_module: - cls.add_module.append(limit.module) - if limit.limit_type == PluginLimitType.BLOCK: - cls.block_limit[limit.module] = Limit( - limit=limit, limiter=UserBlockLimiter() - ) - elif limit.limit_type == PluginLimitType.CD: - cls.cd_limit[limit.module] = Limit( - limit=limit, limiter=FreqLimiter(limit.cd) - ) - elif limit.limit_type == PluginLimitType.COUNT: - cls.count_limit[limit.module] = Limit( - limit=limit, limiter=CountLimiter(limit.max_count) - ) - - @classmethod - def unblock( - cls, module: str, user_id: str, group_id: str | None, channel_id: str | None - ): - """解除插件block - - 参数: - module: 模块名 - user_id: 用户id - group_id: 群组id - channel_id: 频道id - """ - if limit_model := cls.block_limit.get(module): - limit = limit_model.limit - limiter: UserBlockLimiter = limit_model.limiter # type: ignore - key_type = user_id - if group_id and limit.watch_type == LimitWatchType.GROUP: - key_type = channel_id or group_id - logger.debug( - f"解除对象: {key_type} 的block限制", - "AuthChecker", - session=user_id, - group_id=group_id, - ) - limiter.set_false(key_type) - - @classmethod - async def check( - cls, - module: str, - user_id: str, - group_id: str | None, - channel_id: str | None, - session: EventSession, - ): - """检测限制 - - 参数: - module: 模块名 - user_id: 用户id - group_id: 群组id - channel_id: 频道id - session: Session - - 异常: - IgnoredException: IgnoredException - """ - if limit_model := cls.cd_limit.get(module): - await cls.__check(limit_model, user_id, group_id, channel_id, session) - if limit_model := cls.block_limit.get(module): - await cls.__check(limit_model, user_id, group_id, channel_id, session) - if limit_model := cls.count_limit.get(module): - await cls.__check(limit_model, user_id, group_id, channel_id, session) - - @classmethod - async def __check( - cls, - limit_model: Limit | None, - user_id: str, - group_id: str | None, - channel_id: str | None, - session: EventSession, - ): - """检测限制 - - 参数: - limit_model: Limit - user_id: 用户id - group_id: 群组id - channel_id: 频道id - session: Session - - 异常: - IgnoredException: IgnoredException - """ - if not limit_model: - return - limit = limit_model.limit - limiter = limit_model.limiter - is_limit = ( - LimitWatchType.ALL - or (group_id and limit.watch_type == LimitWatchType.GROUP) - or (not group_id and limit.watch_type == LimitWatchType.USER) - ) - key_type = user_id - if group_id and limit.watch_type == LimitWatchType.GROUP: - key_type = channel_id or group_id - if is_limit and not limiter.check(key_type): - if limit.result: - await MessageUtils.build_message(limit.result).send() - logger.debug( - f"{limit.module}({limit.limit_type}) 正在限制中...", - "AuthChecker", - session=session, - ) - raise IgnoredException(f"{limit.module} 正在限制中...") - else: - logger.debug( - f"开始进行限制 {limit.module}({limit.limit_type})...", - "AuthChecker", - session=user_id, - 
group_id=group_id, - ) - if isinstance(limiter, FreqLimiter): - limiter.start_cd(key_type) - if isinstance(limiter, UserBlockLimiter): - limiter.set_true(key_type) - if isinstance(limiter, CountLimiter): - limiter.increase(key_type) - - -class IsSuperuserException(Exception): - pass - - -class AuthChecker: - """ - 权限检查 - """ - - def __init__(self): - check_notice_info_cd = Config.get_config("hook", "CHECK_NOTICE_INFO_CD") - if check_notice_info_cd is None or check_notice_info_cd < 0: - raise ValueError("模块: [hook], 配置项: [CHECK_NOTICE_INFO_CD] 为空或小于0") - self._flmt = FreqLimiter(check_notice_info_cd) - self._flmt_g = FreqLimiter(check_notice_info_cd) - self._flmt_s = FreqLimiter(check_notice_info_cd) - self._flmt_c = FreqLimiter(check_notice_info_cd) - - def is_send_limit_message(self, plugin: PluginInfo, sid: str) -> bool: - """是否发送提示消息 - - 参数: - plugin: PluginInfo - - 返回: - bool: 是否发送提示消息 - """ - if not base_config.get("IS_SEND_TIP_MESSAGE"): - return False - if plugin.plugin_type == PluginType.DEPENDANT: - return False - if plugin.ignore_prompt: - return False - return self._flmt_s.check(sid) - - async def auth( - self, - matcher: Matcher, - event: Event, - bot: Bot, - session: EventSession, - message: UniMsg, - ): - """权限检查 - - 参数: - matcher: matcher - bot: bot - session: EventSession - message: UniMsg - """ - is_ignore = False - cost_gold = 0 - user_id = session.id1 - group_id = session.id3 - channel_id = session.id2 - if not group_id: - group_id = channel_id - channel_id = None - if matcher.type == "notice" and not isinstance(event, PokeNotifyEvent): - """过滤除poke外的notice""" - return - if user_id and matcher.plugin and (module_path := matcher.plugin.module_name): - try: - user = await UserConsole.get_user(user_id, session.platform) - except IntegrityError as e: - logger.debug( - "重复创建用户,已跳过该次权限...", - "AuthChecker", - session=session, - e=e, - ) - return - if plugin := await PluginInfo.get_or_none(module_path=module_path): - if plugin.plugin_type == PluginType.HIDDEN: - logger.debug( - f"插件: {plugin.name}:{plugin.module} " - "为HIDDEN,已跳过权限检查..." 
- ) - return - try: - cost_gold = await self.auth_cost(user, plugin, session) - if session.id1 in bot.config.superusers: - if plugin.plugin_type == PluginType.SUPERUSER: - raise IsSuperuserException() - if not plugin.limit_superuser: - cost_gold = 0 - raise IsSuperuserException() - await self.auth_bot(plugin, bot.self_id) - await self.auth_group(plugin, session, message) - await self.auth_admin(plugin, session) - await self.auth_plugin(plugin, session, event) - await self.auth_limit(plugin, session) - except IsSuperuserException: - logger.debug( - "超级用户或被ban跳过权限检测...", "AuthChecker", session=session - ) - except IgnoredException: - is_ignore = True - LimitManage.unblock( - matcher.plugin.name, user_id, group_id, channel_id - ) - except AssertionError as e: - is_ignore = True - logger.debug("消息无法发送", session=session, e=e) - if cost_gold and user_id: - """花费金币""" - try: - await UserConsole.reduce_gold( - user_id, - cost_gold, - GoldHandle.PLUGIN, - matcher.plugin.name if matcher.plugin else "", - session.platform, - ) - except InsufficientGold: - if u := await UserConsole.get_user(user_id): - u.gold = 0 - await u.save(update_fields=["gold"]) - logger.debug( - f"调用功能花费金币: {cost_gold}", "AuthChecker", session=session - ) - if is_ignore: - raise IgnoredException("权限检测 ignore") - - async def auth_bot(self, plugin: PluginInfo, bot_id: str): - """机器人权限 - - 参数: - plugin: PluginInfo - bot_id: bot_id - """ - if not await BotConsole.get_bot_status(bot_id): - logger.debug("Bot休眠中阻断权限检测...", "AuthChecker") - raise IgnoredException("BotConsole休眠权限检测 ignore") - if await BotConsole.is_block_plugin(bot_id, plugin.module): - logger.debug( - f"Bot插件 {plugin.name}({plugin.module}) 权限检查结果为关闭...", - "AuthChecker", - ) - raise IgnoredException("BotConsole插件权限检测 ignore") - - async def auth_limit(self, plugin: PluginInfo, session: EventSession): - """插件限制 - - 参数: - plugin: PluginInfo - session: EventSession - """ - user_id = session.id1 - group_id = session.id3 - channel_id = session.id2 - if not group_id: - group_id = channel_id - channel_id = None - if plugin.module not in LimitManage.add_module: - limit_list: list[PluginLimit] = await plugin.plugin_limit.filter( - status=True - ).all() # type: ignore - for limit in limit_list: - LimitManage.add_limit(limit) - if user_id: - await LimitManage.check( - plugin.module, user_id, group_id, channel_id, session - ) - - async def auth_plugin( - self, plugin: PluginInfo, session: EventSession, event: Event - ): - """插件状态 - - 参数: - plugin: PluginInfo - session: EventSession - """ - group_id = session.id3 - channel_id = session.id2 - if not group_id: - group_id = channel_id - channel_id = None - if user_id := session.id1: - if plugin.impression > 0: - sign_user = await SignUser.get_user(user_id) - if float(sign_user.impression) < plugin.impression: - if self.is_send_limit_message(plugin, user_id): - self._flmt_s.start_cd(user_id) - await MessageUtils.build_message( - f"好感度不足哦,当前功能需要好感度: {plugin.impression}," - "请继续签到提升好感度吧!" - ).send(reply_to=True) - logger.debug( - f"{plugin.name}({plugin.module}) 用户好感度不足...", - "AuthChecker", - session=session, - ) - raise IgnoredException("好感度不足...") - if group_id: - sid = group_id or user_id - if await GroupConsole.is_superuser_block_plugin( - group_id, plugin.module - ): - """超级用户群组插件状态""" - if self.is_send_limit_message(plugin, sid): - self._flmt_s.start_cd(group_id or user_id) - await MessageUtils.build_message( - "超级管理员禁用了该群此功能..." 
- ).send(reply_to=True) - logger.debug( - f"{plugin.name}({plugin.module}) 超级管理员禁用了该群此功能...", - "AuthChecker", - session=session, - ) - raise IgnoredException("超级管理员禁用了该群此功能...") - if await GroupConsole.is_normal_block_plugin(group_id, plugin.module): - """群组插件状态""" - if self.is_send_limit_message(plugin, sid): - self._flmt_s.start_cd(group_id or user_id) - await MessageUtils.build_message("该群未开启此功能...").send( - reply_to=True - ) - logger.debug( - f"{plugin.name}({plugin.module}) 未开启此功能...", - "AuthChecker", - session=session, - ) - raise IgnoredException("该群未开启此功能...") - if plugin.block_type == BlockType.GROUP: - """全局群组禁用""" - try: - if self.is_send_limit_message(plugin, sid): - self._flmt_c.start_cd(group_id) - await MessageUtils.build_message( - "该功能在群组中已被禁用..." - ).send(reply_to=True) - except Exception as e: - logger.error( - "auth_plugin 发送消息失败", - "AuthChecker", - session=session, - e=e, - ) - logger.debug( - f"{plugin.name}({plugin.module}) 该插件在群组中已被禁用...", - "AuthChecker", - session=session, - ) - raise IgnoredException("该插件在群组中已被禁用...") - else: - sid = user_id - if plugin.block_type == BlockType.PRIVATE: - """全局私聊禁用""" - try: - if self.is_send_limit_message(plugin, sid): - self._flmt_c.start_cd(user_id) - await MessageUtils.build_message( - "该功能在私聊中已被禁用..." - ).send() - except Exception as e: - logger.error( - "auth_admin 发送消息失败", - "AuthChecker", - session=session, - e=e, - ) - logger.debug( - f"{plugin.name}({plugin.module}) 该插件在私聊中已被禁用...", - "AuthChecker", - session=session, - ) - raise IgnoredException("该插件在私聊中已被禁用...") - if not plugin.status and plugin.block_type == BlockType.ALL: - """全局状态""" - if group_id and await GroupConsole.is_super_group(group_id): - raise IsSuperuserException() - logger.debug( - f"{plugin.name}({plugin.module}) 全局未开启此功能...", - "AuthChecker", - session=session, - ) - if self.is_send_limit_message(plugin, sid): - self._flmt_s.start_cd(group_id or user_id) - await MessageUtils.build_message("全局未开启此功能...").send() - raise IgnoredException("全局未开启此功能...") - - async def auth_admin(self, plugin: PluginInfo, session: EventSession): - """管理员命令 个人权限 - - 参数: - plugin: PluginInfo - session: EventSession - """ - user_id = session.id1 - if user_id and plugin.admin_level: - if group_id := session.id3 or session.id2: - if not await LevelUser.check_level( - user_id, group_id, plugin.admin_level - ): - try: - if self._flmt.check(user_id): - self._flmt.start_cd(user_id) - await MessageUtils.build_message( - [ - At(flag="user", target=user_id), - f"你的权限不足喔," - f"该功能需要的权限等级: {plugin.admin_level}", - ] - ).send(reply_to=True) - except Exception as e: - logger.error( - "auth_admin 发送消息失败", - "AuthChecker", - session=session, - e=e, - ) - logger.debug( - f"{plugin.name}({plugin.module}) 管理员权限不足...", - "AuthChecker", - session=session, - ) - raise IgnoredException("管理员权限不足...") - elif not await LevelUser.check_level(user_id, None, plugin.admin_level): - try: - await MessageUtils.build_message( - f"你的权限不足喔,该功能需要的权限等级: {plugin.admin_level}" - ).send() - except Exception as e: - logger.error( - "auth_admin 发送消息失败", "AuthChecker", session=session, e=e - ) - logger.debug( - f"{plugin.name}({plugin.module}) 管理员权限不足...", - "AuthChecker", - session=session, - ) - raise IgnoredException("权限不足") - - async def auth_group( - self, plugin: PluginInfo, session: EventSession, message: UniMsg - ): - """群黑名单检测 群总开关检测 - - 参数: - plugin: PluginInfo - session: EventSession - message: UniMsg - """ - if not (group_id := session.id3 or session.id2): - return - text = message.extract_plain_text() - 
group = await GroupConsole.get_group(group_id) - if not group: - """群不存在""" - logger.debug( - "群组信息不存在...", - "AuthChecker", - session=session, - ) - raise IgnoredException("群不存在") - if group.level < 0: - """群权限小于0""" - logger.debug( - "群黑名单, 群权限-1...", - "AuthChecker", - session=session, - ) - raise IgnoredException("群黑名单") - if not group.status: - """群休眠""" - if text.strip() != "醒来": - logger.debug("群休眠状态...", "AuthChecker", session=session) - raise IgnoredException("群休眠状态") - if plugin.level > group.level: - """插件等级大于群等级""" - logger.debug( - f"{plugin.name}({plugin.module}) 群等级限制.." - f"该功能需要的群等级: {plugin.level}..", - "AuthChecker", - session=session, - ) - raise IgnoredException(f"{plugin.name}({plugin.module}) 群等级限制...") - - async def auth_cost( - self, user: UserConsole, plugin: PluginInfo, session: EventSession - ) -> int: - """检测是否满足金币条件 - - 参数: - user: UserConsole - plugin: PluginInfo - session: EventSession - - 返回: - int: 需要消耗的金币 - """ - if user.gold < plugin.cost_gold: - """插件消耗金币不足""" - try: - await MessageUtils.build_message( - f"金币不足..该功能需要{plugin.cost_gold}金币.." - ).send() - except Exception as e: - logger.error( - "auth_cost 发送消息失败", "AuthChecker", session=session, e=e - ) - logger.debug( - f"{plugin.name}({plugin.module}) 金币限制.." - f"该功能需要{plugin.cost_gold}金币..", - "AuthChecker", - session=session, - ) - raise IgnoredException(f"{plugin.name}({plugin.module}) 金币限制...") - return plugin.cost_gold - - -checker = AuthChecker() diff --git a/zhenxun/builtin_plugins/hooks/auth/auth_admin.py b/zhenxun/builtin_plugins/hooks/auth/auth_admin.py new file mode 100644 index 00000000..1e93f089 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/auth_admin.py @@ -0,0 +1,52 @@ +from nonebot_plugin_alconna import At +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.models.level_user import LevelUser +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.services.cache import Cache +from zhenxun.utils.enum import CacheType +from zhenxun.utils.utils import get_entity_ids + +from .exception import SkipPluginException +from .utils import send_message + + +async def auth_admin(plugin: PluginInfo, session: Uninfo): + """管理员命令 个人权限 + + 参数: + plugin: PluginInfo + session: Uninfo + """ + if not plugin.admin_level: + return + entity = get_entity_ids(session) + cache = Cache[list[LevelUser]](CacheType.LEVEL) + user_list = await cache.get(session.user.id) or [] + if entity.group_id: + user_list += await cache.get(session.user.id, entity.group_id) or [] + if user_list: + user = max(user_list, key=lambda x: x.user_level) + user_level = user.user_level + else: + user_level = 0 + if user_level < plugin.admin_level: + await send_message( + session, + [ + At(flag="user", target=session.user.id), + f"你的权限不足喔,该功能需要的权限等级: {plugin.admin_level}", + ], + entity.user_id, + ) + raise SkipPluginException( + f"{plugin.name}({plugin.module}) 管理员权限不足..." 
+ ) + elif user_list: + user = max(user_list, key=lambda x: x.user_level) + if user.user_level < plugin.admin_level: + await send_message( + session, + f"你的权限不足喔,该功能需要的权限等级: {plugin.admin_level}", + ) + raise SkipPluginException(f"{plugin.name}({plugin.module}) 管理员权限不足...") diff --git a/zhenxun/builtin_plugins/hooks/auth/auth_ban.py b/zhenxun/builtin_plugins/hooks/auth/auth_ban.py new file mode 100644 index 00000000..dcca0731 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/auth_ban.py @@ -0,0 +1,175 @@ +import asyncio + +from nonebot.adapters import Bot +from nonebot.matcher import Matcher +from nonebot_plugin_alconna import At +from nonebot_plugin_uninfo import Uninfo +from tortoise.exceptions import MultipleObjectsReturned + +from zhenxun.configs.config import Config +from zhenxun.models.ban_console import BanConsole +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.services.cache import Cache +from zhenxun.services.log import logger +from zhenxun.utils.enum import CacheType, PluginType +from zhenxun.utils.utils import EntityIDs, get_entity_ids + +from .config import LOGGER_COMMAND +from .exception import SkipPluginException +from .utils import freq, send_message + +Config.add_plugin_config( + "hook", + "BAN_RESULT", + "才不会给你发消息.", + help="对被ban用户发送的消息", +) + + +async def is_ban(user_id: str | None, group_id: str | None) -> int: + if not user_id and not group_id: + return 0 + cache = Cache[BanConsole](CacheType.BAN) + group_user, user = await asyncio.gather( + cache.get(user_id, group_id), cache.get(user_id) + ) + results = [] + if group_user: + results.append(group_user) + if user: + results.append(user) + if not results: + return 0 + for result in results: + if result.duration > 0 or result.duration == -1: + return await BanConsole.check_ban_time(user_id, group_id) + return 0 + + +def check_plugin_type(matcher: Matcher) -> bool: + """判断插件类型是否是隐藏插件 + + 参数: + matcher: Matcher + + 返回: + bool: 是否为隐藏插件 + """ + if plugin := matcher.plugin: + if metadata := plugin.metadata: + extra = metadata.extra + if extra.get("plugin_type") in [PluginType.HIDDEN]: + return False + return True + + +def format_time(time: float) -> str: + """格式化时间 + + 参数: + time: ban时长 + + 返回: + str: 格式化时间文本 + """ + if time == -1: + return "∞" + time = abs(int(time)) + if time < 60: + time_str = f"{time!s} 秒" + else: + minute = int(time / 60) + if minute > 60: + hours = minute // 60 + minute %= 60 + time_str = f"{hours} 小时 {minute}分钟" + else: + time_str = f"{minute} 分钟" + return time_str + + +async def group_handle(cache: Cache[list[BanConsole]], group_id: str): + """群组ban检查 + + 参数: + cache: cache + group_id: 群组id + + 异常: + SkipPluginException: 群组处于黑名单 + """ + try: + if await is_ban(None, group_id): + raise SkipPluginException("群组处于黑名单中...") + except MultipleObjectsReturned: + logger.warning( + "群组黑名单数据重复,过滤该次hook并移除多余数据...", LOGGER_COMMAND + ) + ids = await BanConsole.filter(user_id="", group_id=group_id).values_list( + "id", flat=True + ) + await BanConsole.filter(id__in=ids[:-1]).delete() + await cache.reload() + + +async def user_handle( + module: str, cache: Cache[list[BanConsole]], entity: EntityIDs, session: Uninfo +): + """用户ban检查 + + 参数: + module: 插件模块名 + cache: cache + user_id: 用户id + session: Uninfo + + 异常: + SkipPluginException: 用户处于黑名单 + """ + ban_result = Config.get_config("hook", "BAN_RESULT") + try: + time = await is_ban(entity.user_id, entity.group_id) + if not time: + return + time_str = format_time(time) + db_plugin = await Cache[PluginInfo](CacheType.PLUGINS).get(module) + if ( + 
db_plugin + # and not db_plugin.ignore_prompt + and time != -1 + and ban_result + and freq.is_send_limit_message(db_plugin, entity.user_id, False) + ): + await send_message( + session, + [ + At(flag="user", target=entity.user_id), + f"{ban_result}\n在..在 {time_str} 后才会理你喔", + ], + entity.user_id, + ) + raise SkipPluginException("用户处于黑名单中...") + except MultipleObjectsReturned: + logger.warning( + "用户黑名单数据重复,过滤该次hook并移除多余数据...", LOGGER_COMMAND + ) + ids = await BanConsole.filter(user_id=entity.user_id, group_id="").values_list( + "id", flat=True + ) + await BanConsole.filter(id__in=ids[:-1]).delete() + await cache.reload() + + +async def auth_ban(matcher: Matcher, bot: Bot, session: Uninfo): + if not check_plugin_type(matcher): + return + if not matcher.plugin_name: + return + entity = get_entity_ids(session) + if entity.user_id in bot.config.superusers: + return + cache = Cache[list[BanConsole]](CacheType.BAN) + if entity.group_id: + await group_handle(cache, entity.group_id) + if entity.user_id: + await user_handle(matcher.plugin_name, cache, entity, session) diff --git a/zhenxun/builtin_plugins/hooks/auth/auth_bot.py b/zhenxun/builtin_plugins/hooks/auth/auth_bot.py new file mode 100644 index 00000000..2427223f --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/auth_bot.py @@ -0,0 +1,28 @@ +from zhenxun.models.bot_console import BotConsole +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.services.cache import Cache +from zhenxun.utils.common_utils import CommonUtils +from zhenxun.utils.enum import CacheType + +from .exception import SkipPluginException + + +async def auth_bot(plugin: PluginInfo, bot_id: str): + """bot层面的权限检查 + + 参数: + plugin: PluginInfo + bot_id: bot id + + 异常: + SkipPluginException: 忽略插件 + SkipPluginException: 忽略插件 + """ + if cache := Cache[BotConsole](CacheType.BOT): + bot = await cache.get(bot_id) + if not bot or not bot.status: + raise SkipPluginException("Bot不存在或休眠中阻断权限检测...") + if CommonUtils.format(plugin.module) in bot.block_plugins: + raise SkipPluginException( + f"Bot插件 {plugin.name}({plugin.module}) 权限检查结果为关闭..." 
+ ) diff --git a/zhenxun/builtin_plugins/hooks/auth/auth_cost.py b/zhenxun/builtin_plugins/hooks/auth/auth_cost.py new file mode 100644 index 00000000..7a971085 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/auth_cost.py @@ -0,0 +1,24 @@ +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.models.user_console import UserConsole + +from .exception import SkipPluginException +from .utils import send_message + + +async def auth_cost(user: UserConsole, plugin: PluginInfo, session: Uninfo) -> int: + """检测是否满足金币条件 + + 参数: + plugin: PluginInfo + session: Uninfo + + 返回: + int: 需要消耗的金币 + """ + if user.gold < plugin.cost_gold: + """插件消耗金币不足""" + await send_message(session, f"金币不足..该功能需要{plugin.cost_gold}金币..") + raise SkipPluginException(f"{plugin.name}({plugin.module}) 金币限制...") + return plugin.cost_gold diff --git a/zhenxun/builtin_plugins/hooks/auth/auth_group.py b/zhenxun/builtin_plugins/hooks/auth/auth_group.py new file mode 100644 index 00000000..290a3ad9 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/auth_group.py @@ -0,0 +1,35 @@ +from nonebot_plugin_alconna import UniMsg + +from zhenxun.models.group_console import GroupConsole +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.services.cache import Cache +from zhenxun.utils.enum import CacheType +from zhenxun.utils.utils import EntityIDs + +from .config import SwitchEnum +from .exception import SkipPluginException + + +async def auth_group(plugin: PluginInfo, entity: EntityIDs, message: UniMsg): + """群黑名单检测 群总开关检测 + + 参数: + plugin: PluginInfo + entity: EntityIDs + message: UniMsg + """ + if not entity.group_id: + return + text = message.extract_plain_text() + group = await Cache[GroupConsole](CacheType.GROUPS).get(entity.group_id) + if not group: + raise SkipPluginException("群组信息不存在...") + if group.level < 0: + raise SkipPluginException("群组黑名单, 目标群组群权限权限-1...") + if text.strip() != SwitchEnum.ENABLE and not group.status: + raise SkipPluginException("群组休眠状态...") + if plugin.level > group.level: + raise SkipPluginException( + f"{plugin.name}({plugin.module}) 群等级限制," + f"该功能需要的群等级: {plugin.level}..." 
+ ) diff --git a/zhenxun/builtin_plugins/hooks/auth/auth_limit.py b/zhenxun/builtin_plugins/hooks/auth/auth_limit.py new file mode 100644 index 00000000..fe29ebc4 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/auth_limit.py @@ -0,0 +1,194 @@ +from typing import ClassVar + +import nonebot +from nonebot_plugin_uninfo import Uninfo +from pydantic import BaseModel + +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.models.plugin_limit import PluginLimit +from zhenxun.services.log import logger +from zhenxun.utils.enum import LimitWatchType, PluginLimitType +from zhenxun.utils.message import MessageUtils +from zhenxun.utils.utils import ( + CountLimiter, + FreqLimiter, + UserBlockLimiter, + get_entity_ids, +) + +from .config import LOGGER_COMMAND +from .exception import SkipPluginException + +driver = nonebot.get_driver() + + +@driver.on_startup +async def _(): + """初始化限制""" + await LimitManager.init_limit() + + +class Limit(BaseModel): + limit: PluginLimit + limiter: FreqLimiter | UserBlockLimiter | CountLimiter + + class Config: + arbitrary_types_allowed = True + + +class LimitManager: + add_module: ClassVar[list] = [] + + cd_limit: ClassVar[dict[str, Limit]] = {} + block_limit: ClassVar[dict[str, Limit]] = {} + count_limit: ClassVar[dict[str, Limit]] = {} + + @classmethod + async def init_limit(cls): + """初始化限制""" + limit_list = await PluginLimit.filter(status=True).all() + for limit in limit_list: + cls.add_limit(limit) + + @classmethod + def add_limit(cls, limit: PluginLimit): + """添加限制 + + 参数: + limit: PluginLimit + """ + if limit.module not in cls.add_module: + cls.add_module.append(limit.module) + if limit.limit_type == PluginLimitType.BLOCK: + cls.block_limit[limit.module] = Limit( + limit=limit, limiter=UserBlockLimiter() + ) + elif limit.limit_type == PluginLimitType.CD: + cls.cd_limit[limit.module] = Limit( + limit=limit, limiter=FreqLimiter(limit.cd) + ) + elif limit.limit_type == PluginLimitType.COUNT: + cls.count_limit[limit.module] = Limit( + limit=limit, limiter=CountLimiter(limit.max_count) + ) + + @classmethod + def unblock( + cls, module: str, user_id: str, group_id: str | None, channel_id: str | None + ): + """解除插件block + + 参数: + module: 模块名 + user_id: 用户id + group_id: 群组id + channel_id: 频道id + """ + if limit_model := cls.block_limit.get(module): + limit = limit_model.limit + limiter: UserBlockLimiter = limit_model.limiter # type: ignore + key_type = user_id + if group_id and limit.watch_type == LimitWatchType.GROUP: + key_type = channel_id or group_id + logger.debug( + f"解除对象: {key_type} 的block限制", + LOGGER_COMMAND, + session=user_id, + group_id=group_id, + ) + limiter.set_false(key_type) + + @classmethod + async def check( + cls, + module: str, + user_id: str, + group_id: str | None, + channel_id: str | None, + ): + """检测限制 + + 参数: + module: 模块名 + user_id: 用户id + group_id: 群组id + channel_id: 频道id + + 异常: + IgnoredException: IgnoredException + """ + if limit_model := cls.cd_limit.get(module): + await cls.__check(limit_model, user_id, group_id, channel_id) + if limit_model := cls.block_limit.get(module): + await cls.__check(limit_model, user_id, group_id, channel_id) + if limit_model := cls.count_limit.get(module): + await cls.__check(limit_model, user_id, group_id, channel_id) + + @classmethod + async def __check( + cls, + limit_model: Limit | None, + user_id: str, + group_id: str | None, + channel_id: str | None, + ): + """检测限制 + + 参数: + limit_model: Limit + user_id: 用户id + group_id: 群组id + channel_id: 频道id + + 异常: + IgnoredException: 
IgnoredException + """ + if not limit_model: + return + limit = limit_model.limit + limiter = limit_model.limiter + is_limit = ( + LimitWatchType.ALL + or (group_id and limit.watch_type == LimitWatchType.GROUP) + or (not group_id and limit.watch_type == LimitWatchType.USER) + ) + key_type = user_id + if group_id and limit.watch_type == LimitWatchType.GROUP: + key_type = channel_id or group_id + if is_limit and not limiter.check(key_type): + if limit.result: + await MessageUtils.build_message(limit.result).send() + raise SkipPluginException( + f"{limit.module}({limit.limit_type}) 正在限制中..." + ) + else: + logger.debug( + f"开始进行限制 {limit.module}({limit.limit_type})...", + LOGGER_COMMAND, + session=user_id, + group_id=group_id, + ) + if isinstance(limiter, FreqLimiter): + limiter.start_cd(key_type) + if isinstance(limiter, UserBlockLimiter): + limiter.set_true(key_type) + if isinstance(limiter, CountLimiter): + limiter.increase(key_type) + + +async def auth_limit(plugin: PluginInfo, session: Uninfo): + """插件限制 + + 参数: + plugin: PluginInfo + session: Uninfo + """ + entity = get_entity_ids(session) + if plugin.module not in LimitManager.add_module: + limit_list = await PluginLimit.filter(module=plugin.module, status=True).all() + for limit in limit_list: + LimitManager.add_limit(limit) + if entity.user_id: + await LimitManager.check( + plugin.module, entity.user_id, entity.group_id, entity.channel_id + ) diff --git a/zhenxun/builtin_plugins/hooks/auth/auth_plugin.py b/zhenxun/builtin_plugins/hooks/auth/auth_plugin.py new file mode 100644 index 00000000..ebfe7be1 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/auth_plugin.py @@ -0,0 +1,147 @@ +from nonebot.adapters import Event +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.models.group_console import GroupConsole +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.services.cache import Cache +from zhenxun.utils.common_utils import CommonUtils +from zhenxun.utils.enum import BlockType, CacheType +from zhenxun.utils.utils import get_entity_ids + +from .exception import IsSuperuserException, SkipPluginException +from .utils import freq, is_poke, send_message + + +class GroupCheck: + def __init__( + self, plugin: PluginInfo, group_id: str, session: Uninfo, is_poke: bool + ) -> None: + self.group_id = group_id + self.session = session + self.is_poke = is_poke + self.plugin = plugin + + async def __get_data(self): + cache = Cache[GroupConsole](CacheType.GROUPS) + return await cache.get(self.group_id) + + async def check(self): + await self.check_superuser_block(self.plugin) + + async def check_superuser_block(self, plugin: PluginInfo): + """超级用户禁用群组插件检测 + + 参数: + plugin: PluginInfo + + 异常: + IgnoredException: 忽略插件 + """ + group = await self.__get_data() + if group and CommonUtils.format(plugin.module) in group.superuser_block_plugin: + if freq.is_send_limit_message(plugin, group.group_id, self.is_poke): + await send_message( + self.session, "超级管理员禁用了该群此功能...", self.group_id + ) + raise SkipPluginException( + f"{plugin.name}({plugin.module}) 超级管理员禁用了该群此功能..." 
+ ) + await self.check_normal_block(self.plugin) + + async def check_normal_block(self, plugin: PluginInfo): + """群组插件状态 + + 参数: + plugin: PluginInfo + + 异常: + IgnoredException: 忽略插件 + """ + group = await self.__get_data() + if group and CommonUtils.format(plugin.module) in group.block_plugin: + if freq.is_send_limit_message(plugin, self.group_id, self.is_poke): + await send_message(self.session, "该群未开启此功能...", self.group_id) + raise SkipPluginException(f"{plugin.name}({plugin.module}) 未开启此功能...") + await self.check_global_block(self.plugin) + + async def check_global_block(self, plugin: PluginInfo): + """全局禁用插件检测 + + 参数: + plugin: PluginInfo + + 异常: + IgnoredException: 忽略插件 + """ + if plugin.block_type == BlockType.GROUP: + """全局群组禁用""" + if freq.is_send_limit_message(plugin, self.group_id, self.is_poke): + await send_message( + self.session, "该功能在群组中已被禁用...", self.group_id + ) + raise SkipPluginException( + f"{plugin.name}({plugin.module}) 该插件在群组中已被禁用..." + ) + + +class PluginCheck: + def __init__(self, group_id: str | None, session: Uninfo, is_poke: bool): + self.session = session + self.is_poke = is_poke + self.group_id = group_id + + async def check_user(self, plugin: PluginInfo): + """全局私聊禁用检测 + + 参数: + plugin: PluginInfo + + 异常: + IgnoredException: 忽略插件 + """ + if plugin.block_type == BlockType.PRIVATE: + if freq.is_send_limit_message(plugin, self.session.user.id, self.is_poke): + await send_message(self.session, "该功能在私聊中已被禁用...") + raise SkipPluginException( + f"{plugin.name}({plugin.module}) 该插件在私聊中已被禁用..." + ) + + async def check_global(self, plugin: PluginInfo): + """全局状态 + + 参数: + plugin: PluginInfo + + 异常: + IgnoredException: 忽略插件 + """ + if plugin.status or plugin.block_type != BlockType.ALL: + return + """全局状态""" + cache = Cache[GroupConsole](CacheType.GROUPS) + if self.group_id and (group := await cache.get(self.group_id)): + if group.is_super: + raise IsSuperuserException() + sid = self.group_id or self.session.user.id + if freq.is_send_limit_message(plugin, sid, self.is_poke): + await send_message(self.session, "全局未开启此功能...", sid) + raise SkipPluginException(f"{plugin.name}({plugin.module}) 全局未开启此功能...") + + +async def auth_plugin(plugin: PluginInfo, session: Uninfo, event: Event): + """插件状态 + + 参数: + plugin: PluginInfo + session: Uninfo + event: Event + """ + entity = get_entity_ids(session) + is_poke_event = is_poke(event) + user_check = PluginCheck(entity.group_id, session, is_poke_event) + if entity.group_id: + group_check = GroupCheck(plugin, entity.group_id, session, is_poke_event) + await group_check.check() + else: + await user_check.check_user(plugin) + await user_check.check_global(plugin) diff --git a/zhenxun/builtin_plugins/hooks/auth/bot_filter.py b/zhenxun/builtin_plugins/hooks/auth/bot_filter.py new file mode 100644 index 00000000..04e47372 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/bot_filter.py @@ -0,0 +1,35 @@ +import nonebot +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.configs.config import Config + +from .exception import SkipPluginException + +Config.add_plugin_config( + "hook", + "FILTER_BOT", + True, + help="过滤当前连接bot(防止bot互相调用)", + default_value=True, + type=bool, +) + + +def bot_filter(session: Uninfo): + """过滤bot调用bot + + 参数: + session: Uninfo + + 异常: + SkipPluginException: bot互相调用 + """ + if not Config.get_config("hook", "FILTER_BOT"): + return + bot_ids = list(nonebot.get_bots().keys()) + if session.user.id == session.self_id: + return + if session.user.id in bot_ids: + raise SkipPluginException( + 
f"bot:{session.self_id} 尝试调用 bot:{session.user.id}" + ) diff --git a/zhenxun/builtin_plugins/hooks/auth/config.py b/zhenxun/builtin_plugins/hooks/auth/config.py new file mode 100644 index 00000000..d68b7d00 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/config.py @@ -0,0 +1,13 @@ +import sys + +if sys.version_info >= (3, 11): + from enum import StrEnum +else: + from strenum import StrEnum + +LOGGER_COMMAND = "AuthChecker" + + +class SwitchEnum(StrEnum): + ENABLE = "醒来" + DISABLE = "休息吧" diff --git a/zhenxun/builtin_plugins/hooks/auth/exception.py b/zhenxun/builtin_plugins/hooks/auth/exception.py new file mode 100644 index 00000000..392a6718 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/exception.py @@ -0,0 +1,26 @@ +class IsSuperuserException(Exception): + pass + + +class SkipPluginException(Exception): + def __init__(self, info: str, *args: object) -> None: + super().__init__(*args) + self.info = info + + def __str__(self) -> str: + return self.info + + def __repr__(self) -> str: + return self.info + + +class PermissionExemption(Exception): + def __init__(self, info: str, *args: object) -> None: + super().__init__(*args) + self.info = info + + def __str__(self) -> str: + return self.info + + def __repr__(self) -> str: + return self.info diff --git a/zhenxun/builtin_plugins/hooks/auth/utils.py b/zhenxun/builtin_plugins/hooks/auth/utils.py new file mode 100644 index 00000000..0f925590 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth/utils.py @@ -0,0 +1,91 @@ +import contextlib + +from nonebot.adapters import Event +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.configs.config import Config +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.services.log import logger +from zhenxun.utils.enum import PluginType +from zhenxun.utils.message import MessageUtils +from zhenxun.utils.utils import FreqLimiter + +from .config import LOGGER_COMMAND + +base_config = Config.get("hook") + + +def is_poke(event: Event) -> bool: + """判断是否为poke类型 + + 参数: + event: Event + + 返回: + bool: 是否为poke类型 + """ + with contextlib.suppress(ImportError): + from nonebot.adapters.onebot.v11 import PokeNotifyEvent + + return isinstance(event, PokeNotifyEvent) + return False + + +async def send_message( + session: Uninfo, message: list | str, check_tag: str | None = None +): + """发送消息 + + 参数: + session: Uninfo + message: 消息 + check_tag: cd flag + """ + try: + if not check_tag: + await MessageUtils.build_message(message).send(reply_to=True) + elif freq._flmt.check(check_tag): + freq._flmt.start_cd(check_tag) + await MessageUtils.build_message(message).send(reply_to=True) + except Exception as e: + logger.error( + "发送消息失败", + LOGGER_COMMAND, + session=session, + e=e, + ) + + +class FreqUtils: + def __init__(self): + check_notice_info_cd = Config.get_config("hook", "CHECK_NOTICE_INFO_CD") + if check_notice_info_cd is None or check_notice_info_cd < 0: + raise ValueError("模块: [hook], 配置项: [CHECK_NOTICE_INFO_CD] 为空或小于0") + self._flmt = FreqLimiter(check_notice_info_cd) + self._flmt_g = FreqLimiter(check_notice_info_cd) + self._flmt_s = FreqLimiter(check_notice_info_cd) + self._flmt_c = FreqLimiter(check_notice_info_cd) + + def is_send_limit_message( + self, plugin: PluginInfo, sid: str, is_poke: bool + ) -> bool: + """是否发送提示消息 + + 参数: + plugin: PluginInfo + sid: 检测键 + is_poke: 是否是戳一戳 + + 返回: + bool: 是否发送提示消息 + """ + if is_poke: + return False + if not base_config.get("IS_SEND_TIP_MESSAGE"): + return False + if plugin.plugin_type == PluginType.DEPENDANT: + return False + return 
plugin.module != "ai" if self._flmt_s.check(sid) else False + + +freq = FreqUtils() diff --git a/zhenxun/builtin_plugins/hooks/auth_checker.py b/zhenxun/builtin_plugins/hooks/auth_checker.py new file mode 100644 index 00000000..0e0d5c64 --- /dev/null +++ b/zhenxun/builtin_plugins/hooks/auth_checker.py @@ -0,0 +1,176 @@ +import asyncio + +from nonebot.adapters import Bot, Event +from nonebot.exception import IgnoredException +from nonebot.matcher import Matcher +from nonebot_plugin_alconna import UniMsg +from nonebot_plugin_uninfo import Uninfo +from tortoise.exceptions import IntegrityError + +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.models.user_console import UserConsole +from zhenxun.services.cache import Cache +from zhenxun.services.log import logger +from zhenxun.utils.enum import ( + CacheType, + GoldHandle, + PluginType, +) +from zhenxun.utils.exception import InsufficientGold +from zhenxun.utils.platform import PlatformUtils +from zhenxun.utils.utils import get_entity_ids + +from .auth.auth_admin import auth_admin +from .auth.auth_ban import auth_ban +from .auth.auth_bot import auth_bot +from .auth.auth_cost import auth_cost +from .auth.auth_group import auth_group +from .auth.auth_limit import LimitManager, auth_limit +from .auth.auth_plugin import auth_plugin +from .auth.bot_filter import bot_filter +from .auth.config import LOGGER_COMMAND +from .auth.exception import ( + IsSuperuserException, + PermissionExemption, + SkipPluginException, +) + + +async def get_plugin_and_user( + module: str, user_id: str +) -> tuple[PluginInfo, UserConsole]: + """获取用户数据和插件信息 + + 参数: + module: 模块名 + user_id: 用户id + + 异常: + PermissionExemption: 插件数据不存在 + PermissionExemption: 插件类型为HIDDEN + PermissionExemption: 重复创建用户 + PermissionExemption: 用户数据不存在 + + 返回: + tuple[PluginInfo, UserConsole]: 插件信息,用户信息 + """ + user_cache = Cache[UserConsole](CacheType.USERS) + plugin = await Cache[PluginInfo](CacheType.PLUGINS).get(module) + if not plugin: + raise PermissionExemption(f"插件:{module} 数据不存在,已跳过权限检查...") + if plugin.plugin_type == PluginType.HIDDEN: + raise PermissionExemption( + f"插件: {plugin.name}:{plugin.module} 为HIDDEN,已跳过权限检查..." 
+ ) + user = None + try: + user = await user_cache.get(user_id) + except IntegrityError as e: + raise PermissionExemption("重复创建用户,已跳过该次权限检查...") from e + if not user: + raise PermissionExemption("用户数据不存在,已跳过权限检查...") + return plugin, user + + +async def get_plugin_cost( + bot: Bot, user: UserConsole, plugin: PluginInfo, session: Uninfo +) -> int: + """获取插件费用 + + 参数: + bot: Bot + user: 用户数据 + plugin: 插件数据 + session: Uninfo + + 异常: + IsSuperuserException: 超级用户 + IsSuperuserException: 超级用户 + + 返回: + int: 调用插件金币费用 + """ + cost_gold = await auth_cost(user, plugin, session) + if session.user.id in bot.config.superusers: + if plugin.plugin_type == PluginType.SUPERUSER: + raise IsSuperuserException() + if not plugin.limit_superuser: + raise IsSuperuserException() + return cost_gold + + +async def reduce_gold(user_id: str, module: str, cost_gold: int, session: Uninfo): + """扣除用户金币 + + 参数: + user_id: 用户id + module: 插件模块名称 + cost_gold: 消耗金币 + session: Uninfo + """ + user_cache = Cache[UserConsole](CacheType.USERS) + try: + await UserConsole.reduce_gold( + user_id, + cost_gold, + GoldHandle.PLUGIN, + module, + PlatformUtils.get_platform(session), + ) + except InsufficientGold: + if u := await UserConsole.get_user(user_id): + u.gold = 0 + await u.save(update_fields=["gold"]) + # 更新缓存 + await user_cache.update(user_id) + logger.debug(f"调用功能花费金币: {cost_gold}", LOGGER_COMMAND, session=session) + + +async def auth( + matcher: Matcher, + event: Event, + bot: Bot, + session: Uninfo, + message: UniMsg, +): + """权限检查 + + 参数: + matcher: matcher + event: Event + bot: bot + session: Uninfo + message: UniMsg + """ + cost_gold = 0 + ignore_flag = False + entity = get_entity_ids(session) + module = matcher.plugin_name or "" + try: + if not module: + raise PermissionExemption("Matcher插件名称不存在...") + plugin, user = await get_plugin_and_user(module, entity.user_id) + cost_gold = await get_plugin_cost(bot, user, plugin, session) + bot_filter(session) + await asyncio.gather( + *[ + auth_ban(matcher, bot, session), + auth_bot(plugin, bot.self_id), + auth_group(plugin, entity, message), + auth_admin(plugin, session), + auth_plugin(plugin, session, event), + auth_limit(plugin, session), + ] + ) + except SkipPluginException as e: + LimitManager.unblock(module, entity.user_id, entity.group_id, entity.channel_id) + logger.info(str(e), LOGGER_COMMAND, session=session) + ignore_flag = True + except IsSuperuserException: + logger.debug("超级用户跳过权限检测...", LOGGER_COMMAND, session=session) + except PermissionExemption as e: + logger.info(str(e), LOGGER_COMMAND, session=session) + if not ignore_flag and cost_gold > 0: + await reduce_gold(entity.user_id, module, cost_gold, session) + if ignore_flag: + raise IgnoredException("权限检测 ignore") diff --git a/zhenxun/builtin_plugins/hooks/auth_hook.py b/zhenxun/builtin_plugins/hooks/auth_hook.py index 0ccca75c..5c83cb75 100644 --- a/zhenxun/builtin_plugins/hooks/auth_hook.py +++ b/zhenxun/builtin_plugins/hooks/auth_hook.py @@ -1,41 +1,45 @@ -from nonebot.adapters.onebot.v11 import Bot, Event +import time + +from nonebot.adapters import Bot, Event from nonebot.matcher import Matcher from nonebot.message import run_postprocessor, run_preprocessor from nonebot_plugin_alconna import UniMsg -from nonebot_plugin_session import EventSession +from nonebot_plugin_uninfo import Uninfo -from ._auth_checker import LimitManage, checker +from zhenxun.services.log import logger + +from .auth.config import LOGGER_COMMAND +from .auth_checker import LimitManager, auth # # 权限检测 @run_preprocessor async def _( - 
matcher: Matcher, event: Event, bot: Bot, session: EventSession, message: UniMsg + matcher: Matcher, event: Event, bot: Bot, session: Uninfo, message: UniMsg ): - await checker.auth( + start_time = time.time() + await auth( matcher, event, bot, session, message, ) + logger.debug(f"权限检测耗时:{time.time() - start_time}秒", LOGGER_COMMAND) # 解除命令block阻塞 @run_postprocessor -async def _( - matcher: Matcher, - exception: Exception | None, - bot: Bot, - event: Event, - session: EventSession, -): - user_id = session.id1 - group_id = session.id3 - channel_id = session.id2 - if not group_id: - group_id = channel_id - channel_id = None +async def _(matcher: Matcher, session: Uninfo): + user_id = session.user.id + group_id = None + channel_id = None + if session.group: + if session.group.parent: + group_id = session.group.parent.id + channel_id = session.group.id + else: + group_id = session.group.id if user_id and matcher.plugin: module = matcher.plugin.name - LimitManage.unblock(module, user_id, group_id, channel_id) + LimitManager.unblock(module, user_id, group_id, channel_id) diff --git a/zhenxun/builtin_plugins/hooks/ban_hook.py b/zhenxun/builtin_plugins/hooks/ban_hook.py deleted file mode 100644 index 66dfb11c..00000000 --- a/zhenxun/builtin_plugins/hooks/ban_hook.py +++ /dev/null @@ -1,84 +0,0 @@ -from nonebot.adapters import Bot, Event -from nonebot.exception import IgnoredException -from nonebot.matcher import Matcher -from nonebot.message import run_preprocessor -from nonebot.typing import T_State -from nonebot_plugin_alconna import At -from nonebot_plugin_session import EventSession - -from zhenxun.configs.config import Config -from zhenxun.models.ban_console import BanConsole -from zhenxun.models.group_console import GroupConsole -from zhenxun.services.log import logger -from zhenxun.utils.enum import PluginType -from zhenxun.utils.message import MessageUtils -from zhenxun.utils.utils import FreqLimiter - -Config.add_plugin_config( - "hook", - "BAN_RESULT", - "才不会给你发消息.", - help="对被ban用户发送的消息", -) - -_flmt = FreqLimiter(300) - - -# 检查是否被ban -@run_preprocessor -async def _( - matcher: Matcher, bot: Bot, event: Event, state: T_State, session: EventSession -): - extra = {} - if plugin := matcher.plugin: - if metadata := plugin.metadata: - extra = metadata.extra - if extra.get("plugin_type") in [PluginType.HIDDEN]: - return - user_id = session.id1 - group_id = session.id3 or session.id2 - if group_id: - if user_id in bot.config.superusers: - return - if await BanConsole.is_ban(None, group_id): - logger.debug("群组处于黑名单中...", "ban_hook") - raise IgnoredException("群组处于黑名单中...") - if g := await GroupConsole.get_group(group_id): - if g.level < 0: - logger.debug("群黑名单, 群权限-1...", "ban_hook") - raise IgnoredException("群黑名单, 群权限-1..") - if user_id: - ban_result = Config.get_config("hook", "BAN_RESULT") - if user_id in bot.config.superusers: - return - if await BanConsole.is_ban(user_id, group_id): - time = await BanConsole.check_ban_time(user_id, group_id) - if time == -1: - time_str = "∞" - else: - time = abs(int(time)) - if time < 60: - time_str = f"{time!s} 秒" - else: - minute = int(time / 60) - if minute > 60: - hours = minute // 60 - minute %= 60 - time_str = f"{hours} 小时 {minute}分钟" - else: - time_str = f"{minute} 分钟" - if ( - not extra.get("ignore_prompt") - and time != -1 - and ban_result - and _flmt.check(user_id) - ): - _flmt.start_cd(user_id) - await MessageUtils.build_message( - [ - At(flag="user", target=user_id), - f"{ban_result}\n在..在 {time_str} 后才会理你喔", - ] - ).send() - 
logger.debug("用户处于黑名单中...", "ban_hook") - raise IgnoredException("用户处于黑名单中...") diff --git a/zhenxun/builtin_plugins/hooks/call_hook.py b/zhenxun/builtin_plugins/hooks/call_hook.py index 2ff4d39c..1893754d 100644 --- a/zhenxun/builtin_plugins/hooks/call_hook.py +++ b/zhenxun/builtin_plugins/hooks/call_hook.py @@ -1,23 +1,85 @@ from typing import Any -from nonebot.adapters import Bot +from nonebot.adapters import Bot, Message +from zhenxun.configs.config import Config +from zhenxun.models.bot_message_store import BotMessageStore from zhenxun.services.log import logger +from zhenxun.utils.enum import BotSentType from zhenxun.utils.manager.message_manager import MessageManager +from zhenxun.utils.platform import PlatformUtils + + +def replace_message(message: Message) -> str: + """将消息中的at、image、record、face替换为字符串 + + 参数: + message: Message + + 返回: + str: 文本消息 + """ + result = "" + for msg in message: + if isinstance(msg, str): + result += msg + elif msg.type == "at": + result += f"@{msg.data['qq']}" + elif msg.type == "image": + result += "[image]" + elif msg.type == "record": + result += "[record]" + elif msg.type == "face": + result += f"[face:{msg.data['id']}]" + elif msg.type == "reply": + result += "" + else: + result += str(msg) + return result @Bot.on_called_api async def handle_api_result( bot: Bot, exception: Exception | None, api: str, data: dict[str, Any], result: Any ): - if not exception and api == "send_msg": - try: - if (uid := data.get("user_id")) and (msg_id := result.get("message_id")): - MessageManager.add(str(uid), str(msg_id)) - logger.debug( - f"收集消息id,user_id: {uid}, msg_id: {msg_id}", "msg_hook" - ) - except Exception as e: - logger.warning( - f"收集消息id发生错误...data: {data}, result: {result}", "msg_hook", e=e + if exception or api != "send_msg": + return + user_id = data.get("user_id") + group_id = data.get("group_id") + message_id = result.get("message_id") + message: Message = data.get("message", "") + message_type = data.get("message_type") + try: + # 记录消息id + if user_id and message_id: + MessageManager.add(str(user_id), str(message_id)) + logger.debug( + f"收集消息id,user_id: {user_id}, msg_id: {message_id}", "msg_hook" ) + except Exception as e: + logger.warning( + f"收集消息id发生错误...data: {data}, result: {result}", "msg_hook", e=e + ) + if not Config.get_config("hook", "RECORD_BOT_SENT_MESSAGES"): + return + try: + await BotMessageStore.create( + bot_id=bot.self_id, + user_id=user_id, + group_id=group_id, + sent_type=BotSentType.GROUP + if message_type == "group" + else BotSentType.PRIVATE, + text=replace_message(message), + plain_text=message.extract_plain_text() + if isinstance(message, Message) + else replace_message(message), + platform=PlatformUtils.get_platform(bot), + ) + logger.debug(f"消息发送记录,message: {message}") + except Exception as e: + logger.warning( + f"消息发送记录发生错误...data: {data}, result: {result}", + "msg_hook", + e=e, + ) diff --git a/zhenxun/builtin_plugins/init/__init__.py b/zhenxun/builtin_plugins/init/__init__.py index 3d97a47c..7c78b019 100644 --- a/zhenxun/builtin_plugins/init/__init__.py +++ b/zhenxun/builtin_plugins/init/__init__.py @@ -4,15 +4,25 @@ import nonebot from nonebot.adapters import Bot from zhenxun.models.group_console import GroupConsole +from zhenxun.services.cache import DbCacheException from zhenxun.services.log import logger from zhenxun.utils.platform import PlatformUtils nonebot.load_plugins(str(Path(__file__).parent.resolve())) +try: + from .__init_cache import CacheRoot +except DbCacheException as e: + raise 
SystemError(f"ERROR:{e}") driver = nonebot.get_driver() +@driver.on_startup +async def _(): + await CacheRoot.init_non_lazy_caches() + + @driver.on_bot_connect async def _(bot: Bot): """将bot已存在的群组添加群认证 diff --git a/zhenxun/builtin_plugins/init/__init_cache.py b/zhenxun/builtin_plugins/init/__init_cache.py new file mode 100644 index 00000000..53bfe7e7 --- /dev/null +++ b/zhenxun/builtin_plugins/init/__init_cache.py @@ -0,0 +1,208 @@ +from zhenxun.models.ban_console import BanConsole +from zhenxun.models.bot_console import BotConsole +from zhenxun.models.group_console import GroupConsole +from zhenxun.models.level_user import LevelUser +from zhenxun.models.plugin_info import PluginInfo +from zhenxun.models.user_console import UserConsole +from zhenxun.services.cache import CacheData, CacheRoot +from zhenxun.services.log import logger +from zhenxun.utils.enum import CacheType + + +@CacheRoot.new(CacheType.PLUGINS) +async def _(): + """初始化插件缓存""" + data_list = await PluginInfo.get_plugins() + return {p.module: p for p in data_list} + + +@CacheRoot.getter(CacheType.PLUGINS, result_model=PluginInfo) +async def _(cache_data: CacheData, module: str): + """获取插件缓存""" + data = await cache_data.get_key(module) + if not data: + if plugin := await PluginInfo.get_plugin(module=module): + await cache_data.set_key(module, plugin) + logger.debug(f"插件 {module} 数据已设置到缓存") + return plugin + return data + + +@CacheRoot.with_refresh(CacheType.PLUGINS) +async def _(cache_data: CacheData, data: dict[str, PluginInfo] | None): + """刷新插件缓存""" + if not data: + return + plugins = await PluginInfo.filter(module__in=data.keys(), load_status=True).all() + for plugin in plugins: + await cache_data.set_key(plugin.module, plugin) + + +@CacheRoot.new(CacheType.GROUPS) +async def _(): + """初始化群组缓存""" + data_list = await GroupConsole.all() + return {p.group_id: p for p in data_list if not p.channel_id} + + +@CacheRoot.getter(CacheType.GROUPS, result_model=GroupConsole) +async def _(cache_data: CacheData, group_id: str): + """获取群组缓存""" + data = await cache_data.get_key(group_id) + if not data: + if group := await GroupConsole.get_group(group_id=group_id): + await cache_data.set_key(group_id, group) + return group + return data + + +@CacheRoot.with_refresh(CacheType.GROUPS) +async def _(cache_data: CacheData, data: dict[str, GroupConsole] | None): + """刷新群组缓存""" + if not data: + return + groups = await GroupConsole.filter( + group_id__in=data.keys(), channel_id__isnull=True + ).all() + for group in groups: + await cache_data.set_key(group.group_id, group) + + +@CacheRoot.new(CacheType.BOT) +async def _(): + """初始化机器人缓存""" + data_list = await BotConsole.all() + return {p.bot_id: p for p in data_list} + + +@CacheRoot.getter(CacheType.BOT, result_model=BotConsole) +async def _(cache_data: CacheData, bot_id: str): + """获取机器人缓存""" + data = await cache_data.get_key(bot_id) + if not data: + if bot := await BotConsole.get_or_none(bot_id=bot_id): + await cache_data.set_key(bot_id, bot) + return bot + return data + + +@CacheRoot.with_refresh(CacheType.BOT) +async def _(cache_data: CacheData, data: dict[str, BotConsole] | None): + """刷新机器人缓存""" + if not data: + return + bots = await BotConsole.filter(bot_id__in=data.keys()).all() + for bot in bots: + await cache_data.set_key(bot.bot_id, bot) + + +@CacheRoot.new(CacheType.USERS) +async def _(): + """初始化用户缓存""" + data_list = await UserConsole.all() + return {p.user_id: p for p in data_list} + + +@CacheRoot.getter(CacheType.USERS, result_model=UserConsole) +async def _(cache_data: CacheData, 
user_id: str): + """获取用户缓存""" + data = await cache_data.get_key(user_id) + if not data: + if user := await UserConsole.get_user(user_id=user_id): + await cache_data.set_key(user_id, user) + return user + return data + + +@CacheRoot.with_refresh(CacheType.USERS) +async def _(cache_data: CacheData, data: dict[str, UserConsole] | None): + """刷新用户缓存""" + if not data: + return + users = await UserConsole.filter(user_id__in=data.keys()).all() + for user in users: + await cache_data.set_key(user.user_id, user) + + +@CacheRoot.new(CacheType.LEVEL) +async def _(): + """初始化等级缓存""" + data_list = await LevelUser().all() + return {f"{d.user_id}:{d.group_id or ''}": d for d in data_list} + + +@CacheRoot.getter(CacheType.LEVEL, result_model=list[LevelUser]) +async def _(cache_data: CacheData, user_id: str, group_id: str | None = None): + """获取等级缓存""" + key = f"{user_id}:{group_id or ''}" + data = await cache_data.get_key(key) + if not data: + if group_id: + data = await LevelUser.filter(user_id=user_id, group_id=group_id).all() + else: + data = await LevelUser.filter(user_id=user_id, group_id__isnull=True).all() + if data: + await cache_data.set_key(key, data) + return data + return data or [] + + +@CacheRoot.new(CacheType.BAN, False) +async def _(): + """初始化封禁缓存""" + data_list = await BanConsole.all() + return {f"{d.group_id or ''}:{d.user_id or ''}": d for d in data_list} + + +@CacheRoot.getter(CacheType.BAN, result_model=BanConsole) +async def _(cache_data: CacheData, user_id: str | None, group_id: str | None = None): + """获取封禁缓存""" + if not user_id and not group_id: + return [] + key = f"{group_id or ''}:{user_id or ''}" + data = await cache_data.get_key(key) + # if not data: + # start = time.time() + # if user_id and group_id: + # data = await BanConsole.filter(user_id=user_id, group_id=group_id).all() + # elif user_id: + # data = await BanConsole.filter(user_id=user_id, group_id__isnull=True).all() + # elif group_id: + # data = await BanConsole.filter( + # user_id__isnull=True, group_id=group_id + # ).all() + # logger.info( + # f"获取封禁缓存耗时: {time.time() - start:.2f}秒, key: {key}, data: {data}" + # ) + # if data: + # await cache_data.set_key(key, data) + # return data + return data or [] + + +# @CacheRoot.new(CacheType.LIMIT) +# async def _(): +# """初始化限制缓存""" +# data_list = await PluginLimit.filter(status=True).all() +# return {data.module: data for data in data_list} + + +# @CacheRoot.getter(CacheType.LIMIT, result_model=list[PluginLimit]) +# async def _(cache_data: CacheData, module: str): +# """获取限制缓存""" +# data = await cache_data.get_key(module) +# if not data: +# if limits := await PluginLimit.filter(module=module, status=True): +# await cache_data.set_key(module, limits) +# return limits +# return data or [] + + +# @CacheRoot.with_refresh(CacheType.LIMIT) +# async def _(cache_data: CacheData, data: dict[str, list[PluginLimit]] | None): +# """刷新限制缓存""" +# if not data: +# return +# limits = await PluginLimit.filter(module__in=data.keys(), load_status=True).all() +# for limit in limits: +# await cache_data.set_key(limit.module, limit) diff --git a/zhenxun/builtin_plugins/init/init_config.py b/zhenxun/builtin_plugins/init/init_config.py index 112d29de..51a7da47 100644 --- a/zhenxun/builtin_plugins/init/init_config.py +++ b/zhenxun/builtin_plugins/init/init_config.py @@ -11,6 +11,7 @@ from zhenxun.configs.config import Config from zhenxun.configs.path_config import DATA_PATH from zhenxun.configs.utils import RegisterConfig from zhenxun.services.log import logger +from 
zhenxun.utils.manager.priority_manager import PriorityLifecycle _yaml = YAML(pure=True) _yaml.allow_unicode = True @@ -57,7 +58,7 @@ def _generate_simple_config(exists_module: list[str]): 生成简易配置 异常: - AttributeError: _description_ + AttributeError: AttributeError """ # 读取用户配置 _data = {} @@ -73,7 +74,9 @@ def _generate_simple_config(exists_module: list[str]): if _data.get(module) and k in _data[module].keys(): Config.set_config(module, k, _data[module][k]) if f"{module}:{k}".lower() in exists_module: - _tmp_data[module][k] = Config.get_config(module, k) + _tmp_data[module][k] = Config.get_config( + module, k, build_model=False + ) except AttributeError as e: raise AttributeError(f"{e}\n可能为config.yaml配置文件填写不规范") from e if not _tmp_data[module]: @@ -102,7 +105,7 @@ def _generate_simple_config(exists_module: list[str]): temp_file.unlink() -@driver.on_startup +@PriorityLifecycle.on_startup(priority=0) def _(): """ 初始化插件数据配置 @@ -125,3 +128,4 @@ def _(): with plugins2config_file.open("w", encoding="utf8") as wf: _yaml.dump(_data, wf) _generate_simple_config(exists_module) + Config.reload() diff --git a/zhenxun/builtin_plugins/mahiro_bank/__init__.py b/zhenxun/builtin_plugins/mahiro_bank/__init__.py new file mode 100644 index 00000000..2f6fbf1f --- /dev/null +++ b/zhenxun/builtin_plugins/mahiro_bank/__init__.py @@ -0,0 +1,259 @@ +from datetime import datetime + +from nonebot.plugin import PluginMetadata +from nonebot_plugin_alconna import Alconna, Args, Arparma, Match, Subcommand, on_alconna +from nonebot_plugin_apscheduler import scheduler +from nonebot_plugin_uninfo import Uninfo +from nonebot_plugin_waiter import prompt_until + +from zhenxun.configs.utils import PluginExtraData, RegisterConfig +from zhenxun.services.log import logger +from zhenxun.utils.depends import UserName +from zhenxun.utils.message import MessageUtils +from zhenxun.utils.utils import is_number + +from .data_source import BankManager + +__plugin_meta__ = PluginMetadata( + name="小真寻银行", + description=""" + 小真寻银行,提供高品质的存款!当好感度等级达到指初识时,小真寻会偷偷的帮助你哦。 + 存款额度与好感度有关,每日存款次数有限制。 + 基础存款提供基础利息 + 每日存款提供高额利息 + """.strip(), + usage=""" + 指令: + 存款 [金额] + 取款 [金额] + 银行信息 + 我的银行信息 + """.strip(), + extra=PluginExtraData( + author="HibiKier", + version="0.1", + menu_type="群内小游戏", + configs=[ + RegisterConfig( + key="sign_max_deposit", + value=100, + help="好感度换算存款金额比例,当值是100时,最大存款金额=好感度*100,存款的最低金额是100(强制)", + default_value=100, + type=int, + ), + RegisterConfig( + key="max_daily_deposit_count", + value=3, + help="每日最大存款次数", + default_value=3, + type=int, + ), + RegisterConfig( + key="rate_range", + value=[0.0005, 0.001], + help="小时利率范围", + default_value=[0.0005, 0.001], + type=list[float], + ), + RegisterConfig( + key="impression_event", + value=25, + help="到达指定好感度时随机提高或降低利率", + default_value=25, + type=int, + ), + RegisterConfig( + key="impression_event_range", + value=[0.00001, 0.0003], + help="到达指定好感度时随机提高或降低利率", + default_value=[0.00001, 0.0003], + type=list[float], + ), + RegisterConfig( + key="impression_event_prop", + value=0.3, + help="到达指定好感度时随机提高或降低利率触发概率", + default_value=0.3, + type=float, + ), + ], + ).to_dict(), +) + + +_matcher = on_alconna( + Alconna( + "mahiro-bank", + Subcommand("deposit", Args["amount?", int]), + Subcommand("withdraw", Args["amount?", int]), + Subcommand("user-info"), + Subcommand("bank-info"), + # Subcommand("loan", Args["amount?", int]), + # Subcommand("repayment", Args["amount?", int]), + ), + priority=5, + block=True, +) + +_matcher.shortcut( + r"1111", + command="mahiro-bank", + arguments=["test"], + 
prefix=True, +) + +_matcher.shortcut( + r"存款\s*(?P\d+)?", + command="mahiro-bank", + arguments=["deposit", "{amount}"], + prefix=True, +) + +_matcher.shortcut( + r"取款\s*(?P\d+)?", + command="mahiro-bank", + arguments=["withdraw", "{withdraw}"], + prefix=True, +) + +_matcher.shortcut( + r"我的银行信息", + command="mahiro-bank", + arguments=["user-info"], + prefix=True, +) + +_matcher.shortcut( + r"银行信息", + command="mahiro-bank", + arguments=["bank-info"], + prefix=True, +) + + +async def get_amount(handle_type: str) -> int: + amount_num = await prompt_until( + f"请输入{handle_type}金币数量", + lambda msg: is_number(msg.extract_plain_text()), + timeout=60, + retry=3, + retry_prompt="输入错误,请输入数字。剩余次数:{count}", + ) + if not amount_num: + await MessageUtils.build_message( + "输入超时了哦,小真寻柜员以取消本次存款操作..." + ).finish() + return int(amount_num.extract_plain_text()) + + +@_matcher.assign("deposit") +async def _(session: Uninfo, arparma: Arparma, amount: Match[int]): + amount_num = amount.result if amount.available else await get_amount("存款") + if result := await BankManager.deposit_check(session.user.id, amount_num): + await MessageUtils.build_message(result).finish(reply_to=True) + _, rate, event_rate = await BankManager.deposit(session.user.id, amount_num) + result = ( + f"存款成功!\n此次存款金额为: {amount.result}\n" + f"当前小时利率为: {rate * 100:.2f}%" + ) + effective_hour = int(24 - datetime.now().hour) + if event_rate: + result += f"(小真寻偷偷将小时利率给你增加了 {event_rate:.2f}% 哦)" + result += ( + f"\n预计总收益为: {int(amount.result * rate * effective_hour) or 1} 金币。" + ) + logger.info( + f"小真寻银行存款:{amount_num},当前存款数:{amount.result},存款小时利率: {rate}", + arparma.header_result, + session=session, + ) + await MessageUtils.build_message(result).finish(at_sender=True) + + +@_matcher.assign("withdraw") +async def _(session: Uninfo, arparma: Arparma, amount: Match[int]): + amount_num = amount.result if amount.available else await get_amount("取款") + if result := await BankManager.withdraw_check(session.user.id, amount_num): + await MessageUtils.build_message(result).finish(reply_to=True) + try: + user = await BankManager.withdraw(session.user.id, amount_num) + result = ( + f"取款成功!\n当前取款金额为: {amount_num}\n当前存款金额为: {user.amount}" + ) + logger.info( + f"小真寻银行取款:{amount_num}, 当前存款数:{user.amount}," + f" 存款小时利率:{user.rate}", + arparma.header_result, + session=session, + ) + await MessageUtils.build_message(result).finish(reply_to=True) + except ValueError: + await MessageUtils.build_message("你的银行内的存款数量不足哦...").finish( + reply_to=True + ) + + +@_matcher.assign("user-info") +async def _(session: Uninfo, arparma: Arparma, uname: str = UserName()): + result = await BankManager.get_user_info(session, uname) + await MessageUtils.build_message(result).send() + logger.info("查看银行个人信息", arparma.header_result, session=session) + + +@_matcher.assign("bank-info") +async def _(session: Uninfo, arparma: Arparma): + result = await BankManager.get_bank_info() + await MessageUtils.build_message(result).send() + logger.info("查看银行信息", arparma.header_result, session=session) + + +# @_matcher.assign("loan") +# async def _(session: Uninfo, arparma: Arparma, amount: Match[int]): +# amount_num = amount.result if amount.available else await get_amount("贷款") +# if amount_num <= 0: +# await MessageUtils.build_message("贷款数量必须大于 0 啊笨蛋!").finish() +# try: +# user, event_rate = await BankManager.loan(session.user.id, amount_num) +# result = ( +# f"贷款成功!\n当前贷金额为: {user.loan_amount}" +# f"\n当前利率为: {user.loan_rate * 100}%" +# ) +# if event_rate: +# result += f"(小真寻偷偷将利率给你降低了 {event_rate}% 
哦)" +# result += f"\n预计每小时利息为:{int(user.loan_amount * user.loan_rate)}金币。" +# logger.info( +# f"小真寻银行贷款: {amount_num}, 当前贷款数: {user.loan_amount}, " +# f"贷款利率: {user.loan_rate}", +# arparma.header_result, +# session=session, +# ) +# except ValueError: +# await MessageUtils.build_message( +# "贷款数量超过最大限制,请签到提升好感度获取更多额度吧..." +# ).finish(reply_to=True) + + +# @_matcher.assign("repayment") +# async def _(session: Uninfo, arparma: Arparma, amount: Match[int]): +# amount_num = amount.result if amount.available else await get_amount("还款") +# if amount_num <= 0: +# await MessageUtils.build_message("还款数量必须大于 0 啊笨蛋!").finish() +# user = await BankManager.repayment(session.user.id, amount_num) +# result = (f"还款成功!\n当前还款金额为: {amount_num}\n" +# f"当前贷款金额为: {user.loan_amount}") +# logger.info( +# f"小真寻银行还款:{amount_num},当前贷款数:{user.amount}, 贷款利率:{user.rate}", +# arparma.header_result, +# session=session, +# ) +# await MessageUtils.build_message(result).finish(at_sender=True) + + +@scheduler.scheduled_job( + "cron", + hour=0, + minute=0, +) +async def _(): + await BankManager.settlement() + logger.info("小真寻银行结算", "定时任务") diff --git a/zhenxun/builtin_plugins/mahiro_bank/data_source.py b/zhenxun/builtin_plugins/mahiro_bank/data_source.py new file mode 100644 index 00000000..dc64fa16 --- /dev/null +++ b/zhenxun/builtin_plugins/mahiro_bank/data_source.py @@ -0,0 +1,448 @@ +import asyncio +from datetime import datetime, timedelta +import random + +from nonebot_plugin_htmlrender import template_to_pic +from nonebot_plugin_uninfo import Uninfo +from tortoise.expressions import RawSQL +from tortoise.functions import Count, Sum + +from zhenxun.configs.config import Config +from zhenxun.configs.path_config import TEMPLATE_PATH +from zhenxun.models.mahiro_bank import MahiroBank +from zhenxun.models.mahiro_bank_log import MahiroBankLog +from zhenxun.models.sign_user import SignUser +from zhenxun.models.user_console import UserConsole +from zhenxun.utils.enum import BankHandleType, GoldHandle +from zhenxun.utils.platform import PlatformUtils + +base_config = Config.get("mahiro_bank") + + +class BankManager: + @classmethod + async def random_event(cls, impression: float): + """随机事件""" + impression_event = base_config.get("impression_event") + impression_event_prop = base_config.get("impression_event_prop") + impression_event_range = base_config.get("impression_event_range") + if impression >= impression_event and random.random() < impression_event_prop: + """触发好感度事件""" + return random.uniform(impression_event_range[0], impression_event_range[1]) + return None + + @classmethod + async def deposit_check(cls, user_id: str, amount: int) -> str | None: + """检查存款是否合法 + + 参数: + user_id: 用户id + amount: 存款金额 + + 返回: + str | None: 存款信息 + """ + if amount <= 0: + return "存款数量必须大于 0 啊笨蛋!" + user, sign_user, bank_user = await asyncio.gather( + *[ + UserConsole.get_user(user_id), + SignUser.get_user(user_id), + cls.get_user(user_id), + ] + ) + sign_max_deposit: int = base_config.get("sign_max_deposit") + max_deposit = max(int(float(sign_user.impression) * sign_max_deposit), 100) + if user.gold < amount: + return f"金币数量不足,当前你的金币为:{user.gold}." 
+ if bank_user.amount + amount > max_deposit: + return ( + f"存款超过上限,存款上限为:{max_deposit}," + f"当前你的还可以存款金额:{max_deposit - bank_user.amount}。" + ) + max_daily_deposit_count: int = base_config.get("max_daily_deposit_count") + today_deposit_count = len(await cls.get_user_deposit(user_id)) + if today_deposit_count >= max_daily_deposit_count: + return f"存款次数超过上限,每日存款次数上限为:{max_daily_deposit_count}。" + return None + + @classmethod + async def withdraw_check(cls, user_id: str, amount: int) -> str | None: + """检查取款是否合法 + + 参数: + user_id: 用户id + amount: 取款金额 + + 返回: + str | None: 取款信息 + """ + if amount <= 0: + return "取款数量必须大于 0 啊笨蛋!" + user = await cls.get_user(user_id) + data_list = await cls.get_user_deposit(user_id) + lock_amount = sum(data.amount for data in data_list) + if user.amount - lock_amount < amount: + return ( + "取款金额不足,当前你的存款为:" + f"{user.amount}({lock_amount}已被锁定)!" + ) + return None + + @classmethod + async def get_user_deposit( + cls, user_id: str, is_completed: bool = False + ) -> list[MahiroBankLog]: + """获取用户今日存款次数 + + 参数: + user_id: 用户id + + 返回: + list[MahiroBankLog]: 存款列表 + """ + return await MahiroBankLog.filter( + user_id=user_id, + handle_type=BankHandleType.DEPOSIT, + is_completed=is_completed, + ) + + @classmethod + async def get_user(cls, user_id: str) -> MahiroBank: + """查询余额 + + 参数: + user_id: 用户id + + 返回: + MahiroBank + """ + user, _ = await MahiroBank.get_or_create(user_id=user_id) + return user + + @classmethod + async def get_user_data( + cls, + user_id: str, + data_type: BankHandleType, + is_completed: bool = False, + count: int = 5, + ) -> list[MahiroBankLog]: + return ( + await MahiroBankLog.filter( + user_id=user_id, handle_type=data_type, is_completed=is_completed + ) + .order_by("-id") + .limit(count) + .all() + ) + + @classmethod + async def complete_projected_revenue(cls, user_id: str) -> int: + """预计收益 + + 参数: + user_id: 用户id + + 返回: + int: 预计收益金额 + """ + deposit_list = await cls.get_user_deposit(user_id) + if not deposit_list: + return 0 + return int( + sum( + deposit.rate * deposit.amount * deposit.effective_hour + for deposit in deposit_list + ) + ) + + @classmethod + async def get_user_info(cls, session: Uninfo, uname: str) -> bytes: + """获取用户数据 + + 参数: + session: Uninfo + uname: 用户id + + 返回: + bytes: 图片数据 + """ + user_id = session.user.id + user = await cls.get_user(user_id=user_id) + ( + rank, + deposit_count, + user_today_deposit, + projected_revenue, + sum_data, + ) = await asyncio.gather( + *[ + MahiroBank.filter(amount__gt=user.amount).count(), + MahiroBankLog.filter(user_id=user_id).count(), + cls.get_user_deposit(user_id), + cls.complete_projected_revenue(user_id), + MahiroBankLog.filter( + user_id=user_id, handle_type=BankHandleType.INTEREST + ) + .annotate(sum=Sum("amount")) + .values("sum"), + ] + ) + now = datetime.now() + end_time = ( + now + + timedelta(days=1) + - timedelta(hours=now.hour, minutes=now.minute, seconds=now.second) + ) + today_deposit_amount = sum(deposit.amount for deposit in user_today_deposit) + deposit_list = [ + { + "id": deposit.id, + "date": now.date(), + "start_time": str(deposit.create_time).split(".")[0], + "end_time": end_time.replace(microsecond=0), + "amount": deposit.amount, + "rate": f"{deposit.rate * 100:.2f}", + "projected_revenue": int( + deposit.amount * deposit.rate * deposit.effective_hour + ) + or 1, + } + for deposit in user_today_deposit + ] + platform = PlatformUtils.get_platform(session) + data = { + "name": uname, + "rank": rank + 1, + "avatar_url": PlatformUtils.get_user_avatar_url( + user_id, 
platform, session.self_id + ), + "amount": user.amount, + "deposit_count": deposit_count, + "today_deposit_count": len(user_today_deposit), + "cumulative_gain": sum_data[0]["sum"] or 0, + "projected_revenue": projected_revenue, + "today_deposit_amount": today_deposit_amount, + "deposit_list": deposit_list, + "create_time": now.replace(microsecond=0), + } + return await template_to_pic( + template_path=str((TEMPLATE_PATH / "mahiro_bank").absolute()), + template_name="user.html", + templates={"data": data}, + pages={ + "viewport": {"width": 386, "height": 700}, + "base_url": f"file://{TEMPLATE_PATH}", + }, + wait=2, + ) + + @classmethod + async def get_bank_info(cls) -> bytes: + now = datetime.now() + now_start = datetime.now() - timedelta( + hours=now.hour, minutes=now.minute, seconds=now.second + ) + ( + bank_data, + today_count, + interest_amount, + active_user_count, + date_data, + ) = await asyncio.gather( + *[ + MahiroBank.annotate( + amount_sum=Sum("amount"), user_count=Count("id") + ).values("amount_sum", "user_count"), + MahiroBankLog.filter(create_time__gt=now_start).count(), + MahiroBankLog.filter(handle_type=BankHandleType.INTEREST) + .annotate(amount_sum=Sum("amount")) + .values("amount_sum"), + MahiroBankLog.filter( + create_time__gte=now_start - timedelta(days=7), + handle_type=BankHandleType.DEPOSIT, + ) + .annotate(count=Count("user_id", distinct=True)) + .values("count"), + MahiroBankLog.filter( + create_time__gte=now_start - timedelta(days=7), + handle_type=BankHandleType.DEPOSIT, + ) + .annotate(date=RawSQL("DATE(create_time)"), total_amount=Sum("amount")) + .group_by("date") + .values("date", "total_amount"), + ] + ) + date2cnt = {str(date["date"]): date["total_amount"] for date in date_data} + date = now.date() + e_date, e_amount = [], [] + for _ in range(7): + if str(date) in date2cnt: + e_amount.append(date2cnt[str(date)]) + else: + e_amount.append(0) + e_date.append(str(date)[5:]) + date -= timedelta(days=1) + e_date.reverse() + e_amount.reverse() + date = 1 + lasted_log = await MahiroBankLog.annotate().order_by("create_time").first() + if lasted_log: + date = now.date() - lasted_log.create_time.date() + date = (date.days or 1) + 1 + data = { + "amount_sum": bank_data[0]["amount_sum"], + "user_count": bank_data[0]["user_count"], + "today_count": today_count, + "day_amount": int(bank_data[0]["amount_sum"] / date), + "interest_amount": interest_amount[0]["amount_sum"] or 0, + "active_user_count": active_user_count[0]["count"] or 0, + "e_data": e_date, + "e_amount": e_amount, + "create_time": now.replace(microsecond=0), + } + return await template_to_pic( + template_path=str((TEMPLATE_PATH / "mahiro_bank").absolute()), + template_name="bank.html", + templates={"data": data}, + pages={ + "viewport": {"width": 450, "height": 750}, + "base_url": f"file://{TEMPLATE_PATH}", + }, + wait=2, + ) + + @classmethod + async def deposit( + cls, user_id: str, amount: int + ) -> tuple[MahiroBank, float, float | None]: + """存款 + + 参数: + user_id: 用户id + amount: 存款数量 + + 返回: + tuple[MahiroBank, float, float]: MahiroBank,利率,增加的利率 + """ + rate_range = base_config.get("rate_range") + rate = random.uniform(rate_range[0], rate_range[1]) + sign_user = await SignUser.get_user(user_id) + random_add_rate = await cls.random_event(float(sign_user.impression)) + if random_add_rate: + rate += random_add_rate + await UserConsole.reduce_gold(user_id, amount, GoldHandle.PLUGIN, "bank") + return await MahiroBank.deposit(user_id, amount, rate), rate, random_add_rate + + @classmethod + async def 
withdraw(cls, user_id: str, amount: int) -> MahiroBank: + """取款 + + 参数: + user_id: 用户id + amount: 取款数量 + + 返回: + MahiroBank + """ + await UserConsole.add_gold(user_id, amount, "bank") + return await MahiroBank.withdraw(user_id, amount) + + @classmethod + async def loan(cls, user_id: str, amount: int) -> tuple[MahiroBank, float | None]: + """贷款 + + 参数: + user_id: 用户id + amount: 贷款数量 + + 返回: + tuple[MahiroBank, float]: MahiroBank,贷款利率 + """ + rate_range = base_config.get("rate_range") + rate = random.uniform(rate_range[0], rate_range[1]) + sign_user = await SignUser.get_user(user_id) + user, _ = await MahiroBank.get_or_create(user_id=user_id) + if user.loan_amount + amount > sign_user.impression * 150: + raise ValueError("贷款数量超过最大限制,请签到提升好感度获取更多额度吧...") + random_reduce_rate = await cls.random_event(float(sign_user.impression)) + if random_reduce_rate: + rate -= random_reduce_rate + await UserConsole.add_gold(user_id, amount, "bank") + return await MahiroBank.loan(user_id, amount, rate), random_reduce_rate + + @classmethod + async def repayment(cls, user_id: str, amount: int) -> MahiroBank: + """还款 + + 参数: + user_id: 用户id + amount: 还款数量 + + 返回: + MahiroBank + """ + await UserConsole.reduce_gold(user_id, amount, GoldHandle.PLUGIN, "bank") + return await MahiroBank.repayment(user_id, amount) + + @classmethod + async def settlement(cls): + """结算每日利率""" + bank_user_list = await MahiroBank.filter(amount__gt=0).all() + log_list = await MahiroBankLog.filter( + is_completed=False, handle_type=BankHandleType.DEPOSIT + ).all() + user_list = await UserConsole.filter( + user_id__in=[user.user_id for user in bank_user_list] + ).all() + user_data = {user.user_id: user for user in user_list} + bank_data: dict[str, list[MahiroBankLog]] = {} + for log in log_list: + if log.user_id not in bank_data: + bank_data[log.user_id] = [] + bank_data[log.user_id].append(log) + log_create_list = [] + log_update_list = [] + # 计算每日默认金币 + for bank_user in bank_user_list: + if user := user_data.get(bank_user.user_id): + amount = bank_user.amount + if logs := bank_data.get(bank_user.user_id): + amount -= sum(log.amount for log in logs) + if not amount: + continue + # 计算每日默认金币 + gold = int(amount * bank_user.rate) + user.gold += gold + log_create_list.append( + MahiroBankLog( + user_id=bank_user.user_id, + amount=gold, + rate=bank_user.rate, + handle_type=BankHandleType.INTEREST, + is_completed=True, + ) + ) + # 计算每日存款金币 + for user_id, logs in bank_data.items(): + if user := user_data.get(user_id): + for log in logs: + gold = int(log.amount * log.rate * log.effective_hour) or 1 + user.gold += gold + log.is_completed = True + log_update_list.append(log) + log_create_list.append( + MahiroBankLog( + user_id=user_id, + amount=gold, + rate=log.rate, + handle_type=BankHandleType.INTEREST, + is_completed=True, + ) + ) + if log_create_list: + await MahiroBankLog.bulk_create(log_create_list, 10) + if log_update_list: + await MahiroBankLog.bulk_update(log_update_list, ["is_completed"], 10) + await UserConsole.bulk_update(user_list, ["gold"], 10) diff --git a/zhenxun/builtin_plugins/nickname.py b/zhenxun/builtin_plugins/nickname.py index 7dd9a697..5cbc519e 100644 --- a/zhenxun/builtin_plugins/nickname.py +++ b/zhenxun/builtin_plugins/nickname.py @@ -1,12 +1,17 @@ import random -from typing import Any -from nonebot import on_regex from nonebot.adapters import Bot -from nonebot.params import Depends, RegexGroup from nonebot.plugin import PluginMetadata from nonebot.rule import to_me -from nonebot_plugin_alconna import Alconna, Option, 
on_alconna, store_true +from nonebot_plugin_alconna import ( + Alconna, + Args, + Arparma, + CommandMeta, + Option, + on_alconna, + store_true, +) from nonebot_plugin_uninfo import Uninfo from zhenxun.configs.config import BotConfig, Config @@ -54,15 +59,22 @@ __plugin_meta__ = PluginMetadata( ).to_dict(), ) -_nickname_matcher = on_regex( - "(?:以后)?(?:叫我|请叫我|称呼我)(.*)", +_nickname_matcher = on_alconna( + Alconna( + "re:(?:以后)?(?:叫我|请叫我|称呼我)", + Args["name?", str], + meta=CommandMeta(compact=True), + ), rule=to_me(), priority=5, block=True, ) -_global_nickname_matcher = on_regex( - "设置全局昵称(.*)", rule=to_me(), priority=5, block=True +_global_nickname_matcher = on_alconna( + Alconna("设置全局昵称", Args["name?", str], meta=CommandMeta(compact=True)), + rule=to_me(), + priority=5, + block=True, ) _matcher = on_alconna( @@ -117,34 +129,32 @@ CANCEL = [ ] -def CheckNickname(): +async def CheckNickname( + bot: Bot, + session: Uninfo, + params: Arparma, +): """ 检查名称是否合法 """ - - async def dependency( - bot: Bot, - session: Uninfo, - reg_group: tuple[Any, ...] = RegexGroup(), - ): - black_word = Config.get_config("nickname", "BLACK_WORD") - (name,) = reg_group - logger.debug(f"昵称检查: {name}", "昵称设置", session=session) - if not name: - await MessageUtils.build_message("叫你空白?叫你虚空?叫你无名??").finish( - at_sender=True - ) - if session.user.id in bot.config.superusers: - logger.debug( - f"超级用户设置昵称, 跳过合法检测: {name}", "昵称设置", session=session - ) - return + black_word = Config.get_config("nickname", "BLACK_WORD") + name = params.query("name") + logger.debug(f"昵称检查: {name}", "昵称设置", session=session) + if not name: + await MessageUtils.build_message("叫你空白?叫你虚空?叫你无名??").finish( + at_sender=True + ) + if session.user.id in bot.config.superusers: + logger.debug( + f"超级用户设置昵称, 跳过合法检测: {name}", "昵称设置", session=session + ) + else: if len(name) > 20: await MessageUtils.build_message("昵称可不能超过20个字!").finish( at_sender=True ) if name in bot.config.nickname: - await MessageUtils.build_message("笨蛋!休想占用我的名字! #").finish( + await MessageUtils.build_message("笨蛋!休想占用我的名字! ").finish( at_sender=True ) if black_word: @@ -162,17 +172,17 @@ def CheckNickname(): await MessageUtils.build_message( f"字符 [{word}] 为禁止字符!" ).finish(at_sender=True) - - return Depends(dependency) + return name -@_nickname_matcher.handle(parameterless=[CheckNickname()]) +@_nickname_matcher.handle() async def _( + bot: Bot, session: Uninfo, + name_: Arparma, uname: str = UserName(), - reg_group: tuple[Any, ...] = RegexGroup(), ): - (name,) = reg_group + name = await CheckNickname(bot, session, name_) if len(name) < 5 and random.random() < 0.3: name = "~".join(name) group_id = None @@ -200,13 +210,14 @@ async def _( ) -@_global_nickname_matcher.handle(parameterless=[CheckNickname()]) +@_global_nickname_matcher.handle() async def _( + bot: Bot, session: Uninfo, + name_: Arparma, nickname: str = UserName(), - reg_group: tuple[Any, ...] 
= RegexGroup(), ): - (name,) = reg_group + name = await CheckNickname(bot, session, name_) await FriendUser.set_user_nickname( session.user.id, name, @@ -227,15 +238,14 @@ async def _(session: Uninfo, uname: str = UserName()): group_id = session.group.parent.id if session.group.parent else session.group.id if group_id: nickname = await GroupInfoUser.get_user_nickname(session.user.id, group_id) - card = uname else: nickname = await FriendUser.get_user_nickname(session.user.id) - card = uname if nickname: await MessageUtils.build_message(random.choice(REMIND).format(nickname)).finish( reply_to=True ) else: + card = uname await MessageUtils.build_message( random.choice( [ diff --git a/zhenxun/builtin_plugins/platform/qq/group_handle/__init__.py b/zhenxun/builtin_plugins/platform/qq/group_handle/__init__.py index d621f087..f4c28f04 100644 --- a/zhenxun/builtin_plugins/platform/qq/group_handle/__init__.py +++ b/zhenxun/builtin_plugins/platform/qq/group_handle/__init__.py @@ -1,4 +1,4 @@ -from nonebot import on_notice, on_request +from nonebot import on_notice from nonebot.adapters import Bot from nonebot.adapters.onebot.v11 import ( GroupDecreaseNoticeEvent, @@ -14,9 +14,10 @@ from nonebot_plugin_uninfo import Uninfo from zhenxun.builtin_plugins.platform.qq.exception import ForceAddGroupError from zhenxun.configs.config import BotConfig, Config from zhenxun.configs.utils import PluginExtraData, RegisterConfig, Task +from zhenxun.models.event_log import EventLog from zhenxun.models.group_console import GroupConsole from zhenxun.utils.common_utils import CommonUtils -from zhenxun.utils.enum import PluginType +from zhenxun.utils.enum import EventLogType, PluginType from zhenxun.utils.platform import PlatformUtils from zhenxun.utils.rules import notice_rule @@ -106,8 +107,6 @@ group_decrease_handle = on_notice( rule=notice_rule([GroupMemberDecreaseEvent, GroupDecreaseNoticeEvent]), ) """群员减少处理""" -add_group = on_request(priority=1, block=False) -"""加群同意请求""" @group_increase_handle.handle() @@ -141,8 +140,21 @@ async def _( group_id = str(event.group_id) if event.sub_type == "kick_me": """踢出Bot""" - await GroupManager.kick_bot(bot, user_id, group_id) + await GroupManager.kick_bot(bot, group_id, str(event.operator_id)) + await EventLog.create( + user_id=user_id, group_id=group_id, event_type=EventLogType.KICK_BOT + ) elif event.sub_type in ["leave", "kick"]: + if event.sub_type == "leave": + """主动退群""" + await EventLog.create( + user_id=user_id, group_id=group_id, event_type=EventLogType.LEAVE_MEMBER + ) + else: + """被踢出群""" + await EventLog.create( + user_id=user_id, group_id=group_id, event_type=EventLogType.KICK_MEMBER + ) result = await GroupManager.run_user( bot, user_id, group_id, str(event.operator_id), event.sub_type ) diff --git a/zhenxun/builtin_plugins/platform/qq/group_handle/data_source.py b/zhenxun/builtin_plugins/platform/qq/group_handle/data_source.py index 1190fb5e..9e8d7ea2 100644 --- a/zhenxun/builtin_plugins/platform/qq/group_handle/data_source.py +++ b/zhenxun/builtin_plugins/platform/qq/group_handle/data_source.py @@ -55,15 +55,17 @@ class GroupManager: if plugin_list := await PluginInfo.filter(default_status=False).all(): for plugin in plugin_list: block_plugin += f"<{plugin.module}," - group_info = await bot.get_group_info(group_id=group_id, no_cache=True) - await GroupConsole.create( + group_info = await bot.get_group_info(group_id=group_id) + await GroupConsole.update_or_create( group_id=group_info["group_id"], - group_name=group_info["group_name"], - 
max_member_count=group_info["max_member_count"], - member_count=group_info["member_count"], - group_flag=1, - block_plugin=block_plugin, - platform="qq", + defaults={ + "group_name": group_info["group_name"], + "max_member_count": group_info["max_member_count"], + "member_count": group_info["member_count"], + "group_flag": 1, + "block_plugin": block_plugin, + "platform": "qq", + }, ) @classmethod @@ -145,7 +147,7 @@ class GroupManager: e=e, ) raise ForceAddGroupError("强制拉群或未有群信息,退出群聊失败...") from e - await GroupConsole.filter(group_id=group_id).delete() + # await GroupConsole.filter(group_id=group_id).delete() raise ForceAddGroupError(f"触发强制入群保护,已成功退出群聊 {group_id}...") else: await cls.__handle_add_group(bot, group_id, group) diff --git a/zhenxun/builtin_plugins/platform/qq/user_group_request.py b/zhenxun/builtin_plugins/platform/qq/user_group_request.py new file mode 100644 index 00000000..ae1d32ed --- /dev/null +++ b/zhenxun/builtin_plugins/platform/qq/user_group_request.py @@ -0,0 +1,100 @@ +import asyncio +from datetime import datetime +import random + +from nonebot.adapters import Bot +from nonebot.plugin import PluginMetadata +from nonebot.rule import to_me +from nonebot_plugin_alconna import Alconna, Args, Arparma, Field, on_alconna +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.configs.utils import PluginCdBlock, PluginExtraData +from zhenxun.models.fg_request import FgRequest +from zhenxun.services.log import logger +from zhenxun.utils.depends import UserName +from zhenxun.utils.enum import RequestHandleType, RequestType +from zhenxun.utils.platform import PlatformUtils + +__plugin_meta__ = PluginMetadata( + name="群组申请", + description=""" + 一些小群直接邀请入群导致无法正常生成审核请求,需要用该方法手动生成审核请求。 + 当管理员同意同意时会发送消息进行提示,之后再进行拉群不会退出。 + 该消息会发送至管理员,多次发送不存在的群组id或相同群组id可能导致ban。 + """.strip(), + usage=""" + 指令: + 申请入群 [群号] + 示例: 申请入群 123123123 + """.strip(), + extra=PluginExtraData( + author="HibiKier", + version="0.1", + menu_type="其他", + limits=[PluginCdBlock(cd=300, result="每5分钟只能申请一次哦~")], + ).to_dict(), +) + + +_matcher = on_alconna( + Alconna( + "申请入群", + Args[ + "group_id", + int, + Field( + missing_tips=lambda: "请在命令后跟随群组id!", + unmatch_tips=lambda _: "群组id必须为数字!", + ), + ], + ), + skip_for_unmatch=False, + priority=5, + block=True, + rule=to_me(), +) + + +@_matcher.handle() +async def _( + bot: Bot, session: Uninfo, arparma: Arparma, group_id: int, uname: str = UserName() +): + # 旧请求全部设置为过期 + await FgRequest.filter( + request_type=RequestType.GROUP, + user_id=session.user.id, + group_id=str(group_id), + handle_type__isnull=True, + ).update(handle_type=RequestHandleType.EXPIRE) + f = await FgRequest.create( + request_type=RequestType.GROUP, + platform=PlatformUtils.get_platform(session), + bot_id=bot.self_id, + flag="0", + user_id=session.user.id, + nickname=uname, + group_id=str(group_id), + ) + results = await PlatformUtils.send_superuser( + bot, + f"*****一份入群申请*****\n" + f"ID:{f.id}\n" + f"申请人:{uname}({session.user.id})\n群聊:" + f"{group_id}\n邀请日期:{datetime.now().replace(microsecond=0)}\n" + "注:该请求为手动申请入群", + ) + if message_ids := [ + str(r[1].msg_ids[0]["message_id"]) for r in results if r[1] and r[1].msg_ids + ]: + f.message_ids = ",".join(message_ids) + await f.save(update_fields=["message_ids"]) + await asyncio.sleep(random.randint(1, 5)) + await bot.send_private_msg( + user_id=int(session.user.id), + message=f"已发送申请,请等待管理员审核,ID:{f.id}。", + ) + logger.info( + f"用户 {uname}({session.user.id}) 申请入群 {group_id},ID:{f.id}。", + arparma.header_result, + session=session, + ) diff --git 
a/zhenxun/builtin_plugins/platform/qq_api/ug_watch.py b/zhenxun/builtin_plugins/platform/qq_api/ug_watch.py index 4e7a708c..4435e880 100644 --- a/zhenxun/builtin_plugins/platform/qq_api/ug_watch.py +++ b/zhenxun/builtin_plugins/platform/qq_api/ug_watch.py @@ -1,4 +1,4 @@ -from nonebot.message import run_preprocessor +from nonebot import on_message from nonebot_plugin_uninfo import Uninfo from zhenxun.models.friend_user import FriendUser @@ -8,24 +8,27 @@ from zhenxun.services.log import logger from zhenxun.utils.platform import PlatformUtils -@run_preprocessor -async def do_something(session: Uninfo): +def rule(session: Uninfo) -> bool: + return PlatformUtils.is_qbot(session) + + +_matcher = on_message(priority=999, block=False, rule=rule) + + +@_matcher.handle() +async def _(session: Uninfo): platform = PlatformUtils.get_platform(session) if session.group: if not await GroupConsole.exists(group_id=session.group.id): await GroupConsole.create(group_id=session.group.id) - logger.info("添加当前群组ID信息" "", session=session) - - if not await GroupInfoUser.exists( - user_id=session.user.id, group_id=session.group.id - ): - await GroupInfoUser.create( - user_id=session.user.id, group_id=session.group.id, platform=platform - ) - logger.info("添加当前用户群组ID信息", "", session=session) + logger.info("添加当前群组ID信息", session=session) + await GroupInfoUser.update_or_create( + user_id=session.user.id, + group_id=session.group.id, + platform=PlatformUtils.get_platform(session), + ) elif not await FriendUser.exists(user_id=session.user.id, platform=platform): - try: - await FriendUser.create(user_id=session.user.id, platform=platform) - logger.info("添加当前好友用户信息", "", session=session) - except Exception as e: - logger.error("添加当前好友用户信息失败", session=session, e=e) + await FriendUser.create( + user_id=session.user.id, platform=PlatformUtils.get_platform(session) + ) + logger.info("添加当前好友用户信息", "", session=session) diff --git a/zhenxun/builtin_plugins/plugin_store/__init__.py b/zhenxun/builtin_plugins/plugin_store/__init__.py index 7e9f52a0..72d6d7dd 100644 --- a/zhenxun/builtin_plugins/plugin_store/__init__.py +++ b/zhenxun/builtin_plugins/plugin_store/__init__.py @@ -9,7 +9,7 @@ from zhenxun.utils.enum import PluginType from zhenxun.utils.message import MessageUtils from zhenxun.utils.utils import is_number -from .data_source import ShopManage +from .data_source import StoreManager __plugin_meta__ = PluginMetadata( name="插件商店", @@ -82,7 +82,7 @@ _matcher.shortcut( @_matcher.assign("$main") async def _(session: EventSession): try: - result = await ShopManage.get_plugins_info() + result = await StoreManager.get_plugins_info() logger.info("查看插件列表", "插件商店", session=session) await MessageUtils.build_message(result).send() except Exception as e: @@ -97,7 +97,7 @@ async def _(session: EventSession, plugin_id: str): await MessageUtils.build_message(f"正在添加插件 Id: {plugin_id}").send() else: await MessageUtils.build_message(f"正在添加插件 Module: {plugin_id}").send() - result = await ShopManage.add_plugin(plugin_id) + result = await StoreManager.add_plugin(plugin_id) except Exception as e: logger.error(f"添加插件 Id: {plugin_id}失败", "插件商店", session=session, e=e) await MessageUtils.build_message( @@ -110,7 +110,7 @@ async def _(session: EventSession, plugin_id: str): @_matcher.assign("remove") async def _(session: EventSession, plugin_id: str): try: - result = await ShopManage.remove_plugin(plugin_id) + result = await StoreManager.remove_plugin(plugin_id) except Exception as e: logger.error(f"移除插件 Id: {plugin_id}失败", "插件商店", session=session, 
e=e) await MessageUtils.build_message( @@ -123,7 +123,7 @@ async def _(session: EventSession, plugin_id: str): @_matcher.assign("search") async def _(session: EventSession, plugin_name_or_author: str): try: - result = await ShopManage.search_plugin(plugin_name_or_author) + result = await StoreManager.search_plugin(plugin_name_or_author) except Exception as e: logger.error( f"搜索插件 name: {plugin_name_or_author}失败", @@ -145,7 +145,7 @@ async def _(session: EventSession, plugin_id: str): await MessageUtils.build_message(f"正在更新插件 Id: {plugin_id}").send() else: await MessageUtils.build_message(f"正在更新插件 Module: {plugin_id}").send() - result = await ShopManage.update_plugin(plugin_id) + result = await StoreManager.update_plugin(plugin_id) except Exception as e: logger.error(f"更新插件 Id: {plugin_id}失败", "插件商店", session=session, e=e) await MessageUtils.build_message( @@ -159,7 +159,7 @@ async def _(session: EventSession, plugin_id: str): async def _(session: EventSession): try: await MessageUtils.build_message("正在更新全部插件").send() - result = await ShopManage.update_all_plugin() + result = await StoreManager.update_all_plugin() except Exception as e: logger.error("更新全部插件失败", "插件商店", session=session, e=e) await MessageUtils.build_message(f"更新全部插件失败 e: {e}").finish() diff --git a/zhenxun/builtin_plugins/plugin_store/config.py b/zhenxun/builtin_plugins/plugin_store/config.py index dacaffec..7512d49e 100644 --- a/zhenxun/builtin_plugins/plugin_store/config.py +++ b/zhenxun/builtin_plugins/plugin_store/config.py @@ -9,3 +9,14 @@ DEFAULT_GITHUB_URL = "https://github.com/zhenxun-org/zhenxun_bot_plugins/tree/ma EXTRA_GITHUB_URL = "https://github.com/zhenxun-org/zhenxun_bot_plugins_index/tree/index" """插件库索引github仓库地址""" + +GITEE_RAW_URL = "https://gitee.com/two_Dimension/zhenxun_bot_plugins/raw/main" +"""GITEE仓库文件内容""" + +GITEE_CONTENTS_URL = ( + "https://gitee.com/api/v5/repos/two_Dimension/zhenxun_bot_plugins/contents" +) +"""GITEE仓库文件列表获取""" + + +LOG_COMMAND = "插件商店" diff --git a/zhenxun/builtin_plugins/plugin_store/data_source.py b/zhenxun/builtin_plugins/plugin_store/data_source.py index 6e662a81..b2dc96dc 100644 --- a/zhenxun/builtin_plugins/plugin_store/data_source.py +++ b/zhenxun/builtin_plugins/plugin_store/data_source.py @@ -1,6 +1,5 @@ from pathlib import Path import shutil -import subprocess from aiocache import cached import ujson as json @@ -14,9 +13,15 @@ from zhenxun.utils.github_utils import GithubUtils from zhenxun.utils.github_utils.models import RepoAPI from zhenxun.utils.http_utils import AsyncHttpx from zhenxun.utils.image_utils import BuildImage, ImageTemplate, RowStyle +from zhenxun.utils.manager.virtual_env_package_manager import VirtualEnvPackageManager from zhenxun.utils.utils import is_number -from .config import BASE_PATH, DEFAULT_GITHUB_URL, EXTRA_GITHUB_URL +from .config import ( + BASE_PATH, + DEFAULT_GITHUB_URL, + EXTRA_GITHUB_URL, + LOG_COMMAND, +) def row_style(column: str, text: str) -> RowStyle: @@ -39,72 +44,69 @@ def install_requirement(plugin_path: Path): requirement_files = ["requirement.txt", "requirements.txt"] requirement_paths = [plugin_path / file for file in requirement_files] - existing_requirements = next( + if existing_requirements := next( (path for path in requirement_paths if path.exists()), None - ) - - if not existing_requirements: - logger.debug( - f"No requirement.txt found for plugin: {plugin_path.name}", "插件管理" - ) - return - - try: - result = subprocess.run( - ["poetry", "run", "pip", "install", "-r", str(existing_requirements)], - check=True, - 
capture_output=True, - text=True, - ) - logger.debug( - "Successfully installed dependencies for" - f" plugin: {plugin_path.name}. Output:\n{result.stdout}", - "插件管理", - ) - except subprocess.CalledProcessError: - logger.error( - f"Failed to install dependencies for plugin: {plugin_path.name}. " - " Error:\n{e.stderr}" - ) + ): + VirtualEnvPackageManager.install_requirement(existing_requirements) -class ShopManage: +class StoreManager: @classmethod - @cached(60) - async def get_data(cls) -> dict[str, StorePluginInfo]: - """获取插件信息数据 - - 异常: - ValueError: 访问请求失败 + async def get_github_plugins(cls) -> list[StorePluginInfo]: + """获取github插件列表信息 返回: - dict: 插件信息数据 + list[StorePluginInfo]: 插件列表数据 """ - default_github_repo = GithubUtils.parse_github_url(DEFAULT_GITHUB_URL) - extra_github_repo = GithubUtils.parse_github_url(EXTRA_GITHUB_URL) - for repo_info in [default_github_repo, extra_github_repo]: - if await repo_info.update_repo_commit(): - logger.info(f"获取最新提交: {repo_info.branch}", "插件管理") - else: - logger.warning(f"获取最新提交失败: {repo_info}", "插件管理") - default_github_url = await default_github_repo.get_raw_download_urls( - "plugins.json" - ) - extra_github_url = await extra_github_repo.get_raw_download_urls("plugins.json") - res = await AsyncHttpx.get(default_github_url) - res2 = await AsyncHttpx.get(extra_github_url) + repo_info = GithubUtils.parse_github_url(DEFAULT_GITHUB_URL) + if await repo_info.update_repo_commit(): + logger.info(f"获取最新提交: {repo_info.branch}", LOG_COMMAND) + else: + logger.warning(f"获取最新提交失败: {repo_info}", LOG_COMMAND) + default_github_url = await repo_info.get_raw_download_urls("plugins.json") + response = await AsyncHttpx.get(default_github_url, check_status_code=200) + if response.status_code == 200: + logger.info("获取github插件列表成功", LOG_COMMAND) + return [StorePluginInfo(**detail) for detail in json.loads(response.text)] + else: + logger.warning( + f"获取github插件列表失败: {response.status_code}", LOG_COMMAND + ) + return [] - # 检查请求结果 - if res.status_code != 200 or res2.status_code != 200: - raise ValueError(f"下载错误, code: {res.status_code}, {res2.status_code}") + @classmethod + async def get_extra_plugins(cls) -> list[StorePluginInfo]: + """获取额外插件列表信息 - # 解析并合并返回的 JSON 数据 - data1 = json.loads(res.text) - data2 = json.loads(res2.text) - return { - name: StorePluginInfo(**detail) - for name, detail in {**data1, **data2}.items() - } + 返回: + list[StorePluginInfo]: 插件列表数据 + """ + repo_info = GithubUtils.parse_github_url(EXTRA_GITHUB_URL) + if await repo_info.update_repo_commit(): + logger.info(f"获取最新提交: {repo_info.branch}", LOG_COMMAND) + else: + logger.warning(f"获取最新提交失败: {repo_info}", LOG_COMMAND) + extra_github_url = await repo_info.get_raw_download_urls("plugins.json") + response = await AsyncHttpx.get(extra_github_url, check_status_code=200) + if response.status_code == 200: + return [StorePluginInfo(**detail) for detail in json.loads(response.text)] + else: + logger.warning( + f"获取github扩展插件列表失败: {response.status_code}", LOG_COMMAND + ) + return [] + + @classmethod + @cached(60) + async def get_data(cls) -> list[StorePluginInfo]: + """获取插件信息数据 + + 返回: + list[StorePluginInfo]: 插件信息数据 + """ + plugins = await cls.get_github_plugins() + extra_plugins = await cls.get_extra_plugins() + return [*plugins, *extra_plugins] @classmethod def version_check(cls, plugin_info: StorePluginInfo, suc_plugin: dict[str, str]): @@ -112,7 +114,7 @@ class ShopManage: 参数: plugin_info: StorePluginInfo - suc_plugin: dict[str, str] + suc_plugin: 模块名: 版本号 返回: str: 版本号 @@ -132,7 +134,7 @@ class 
ShopManage: 参数: plugin_info: StorePluginInfo - suc_plugin: dict[str, str] + suc_plugin: 模块名: 版本号 返回: bool: 是否有更新 @@ -156,21 +158,21 @@ class ShopManage: 返回: BuildImage | str: 返回消息 """ - data: dict[str, StorePluginInfo] = await cls.get_data() + plugin_list: list[StorePluginInfo] = await cls.get_data() column_name = ["-", "ID", "名称", "简介", "作者", "版本", "类型"] - plugin_list = await cls.get_loaded_plugins("module", "version") - suc_plugin = {p[0]: (p[1] or "0.1") for p in plugin_list} + db_plugin_list = await cls.get_loaded_plugins("module", "version") + suc_plugin = {p[0]: (p[1] or "0.1") for p in db_plugin_list} data_list = [ [ - "已安装" if plugin_info[1].module in suc_plugin else "", + "已安装" if plugin_info.module in suc_plugin else "", id, - plugin_info[0], - plugin_info[1].description, - plugin_info[1].author, - cls.version_check(plugin_info[1], suc_plugin), - plugin_info[1].plugin_type_name, + plugin_info.name, + plugin_info.description, + plugin_info.author, + cls.version_check(plugin_info, suc_plugin), + plugin_info.plugin_type_name, ] - for id, plugin_info in enumerate(data.items()) + for id, plugin_info in enumerate(plugin_list) ] return await ImageTemplate.table_page( "插件列表", @@ -190,15 +192,15 @@ class ShopManage: 返回: str: 返回消息 """ - data: dict[str, StorePluginInfo] = await cls.get_data() + plugin_list: list[StorePluginInfo] = await cls.get_data() try: plugin_key = await cls._resolve_plugin_key(plugin_id) except ValueError as e: return str(e) - plugin_list = await cls.get_loaded_plugins("module") - plugin_info = data[plugin_key] - if plugin_info.module in [p[0] for p in plugin_list]: - return f"插件 {plugin_key} 已安装,无需重复安装" + db_plugin_list = await cls.get_loaded_plugins("module") + plugin_info = next(p for p in plugin_list if p.module == plugin_key) + if plugin_info.module in [p[0] for p in db_plugin_list]: + return f"插件 {plugin_info.name} 已安装,无需重复安装" is_external = True if plugin_info.github_url is None: plugin_info.github_url = DEFAULT_GITHUB_URL @@ -207,34 +209,39 @@ class ShopManage: if len(version_split) > 1: github_url_split = plugin_info.github_url.split("/tree/") plugin_info.github_url = f"{github_url_split[0]}/tree/{version_split[1]}" - logger.info(f"正在安装插件 {plugin_key}...") + logger.info(f"正在安装插件 {plugin_info.name}...", LOG_COMMAND) await cls.install_plugin_with_repo( plugin_info.github_url, plugin_info.module_path, plugin_info.is_dir, is_external, ) - return f"插件 {plugin_key} 安装成功! 重启后生效" + return f"插件 {plugin_info.name} 安装成功! 
重启后生效" @classmethod async def install_plugin_with_repo( - cls, github_url: str, module_path: str, is_dir: bool, is_external: bool = False + cls, + github_url: str, + module_path: str, + is_dir: bool, + is_external: bool = False, ): - files: list[str] repo_api: RepoAPI repo_info = GithubUtils.parse_github_url(github_url) if await repo_info.update_repo_commit(): - logger.info(f"获取最新提交: {repo_info.branch}", "插件管理") + logger.info(f"获取最新提交: {repo_info.branch}", LOG_COMMAND) else: - logger.warning(f"获取最新提交失败: {repo_info}", "插件管理") - logger.debug(f"成功获取仓库信息: {repo_info}", "插件管理") + logger.warning(f"获取最新提交失败: {repo_info}", LOG_COMMAND) + logger.debug(f"成功获取仓库信息: {repo_info}", LOG_COMMAND) for repo_api in GithubUtils.iter_api_strategies(): try: await repo_api.parse_repo_info(repo_info) break except Exception as e: logger.warning( - f"获取插件文件失败: {e} | API类型: {repo_api.strategy}", "插件管理" + f"获取插件文件失败 | API类型: {repo_api.strategy}", + LOG_COMMAND, + e=e, ) continue else: @@ -250,7 +257,7 @@ class ShopManage: base_path = BASE_PATH / "plugins" if is_external else BASE_PATH base_path = base_path if module_path else base_path / repo_info.repo download_paths: list[Path | str] = [base_path / file for file in files] - logger.debug(f"插件下载路径: {download_paths}", "插件管理") + logger.debug(f"插件下载路径: {download_paths}", LOG_COMMAND) result = await AsyncHttpx.gather_download_file(download_urls, download_paths) for _id, success in enumerate(result): if not success: @@ -265,12 +272,12 @@ class ShopManage: req_files.extend( repo_api.get_files(f"{replace_module_path}/requirement.txt", False) ) - logger.debug(f"获取插件依赖文件列表: {req_files}", "插件管理") + logger.debug(f"获取插件依赖文件列表: {req_files}", LOG_COMMAND) req_download_urls = [ await repo_info.get_raw_download_urls(file) for file in req_files ] req_paths: list[Path | str] = [plugin_path / file for file in req_files] - logger.debug(f"插件依赖文件下载路径: {req_paths}", "插件管理") + logger.debug(f"插件依赖文件下载路径: {req_paths}", LOG_COMMAND) if req_files: result = await AsyncHttpx.gather_download_file( req_download_urls, req_paths @@ -278,7 +285,7 @@ class ShopManage: for success in result: if not success: raise Exception("插件依赖文件下载失败") - logger.debug(f"插件依赖文件列表: {req_paths}", "插件管理") + logger.debug(f"插件依赖文件列表: {req_paths}", LOG_COMMAND) install_requirement(plugin_path) except ValueError as e: logger.warning("未获取到依赖文件路径...", e=e) @@ -295,12 +302,12 @@ class ShopManage: 返回: str: 返回消息 """ - data: dict[str, StorePluginInfo] = await cls.get_data() + plugin_list: list[StorePluginInfo] = await cls.get_data() try: plugin_key = await cls._resolve_plugin_key(plugin_id) except ValueError as e: return str(e) - plugin_info = data[plugin_key] + plugin_info = next(p for p in plugin_list if p.module == plugin_key) path = BASE_PATH if plugin_info.github_url: path = BASE_PATH / "plugins" @@ -309,14 +316,14 @@ class ShopManage: if not plugin_info.is_dir: path = Path(f"{path}.py") if not path.exists(): - return f"插件 {plugin_key} 不存在..." - logger.debug(f"尝试移除插件 {plugin_key} 文件: {path}", "插件管理") + return f"插件 {plugin_info.name} 不存在..." + logger.debug(f"尝试移除插件 {plugin_info.name} 文件: {path}", LOG_COMMAND) if plugin_info.is_dir: shutil.rmtree(path) else: path.unlink() await PluginInitManager.remove(f"zhenxun.{plugin_info.module_path}") - return f"插件 {plugin_key} 移除成功! 重启后生效" + return f"插件 {plugin_info.name} 移除成功! 
重启后生效" @classmethod async def search_plugin(cls, plugin_name_or_author: str) -> BuildImage | str: @@ -328,25 +335,25 @@ class ShopManage: 返回: BuildImage | str: 返回消息 """ - data: dict[str, StorePluginInfo] = await cls.get_data() - plugin_list = await cls.get_loaded_plugins("module", "version") - suc_plugin = {p[0]: (p[1] or "Unknown") for p in plugin_list} + plugin_list: list[StorePluginInfo] = await cls.get_data() + db_plugin_list = await cls.get_loaded_plugins("module", "version") + suc_plugin = {p[0]: (p[1] or "Unknown") for p in db_plugin_list} filtered_data = [ (id, plugin_info) - for id, plugin_info in enumerate(data.items()) - if plugin_name_or_author.lower() in plugin_info[0].lower() - or plugin_name_or_author.lower() in plugin_info[1].author.lower() + for id, plugin_info in enumerate(plugin_list) + if plugin_name_or_author.lower() in plugin_info.name.lower() + or plugin_name_or_author.lower() in plugin_info.author.lower() ] data_list = [ [ - "已安装" if plugin_info[1].module in suc_plugin else "", + "已安装" if plugin_info.module in suc_plugin else "", id, - plugin_info[0], - plugin_info[1].description, - plugin_info[1].author, - cls.version_check(plugin_info[1], suc_plugin), - plugin_info[1].plugin_type_name, + plugin_info.name, + plugin_info.description, + plugin_info.author, + cls.version_check(plugin_info, suc_plugin), + plugin_info.plugin_type_name, ] for id, plugin_info in filtered_data ] @@ -354,7 +361,7 @@ class ShopManage: return "未找到相关插件..." column_name = ["-", "ID", "名称", "简介", "作者", "版本", "类型"] return await ImageTemplate.table_page( - "插件列表", + "商店插件列表", "通过添加/移除插件 ID 来管理插件", column_name, data_list, @@ -371,20 +378,20 @@ class ShopManage: 返回: str: 返回消息 """ - data: dict[str, StorePluginInfo] = await cls.get_data() + plugin_list: list[StorePluginInfo] = await cls.get_data() try: plugin_key = await cls._resolve_plugin_key(plugin_id) except ValueError as e: return str(e) - logger.info(f"尝试更新插件 {plugin_key}", "插件管理") - plugin_info = data[plugin_key] - plugin_list = await cls.get_loaded_plugins("module", "version") - suc_plugin = {p[0]: (p[1] or "Unknown") for p in plugin_list} - if plugin_info.module not in [p[0] for p in plugin_list]: - return f"插件 {plugin_key} 未安装,无法更新" - logger.debug(f"当前插件列表: {suc_plugin}", "插件管理") + plugin_info = next(p for p in plugin_list if p.module == plugin_key) + logger.info(f"尝试更新插件 {plugin_info.name}", LOG_COMMAND) + db_plugin_list = await cls.get_loaded_plugins("module", "version") + suc_plugin = {p[0]: (p[1] or "Unknown") for p in db_plugin_list} + if plugin_info.module not in [p[0] for p in db_plugin_list]: + return f"插件 {plugin_info.name} 未安装,无法更新" + logger.debug(f"当前插件列表: {suc_plugin}", LOG_COMMAND) if cls.check_version_is_new(plugin_info, suc_plugin): - return f"插件 {plugin_key} 已是最新版本" + return f"插件 {plugin_info.name} 已是最新版本" is_external = True if plugin_info.github_url is None: plugin_info.github_url = DEFAULT_GITHUB_URL @@ -395,7 +402,7 @@ class ShopManage: plugin_info.is_dir, is_external, ) - return f"插件 {plugin_key} 更新成功! 重启后生效" + return f"插件 {plugin_info.name} 更新成功! 
重启后生效" @classmethod async def update_all_plugin(cls) -> str: @@ -407,24 +414,33 @@ class ShopManage: 返回: str: 返回消息 """ - data: dict[str, StorePluginInfo] = await cls.get_data() - plugin_list = list(data.keys()) + plugin_list: list[StorePluginInfo] = await cls.get_data() + plugin_name_list = [p.name for p in plugin_list] update_failed_list = [] update_success_list = [] result = "--已更新{}个插件 {}个失败 {}个成功--" - logger.info(f"尝试更新全部插件 {plugin_list}", "插件管理") - for plugin_key in plugin_list: + logger.info(f"尝试更新全部插件 {plugin_name_list}", LOG_COMMAND) + for plugin_info in plugin_list: try: - plugin_info = data[plugin_key] - plugin_list = await cls.get_loaded_plugins("module", "version") - suc_plugin = {p[0]: (p[1] or "Unknown") for p in plugin_list} - if plugin_info.module not in [p[0] for p in plugin_list]: - logger.debug(f"插件 {plugin_key} 未安装,跳过", "插件管理") + db_plugin_list = await cls.get_loaded_plugins("module", "version") + suc_plugin = {p[0]: (p[1] or "Unknown") for p in db_plugin_list} + if plugin_info.module not in [p[0] for p in db_plugin_list]: + logger.debug( + f"插件 {plugin_info.name}({plugin_info.module}) 未安装,跳过", + LOG_COMMAND, + ) continue if cls.check_version_is_new(plugin_info, suc_plugin): - logger.debug(f"插件 {plugin_key} 已是最新版本,跳过", "插件管理") + logger.debug( + f"插件 {plugin_info.name}({plugin_info.module}) " + "已是最新版本,跳过", + LOG_COMMAND, + ) continue - logger.info(f"正在更新插件 {plugin_key}", "插件管理") + logger.info( + f"正在更新插件 {plugin_info.name}({plugin_info.module})", + LOG_COMMAND, + ) is_external = True if plugin_info.github_url is None: plugin_info.github_url = DEFAULT_GITHUB_URL @@ -435,10 +451,14 @@ class ShopManage: plugin_info.is_dir, is_external, ) - update_success_list.append(plugin_key) + update_success_list.append(plugin_info.name) except Exception as e: - logger.error(f"更新插件 {plugin_key} 失败: {e}", "插件管理") - update_failed_list.append(plugin_key) + logger.error( + f"更新插件 {plugin_info.name}({plugin_info.module}) 失败", + LOG_COMMAND, + e=e, + ) + update_failed_list.append(plugin_info.name) if not update_success_list and not update_failed_list: return "全部插件已是最新版本" if update_success_list: @@ -460,13 +480,28 @@ class ShopManage: @classmethod async def _resolve_plugin_key(cls, plugin_id: str) -> str: - data: dict[str, StorePluginInfo] = await cls.get_data() + """获取插件module + + 参数: + plugin_id: module,id或插件名称 + + 异常: + ValueError: 插件不存在 + ValueError: 插件不存在 + + 返回: + str: 插件模块名 + """ + plugin_list: list[StorePluginInfo] = await cls.get_data() if is_number(plugin_id): idx = int(plugin_id) - if idx < 0 or idx >= len(data): + if idx < 0 or idx >= len(plugin_list): raise ValueError("插件ID不存在...") - return list(data.keys())[idx] + return plugin_list[idx].module elif isinstance(plugin_id, str): - if plugin_id not in [v.module for k, v in data.items()]: - raise ValueError("插件Module不存在...") - return {v.module: k for k, v in data.items()}[plugin_id] + result = ( + None if plugin_id not in [v.module for v in plugin_list] else plugin_id + ) or next(v for v in plugin_list if v.name == plugin_id).module + if not result: + raise ValueError("插件 Module / 名称 不存在...") + return result diff --git a/zhenxun/builtin_plugins/plugin_store/models.py b/zhenxun/builtin_plugins/plugin_store/models.py index df65dd56..2bea1315 100644 --- a/zhenxun/builtin_plugins/plugin_store/models.py +++ b/zhenxun/builtin_plugins/plugin_store/models.py @@ -1,3 +1,5 @@ +from typing import Any, Literal + from nonebot.compat import model_dump from pydantic import BaseModel @@ -13,9 +15,30 @@ type2name: dict[str, str] = { } +class 
GiteeContents(BaseModel): + """Gitee Api内容""" + + type: Literal["file", "dir"] + """类型""" + size: Any + """文件大小""" + name: str + """文件名""" + path: str + """文件路径""" + url: str + """文件链接""" + html_url: str + """文件html链接""" + download_url: str + """文件raw链接""" + + class StorePluginInfo(BaseModel): """插件信息""" + name: str + """插件名""" module: str """模块名""" module_path: str diff --git a/zhenxun/builtin_plugins/record_request.py b/zhenxun/builtin_plugins/record_request.py index d4b0c694..32d5d551 100644 --- a/zhenxun/builtin_plugins/record_request.py +++ b/zhenxun/builtin_plugins/record_request.py @@ -17,11 +17,12 @@ from nonebot_plugin_session import EventSession from zhenxun.configs.config import BotConfig, Config from zhenxun.configs.utils import PluginExtraData, RegisterConfig +from zhenxun.models.event_log import EventLog from zhenxun.models.fg_request import FgRequest from zhenxun.models.friend_user import FriendUser from zhenxun.models.group_console import GroupConsole from zhenxun.services.log import logger -from zhenxun.utils.enum import PluginType, RequestHandleType, RequestType +from zhenxun.utils.enum import EventLogType, PluginType, RequestHandleType, RequestType from zhenxun.utils.platform import PlatformUtils base_config = Config.get("invite_manager") @@ -112,21 +113,29 @@ async def _(bot: v12Bot | v11Bot, event: FriendRequestEvent, session: EventSessi nickname=nickname, comment=comment, ) - await PlatformUtils.send_superuser( + results = await PlatformUtils.send_superuser( bot, f"*****一份好友申请*****\n" f"ID: {f.id}\n" f"昵称:{nickname}({event.user_id})\n" f"自动同意:{'√' if base_config.get('AUTO_ADD_FRIEND') else '×'}\n" - f"日期:{str(datetime.now()).split('.')[0]}\n" + f"日期:{datetime.now().replace(microsecond=0)}\n" f"备注:{event.comment}", ) + if message_ids := [ + str(r[1].msg_ids[0]["message_id"]) + for r in results + if r[1] and r[1].msg_ids + ]: + f.message_ids = ",".join(message_ids) + await f.save(update_fields=["message_ids"]) else: logger.debug("好友请求五分钟内重复, 已忽略", "好友请求", target=event.user_id) @group_req.handle() async def _(bot: v12Bot | v11Bot, event: GroupRequestEvent, session: EventSession): + # sourcery skip: low-code-quality if event.sub_type != "invite": return if str(event.user_id) in bot.config.superusers or base_config.get("AUTO_ADD_GROUP"): @@ -186,7 +195,7 @@ async def _(bot: v12Bot | v11Bot, event: GroupRequestEvent, session: EventSessio group_id=str(event.group_id), handle_type=RequestHandleType.APPROVE, ) - await PlatformUtils.send_superuser( + results = await PlatformUtils.send_superuser( bot, f"*****一份入群申请*****\n" f"ID:{f.id}\n" @@ -230,13 +239,27 @@ async def _(bot: v12Bot | v11Bot, event: GroupRequestEvent, session: EventSessio nickname=nickname, group_id=str(event.group_id), ) - await PlatformUtils.send_superuser( + kick_count = await EventLog.filter( + group_id=str(event.group_id), event_type=EventLogType.KICK_BOT + ).count() + kick_message = ( + f"\n该群累计踢出{BotConfig.self_nickname} <{kick_count}>次" + if kick_count + else "" + ) + results = await PlatformUtils.send_superuser( bot, f"*****一份入群申请*****\n" f"ID:{f.id}\n" f"申请人:{nickname}({event.user_id})\n群聊:" - f"{event.group_id}\n邀请日期:{datetime.now().replace(microsecond=0)}", + f"{event.group_id}\n邀请日期:{datetime.now().replace(microsecond=0)}" + f"{kick_message}", ) + if message_ids := [ + str(r[1].msg_ids[0]["message_id"]) for r in results if r[1] and r[1].msg_ids + ]: + f.message_ids = ",".join(message_ids) + await f.save(update_fields=["message_ids"]) else: logger.debug( "群聊请求五分钟内重复, 已忽略", diff --git 
a/zhenxun/builtin_plugins/scheduler_admin/__init__.py b/zhenxun/builtin_plugins/scheduler_admin/__init__.py new file mode 100644 index 00000000..adaaa621 --- /dev/null +++ b/zhenxun/builtin_plugins/scheduler_admin/__init__.py @@ -0,0 +1,51 @@ +from nonebot.plugin import PluginMetadata + +from zhenxun.configs.utils import PluginExtraData +from zhenxun.utils.enum import PluginType + +from . import command # noqa: F401 + +__plugin_meta__ = PluginMetadata( + name="定时任务管理", + description="查看和管理由 SchedulerManager 控制的定时任务。", + usage=""" +📋 定时任务管理 - 支持群聊和私聊操作 + +🔍 查看任务: + 定时任务 查看 [-all] [-g <群号>] [-p <插件>] [--page <页码>] + • 群聊中: 查看本群任务 + • 私聊中: 必须使用 -g <群号> 或 -all 选项 (SUPERUSER) + +📊 任务状态: + 定时任务 状态 <任务ID> 或 任务状态 <任务ID> + • 查看单个任务的详细信息和状态 + +⚙️ 任务管理 (SUPERUSER): + 定时任务 设置 <插件> [时间选项] [-g <群号> | -g all] [--kwargs <参数>] + 定时任务 删除 <任务ID> | -p <插件> [-g <群号>] | -all + 定时任务 暂停 <任务ID> | -p <插件> [-g <群号>] | -all + 定时任务 恢复 <任务ID> | -p <插件> [-g <群号>] | -all + 定时任务 执行 <任务ID> + 定时任务 更新 <任务ID> [时间选项] [--kwargs <参数>] + +📝 时间选项 (三选一): + --cron "<分> <时> <日> <月> <周>" # 例: --cron "0 8 * * *" + --interval <时间间隔> # 例: --interval 30m, 2h, 10s + --date "" # 例: --date "2024-01-01 08:00:00" + --daily "" # 例: --daily "08:30" + +📚 其他功能: + 定时任务 插件列表 # 查看所有可设置定时任务的插件 (SUPERUSER) + +🏷️ 别名支持: + 查看: ls, list | 设置: add, 开启 | 删除: del, rm, remove, 关闭, 取消 + 暂停: pause | 恢复: resume | 执行: trigger, run | 状态: status, info + 更新: update, modify, 修改 | 插件列表: plugins + """.strip(), + extra=PluginExtraData( + author="HibiKier", + version="0.1.2", + plugin_type=PluginType.SUPERUSER, + is_show=False, + ).to_dict(), +) diff --git a/zhenxun/builtin_plugins/scheduler_admin/command.py b/zhenxun/builtin_plugins/scheduler_admin/command.py new file mode 100644 index 00000000..08a085fb --- /dev/null +++ b/zhenxun/builtin_plugins/scheduler_admin/command.py @@ -0,0 +1,836 @@ +import asyncio +from datetime import datetime +import re + +from nonebot.adapters import Event +from nonebot.adapters.onebot.v11 import Bot +from nonebot.params import Depends +from nonebot.permission import SUPERUSER +from nonebot_plugin_alconna import ( + Alconna, + AlconnaMatch, + Args, + Arparma, + Match, + Option, + Query, + Subcommand, + on_alconna, +) +from pydantic import BaseModel, ValidationError + +from zhenxun.utils._image_template import ImageTemplate +from zhenxun.utils.manager.schedule_manager import scheduler_manager + + +def _get_type_name(annotation) -> str: + """获取类型注解的名称""" + if hasattr(annotation, "__name__"): + return annotation.__name__ + elif hasattr(annotation, "_name"): + return annotation._name + else: + return str(annotation) + + +from zhenxun.utils.message import MessageUtils +from zhenxun.utils.rules import admin_check + + +def _format_trigger(schedule_status: dict) -> str: + """将触发器配置格式化为人类可读的字符串""" + trigger_type = schedule_status["trigger_type"] + config = schedule_status["trigger_config"] + + if trigger_type == "cron": + minute = config.get("minute", "*") + hour = config.get("hour", "*") + day = config.get("day", "*") + month = config.get("month", "*") + day_of_week = config.get("day_of_week", "*") + + if day == "*" and month == "*" and day_of_week == "*": + formatted_hour = hour if hour == "*" else f"{int(hour):02d}" + formatted_minute = minute if minute == "*" else f"{int(minute):02d}" + return f"每天 {formatted_hour}:{formatted_minute}" + else: + return f"Cron: {minute} {hour} {day} {month} {day_of_week}" + elif trigger_type == "interval": + seconds = config.get("seconds", 0) + minutes = config.get("minutes", 0) + hours = config.get("hours", 0) + 
days = config.get("days", 0) + if days: + trigger_str = f"每 {days} 天" + elif hours: + trigger_str = f"每 {hours} 小时" + elif minutes: + trigger_str = f"每 {minutes} 分钟" + else: + trigger_str = f"每 {seconds} 秒" + elif trigger_type == "date": + run_date = config.get("run_date", "未知时间") + trigger_str = f"在 {run_date}" + else: + trigger_str = f"{trigger_type}: {config}" + + return trigger_str + + +def _format_params(schedule_status: dict) -> str: + """将任务参数格式化为人类可读的字符串""" + if kwargs := schedule_status.get("job_kwargs"): + kwargs_str = " | ".join(f"{k}: {v}" for k, v in kwargs.items()) + return kwargs_str + return "-" + + +def _parse_interval(interval_str: str) -> dict: + """增强版解析器,支持 d(天)""" + match = re.match(r"(\d+)([smhd])", interval_str.lower()) + if not match: + raise ValueError("时间间隔格式错误, 请使用如 '30m', '2h', '1d', '10s' 的格式。") + + value, unit = int(match.group(1)), match.group(2) + if unit == "s": + return {"seconds": value} + if unit == "m": + return {"minutes": value} + if unit == "h": + return {"hours": value} + if unit == "d": + return {"days": value} + return {} + + +def _parse_daily_time(time_str: str) -> dict: + """解析 HH:MM 或 HH:MM:SS 格式的时间为 cron 配置""" + if match := re.match(r"^(\d{1,2}):(\d{1,2})(?::(\d{1,2}))?$", time_str): + hour, minute, second = match.groups() + hour, minute = int(hour), int(minute) + + if not (0 <= hour <= 23 and 0 <= minute <= 59): + raise ValueError("小时或分钟数值超出范围。") + + cron_config = { + "minute": str(minute), + "hour": str(hour), + "day": "*", + "month": "*", + "day_of_week": "*", + } + if second is not None: + if not (0 <= int(second) <= 59): + raise ValueError("秒数值超出范围。") + cron_config["second"] = str(second) + + return cron_config + else: + raise ValueError("时间格式错误,请使用 'HH:MM' 或 'HH:MM:SS' 格式。") + + +async def GetBotId( + bot: Bot, + bot_id_match: Match[str] = AlconnaMatch("bot_id"), +) -> str: + """获取要操作的Bot ID""" + if bot_id_match.available: + return bot_id_match.result + return bot.self_id + + +class ScheduleTarget: + """定时任务操作目标的基类""" + + pass + + +class TargetByID(ScheduleTarget): + """按任务ID操作""" + + def __init__(self, id: int): + self.id = id + + +class TargetByPlugin(ScheduleTarget): + """按插件名操作""" + + def __init__( + self, plugin: str, group_id: str | None = None, all_groups: bool = False + ): + self.plugin = plugin + self.group_id = group_id + self.all_groups = all_groups + + +class TargetAll(ScheduleTarget): + """操作所有任务""" + + def __init__(self, for_group: str | None = None): + self.for_group = for_group + + +TargetScope = TargetByID | TargetByPlugin | TargetAll | None + + +def create_target_parser(subcommand_name: str): + """ + 创建一个依赖注入函数,用于解析删除、暂停、恢复等命令的操作目标。 + """ + + async def dependency( + event: Event, + schedule_id: Match[int] = AlconnaMatch("schedule_id"), + plugin_name: Match[str] = AlconnaMatch("plugin_name"), + group_id: Match[str] = AlconnaMatch("group_id"), + all_enabled: Query[bool] = Query(f"{subcommand_name}.all"), + ) -> TargetScope: + if schedule_id.available: + return TargetByID(schedule_id.result) + + if plugin_name.available: + p_name = plugin_name.result + if all_enabled.available: + return TargetByPlugin(plugin=p_name, all_groups=True) + elif group_id.available: + gid = group_id.result + if gid.lower() == "all": + return TargetByPlugin(plugin=p_name, all_groups=True) + return TargetByPlugin(plugin=p_name, group_id=gid) + else: + current_group_id = getattr(event, "group_id", None) + if current_group_id: + return TargetByPlugin(plugin=p_name, group_id=str(current_group_id)) + else: + await schedule_cmd.finish( + "私聊中操作插件任务必须使用 
-g <群号> 或 -all 选项。" + ) + + if all_enabled.available: + return TargetAll(for_group=group_id.result if group_id.available else None) + + return None + + return dependency + + +schedule_cmd = on_alconna( + Alconna( + "定时任务", + Subcommand( + "查看", + Option("-g", Args["target_group_id", str]), + Option("-all", help_text="查看所有群聊 (SUPERUSER)"), + Option("-p", Args["plugin_name", str], help_text="按插件名筛选"), + Option("--page", Args["page", int, 1], help_text="指定页码"), + alias=["ls", "list"], + help_text="查看定时任务", + ), + Subcommand( + "设置", + Args["plugin_name", str], + Option("--cron", Args["cron_expr", str], help_text="设置 cron 表达式"), + Option("--interval", Args["interval_expr", str], help_text="设置时间间隔"), + Option("--date", Args["date_expr", str], help_text="设置特定执行日期"), + Option( + "--daily", + Args["daily_expr", str], + help_text="设置每天执行的时间 (如 08:20)", + ), + Option("-g", Args["group_id", str], help_text="指定群组ID或'all'"), + Option("-all", help_text="对所有群生效 (等同于 -g all)"), + Option("--kwargs", Args["kwargs_str", str], help_text="设置任务参数"), + Option( + "--bot", Args["bot_id", str], help_text="指定操作的Bot ID (SUPERUSER)" + ), + alias=["add", "开启"], + help_text="设置/开启一个定时任务", + ), + Subcommand( + "删除", + Args["schedule_id?", int], + Option("-p", Args["plugin_name", str], help_text="指定插件名"), + Option("-g", Args["group_id", str], help_text="指定群组ID"), + Option("-all", help_text="对所有群生效"), + Option( + "--bot", Args["bot_id", str], help_text="指定操作的Bot ID (SUPERUSER)" + ), + alias=["del", "rm", "remove", "关闭", "取消"], + help_text="删除一个或多个定时任务", + ), + Subcommand( + "暂停", + Args["schedule_id?", int], + Option("-all", help_text="对当前群所有任务生效"), + Option("-p", Args["plugin_name", str], help_text="指定插件名"), + Option("-g", Args["group_id", str], help_text="指定群组ID (SUPERUSER)"), + Option( + "--bot", Args["bot_id", str], help_text="指定操作的Bot ID (SUPERUSER)" + ), + alias=["pause"], + help_text="暂停一个或多个定时任务", + ), + Subcommand( + "恢复", + Args["schedule_id?", int], + Option("-all", help_text="对当前群所有任务生效"), + Option("-p", Args["plugin_name", str], help_text="指定插件名"), + Option("-g", Args["group_id", str], help_text="指定群组ID (SUPERUSER)"), + Option( + "--bot", Args["bot_id", str], help_text="指定操作的Bot ID (SUPERUSER)" + ), + alias=["resume"], + help_text="恢复一个或多个定时任务", + ), + Subcommand( + "执行", + Args["schedule_id", int], + alias=["trigger", "run"], + help_text="立即执行一次任务", + ), + Subcommand( + "更新", + Args["schedule_id", int], + Option("--cron", Args["cron_expr", str], help_text="设置 cron 表达式"), + Option("--interval", Args["interval_expr", str], help_text="设置时间间隔"), + Option("--date", Args["date_expr", str], help_text="设置特定执行日期"), + Option( + "--daily", + Args["daily_expr", str], + help_text="更新每天执行的时间 (如 08:20)", + ), + Option("--kwargs", Args["kwargs_str", str], help_text="更新参数"), + alias=["update", "modify", "修改"], + help_text="更新任务配置", + ), + Subcommand( + "状态", + Args["schedule_id", int], + alias=["status", "info"], + help_text="查看单个任务的详细状态", + ), + Subcommand( + "插件列表", + alias=["plugins"], + help_text="列出所有可用的插件", + ), + ), + priority=5, + block=True, + rule=admin_check(1), +) + +schedule_cmd.shortcut( + "任务状态", + command="定时任务", + arguments=["状态", "{%0}"], + prefix=True, +) + + +@schedule_cmd.handle() +async def _handle_time_options_mutex(arp: Arparma): + time_options = ["cron", "interval", "date", "daily"] + provided_options = [opt for opt in time_options if arp.query(opt) is not None] + if len(provided_options) > 1: + await schedule_cmd.finish( + f"时间选项 --{', --'.join(provided_options)} 不能同时使用,请只选择一个。" + ) + + 
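For reference, a minimal sketch (not part of the patch) of how the time options from the usage text map onto trigger configs via the `_parse_interval` and `_parse_daily_time` helpers defined above, and how such a config would then be handed to `scheduler_manager.add_schedule` as the 设置 handler does; the plugin name and group id below are hypothetical.

    # Illustrative only, assuming the helpers above:
    #   _parse_interval("30m")     -> {"minutes": 30}
    #   _parse_interval("1d")      -> {"days": 1}
    #   _parse_daily_time("08:30") -> {"minute": "30", "hour": "8",
    #                                  "day": "*", "month": "*", "day_of_week": "*"}
    #
    # The 设置 subcommand then passes the parsed config on, roughly:
    #   success, msg = await scheduler_manager.add_schedule(
    #       "broadcast",        # hypothetical plugin name
    #       "123456789",        # hypothetical group id
    #       "interval",         # trigger_type
    #       {"minutes": 30},    # trigger_config from _parse_interval("30m")
    #       {},                 # job_kwargs, validated against the plugin's params model
    #       bot_id=bot.self_id,
    #   )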
+@schedule_cmd.assign("查看") +async def _( + bot: Bot, + event: Event, + target_group_id: Match[str] = AlconnaMatch("target_group_id"), + all_groups: Query[bool] = Query("查看.all"), + plugin_name: Match[str] = AlconnaMatch("plugin_name"), + page: Match[int] = AlconnaMatch("page"), +): + is_superuser = await SUPERUSER(bot, event) + schedules = [] + title = "" + + current_group_id = getattr(event, "group_id", None) + if not (all_groups.available or target_group_id.available) and not current_group_id: + await schedule_cmd.finish("私聊中查看任务必须使用 -g <群号> 或 -all 选项。") + + if all_groups.available: + if not is_superuser: + await schedule_cmd.finish("需要超级用户权限才能查看所有群组的定时任务。") + schedules = await scheduler_manager.get_all_schedules() + title = "所有群组的定时任务" + elif target_group_id.available: + if not is_superuser: + await schedule_cmd.finish("需要超级用户权限才能查看指定群组的定时任务。") + gid = target_group_id.result + schedules = [ + s for s in await scheduler_manager.get_all_schedules() if s.group_id == gid + ] + title = f"群 {gid} 的定时任务" + else: + gid = str(current_group_id) + schedules = [ + s for s in await scheduler_manager.get_all_schedules() if s.group_id == gid + ] + title = "本群的定时任务" + + if plugin_name.available: + schedules = [s for s in schedules if s.plugin_name == plugin_name.result] + title += f" [插件: {plugin_name.result}]" + + if not schedules: + await schedule_cmd.finish("没有找到任何相关的定时任务。") + + page_size = 15 + current_page = page.result + total_items = len(schedules) + total_pages = (total_items + page_size - 1) // page_size + start_index = (current_page - 1) * page_size + end_index = start_index + page_size + paginated_schedules = schedules[start_index:end_index] + + if not paginated_schedules: + await schedule_cmd.finish("这一页没有内容了哦~") + + status_tasks = [ + scheduler_manager.get_schedule_status(s.id) for s in paginated_schedules + ] + all_statuses = await asyncio.gather(*status_tasks) + data_list = [ + [ + s["id"], + s["plugin_name"], + s.get("bot_id") or "N/A", + s["group_id"] or "全局", + s["next_run_time"], + _format_trigger(s), + _format_params(s), + "✔️ 已启用" if s["is_enabled"] else "⏸️ 已暂停", + ] + for s in all_statuses + if s + ] + + if not data_list: + await schedule_cmd.finish("没有找到任何相关的定时任务。") + + img = await ImageTemplate.table_page( + head_text=title, + tip_text=f"第 {current_page}/{total_pages} 页,共 {total_items} 条任务", + column_name=[ + "ID", + "插件", + "Bot ID", + "群组/目标", + "下次运行", + "触发规则", + "参数", + "状态", + ], + data_list=data_list, + column_space=20, + ) + await MessageUtils.build_message(img).send(reply_to=True) + + +@schedule_cmd.assign("设置") +async def _( + event: Event, + plugin_name: str, + cron_expr: str | None = None, + interval_expr: str | None = None, + date_expr: str | None = None, + daily_expr: str | None = None, + group_id: str | None = None, + kwargs_str: str | None = None, + all_enabled: Query[bool] = Query("设置.all"), + bot_id_to_operate: str = Depends(GetBotId), +): + if plugin_name not in scheduler_manager._registered_tasks: + await schedule_cmd.finish( + f"插件 '{plugin_name}' 没有注册可用的定时任务。\n" + f"可用插件: {list(scheduler_manager._registered_tasks.keys())}" + ) + + trigger_type = "" + trigger_config = {} + + try: + if cron_expr: + trigger_type = "cron" + parts = cron_expr.split() + if len(parts) != 5: + raise ValueError("Cron 表达式必须有5个部分 (分 时 日 月 周)") + cron_keys = ["minute", "hour", "day", "month", "day_of_week"] + trigger_config = dict(zip(cron_keys, parts)) + elif interval_expr: + trigger_type = "interval" + trigger_config = _parse_interval(interval_expr) + elif date_expr: + trigger_type 
= "date" + trigger_config = {"run_date": datetime.fromisoformat(date_expr)} + elif daily_expr: + trigger_type = "cron" + trigger_config = _parse_daily_time(daily_expr) + else: + await schedule_cmd.finish( + "必须提供一种时间选项: --cron, --interval, --date, 或 --daily。" + ) + except ValueError as e: + await schedule_cmd.finish(f"时间参数解析错误: {e}") + + job_kwargs = {} + if kwargs_str: + task_meta = scheduler_manager._registered_tasks[plugin_name] + params_model = task_meta.get("model") + if not params_model: + await schedule_cmd.finish(f"插件 '{plugin_name}' 不支持设置额外参数。") + + if not (isinstance(params_model, type) and issubclass(params_model, BaseModel)): + await schedule_cmd.finish(f"插件 '{plugin_name}' 的参数模型配置错误。") + + raw_kwargs = {} + try: + for item in kwargs_str.split(","): + key, value = item.strip().split("=", 1) + raw_kwargs[key.strip()] = value + except Exception as e: + await schedule_cmd.finish( + f"参数格式错误,请使用 'key=value,key2=value2' 格式。错误: {e}" + ) + + try: + model_validate = getattr(params_model, "model_validate", None) + if not model_validate: + await schedule_cmd.finish( + f"插件 '{plugin_name}' 的参数模型不支持验证。" + ) + return + + validated_model = model_validate(raw_kwargs) + + model_dump = getattr(validated_model, "model_dump", None) + if not model_dump: + await schedule_cmd.finish( + f"插件 '{plugin_name}' 的参数模型不支持导出。" + ) + return + + job_kwargs = model_dump() + except ValidationError as e: + errors = [f" - {err['loc'][0]}: {err['msg']}" for err in e.errors()] + error_str = "\n".join(errors) + await schedule_cmd.finish( + f"插件 '{plugin_name}' 的任务参数验证失败:\n{error_str}" + ) + return + + target_group_id: str | None + current_group_id = getattr(event, "group_id", None) + + if group_id and group_id.lower() == "all": + target_group_id = "__ALL_GROUPS__" + elif all_enabled.available: + target_group_id = "__ALL_GROUPS__" + elif group_id: + target_group_id = group_id + elif current_group_id: + target_group_id = str(current_group_id) + else: + await schedule_cmd.finish( + "私聊中设置定时任务时,必须使用 -g <群号> 或 --all 选项指定目标。" + ) + return + + success, msg = await scheduler_manager.add_schedule( + plugin_name, + target_group_id, + trigger_type, + trigger_config, + job_kwargs, + bot_id=bot_id_to_operate, + ) + + if target_group_id == "__ALL_GROUPS__": + target_desc = f"所有群组 (Bot: {bot_id_to_operate})" + elif target_group_id is None: + target_desc = "全局" + else: + target_desc = f"群组 {target_group_id}" + + if success: + await schedule_cmd.finish(f"已成功为 [{target_desc}] {msg}") + else: + await schedule_cmd.finish(f"为 [{target_desc}] 设置任务失败: {msg}") + + +@schedule_cmd.assign("删除") +async def _( + target: TargetScope = Depends(create_target_parser("删除")), + bot_id_to_operate: str = Depends(GetBotId), +): + if isinstance(target, TargetByID): + _, message = await scheduler_manager.remove_schedule_by_id(target.id) + await schedule_cmd.finish(message) + + elif isinstance(target, TargetByPlugin): + p_name = target.plugin + if p_name not in scheduler_manager.get_registered_plugins(): + await schedule_cmd.finish(f"未找到插件 '{p_name}'。") + + if target.all_groups: + removed_count = await scheduler_manager.remove_schedule_for_all( + p_name, bot_id=bot_id_to_operate + ) + message = ( + f"已取消了 {removed_count} 个群组的插件 '{p_name}' 定时任务。" + if removed_count > 0 + else f"没有找到插件 '{p_name}' 的定时任务。" + ) + await schedule_cmd.finish(message) + else: + _, message = await scheduler_manager.remove_schedule( + p_name, target.group_id, bot_id=bot_id_to_operate + ) + await schedule_cmd.finish(message) + + elif isinstance(target, TargetAll): + if 
target.for_group: + _, message = await scheduler_manager.remove_schedules_by_group( + target.for_group + ) + await schedule_cmd.finish(message) + else: + _, message = await scheduler_manager.remove_all_schedules() + await schedule_cmd.finish(message) + + else: + await schedule_cmd.finish( + "删除任务失败:请提供任务ID,或通过 -p <插件> 或 -all 指定要删除的任务。" + ) + + +@schedule_cmd.assign("暂停") +async def _( + target: TargetScope = Depends(create_target_parser("暂停")), + bot_id_to_operate: str = Depends(GetBotId), +): + if isinstance(target, TargetByID): + _, message = await scheduler_manager.pause_schedule(target.id) + await schedule_cmd.finish(message) + + elif isinstance(target, TargetByPlugin): + p_name = target.plugin + if p_name not in scheduler_manager.get_registered_plugins(): + await schedule_cmd.finish(f"未找到插件 '{p_name}'。") + + if target.all_groups: + _, message = await scheduler_manager.pause_schedules_by_plugin(p_name) + await schedule_cmd.finish(message) + else: + _, message = await scheduler_manager.pause_schedule_by_plugin_group( + p_name, target.group_id, bot_id=bot_id_to_operate + ) + await schedule_cmd.finish(message) + + elif isinstance(target, TargetAll): + if target.for_group: + _, message = await scheduler_manager.pause_schedules_by_group( + target.for_group + ) + await schedule_cmd.finish(message) + else: + _, message = await scheduler_manager.pause_all_schedules() + await schedule_cmd.finish(message) + + else: + await schedule_cmd.finish("请提供任务ID、使用 -p <插件> 或 -all 选项。") + + +@schedule_cmd.assign("恢复") +async def _( + target: TargetScope = Depends(create_target_parser("恢复")), + bot_id_to_operate: str = Depends(GetBotId), +): + if isinstance(target, TargetByID): + _, message = await scheduler_manager.resume_schedule(target.id) + await schedule_cmd.finish(message) + + elif isinstance(target, TargetByPlugin): + p_name = target.plugin + if p_name not in scheduler_manager.get_registered_plugins(): + await schedule_cmd.finish(f"未找到插件 '{p_name}'。") + + if target.all_groups: + _, message = await scheduler_manager.resume_schedules_by_plugin(p_name) + await schedule_cmd.finish(message) + else: + _, message = await scheduler_manager.resume_schedule_by_plugin_group( + p_name, target.group_id, bot_id=bot_id_to_operate + ) + await schedule_cmd.finish(message) + + elif isinstance(target, TargetAll): + if target.for_group: + _, message = await scheduler_manager.resume_schedules_by_group( + target.for_group + ) + await schedule_cmd.finish(message) + else: + _, message = await scheduler_manager.resume_all_schedules() + await schedule_cmd.finish(message) + + else: + await schedule_cmd.finish("请提供任务ID、使用 -p <插件> 或 -all 选项。") + + +@schedule_cmd.assign("执行") +async def _(schedule_id: int): + _, message = await scheduler_manager.trigger_now(schedule_id) + await schedule_cmd.finish(message) + + +@schedule_cmd.assign("更新") +async def _( + schedule_id: int, + cron_expr: str | None = None, + interval_expr: str | None = None, + date_expr: str | None = None, + daily_expr: str | None = None, + kwargs_str: str | None = None, +): + if not any([cron_expr, interval_expr, date_expr, daily_expr, kwargs_str]): + await schedule_cmd.finish( + "请提供需要更新的时间 (--cron/--interval/--date/--daily) 或参数 (--kwargs)" + ) + + trigger_config = None + trigger_type = None + try: + if cron_expr: + trigger_type = "cron" + parts = cron_expr.split() + if len(parts) != 5: + raise ValueError("Cron 表达式必须有5个部分") + cron_keys = ["minute", "hour", "day", "month", "day_of_week"] + trigger_config = dict(zip(cron_keys, parts)) + elif interval_expr: + 
trigger_type = "interval" + trigger_config = _parse_interval(interval_expr) + elif date_expr: + trigger_type = "date" + trigger_config = {"run_date": datetime.fromisoformat(date_expr)} + elif daily_expr: + trigger_type = "cron" + trigger_config = _parse_daily_time(daily_expr) + except ValueError as e: + await schedule_cmd.finish(f"时间参数解析错误: {e}") + + job_kwargs = None + if kwargs_str: + schedule = await scheduler_manager.get_schedule_by_id(schedule_id) + if not schedule: + await schedule_cmd.finish(f"未找到 ID 为 {schedule_id} 的任务。") + + task_meta = scheduler_manager._registered_tasks.get(schedule.plugin_name) + if not task_meta or not (params_model := task_meta.get("model")): + await schedule_cmd.finish( + f"插件 '{schedule.plugin_name}' 未定义参数模型,无法更新参数。" + ) + + if not (isinstance(params_model, type) and issubclass(params_model, BaseModel)): + await schedule_cmd.finish( + f"插件 '{schedule.plugin_name}' 的参数模型配置错误。" + ) + + raw_kwargs = {} + try: + for item in kwargs_str.split(","): + key, value = item.strip().split("=", 1) + raw_kwargs[key.strip()] = value + except Exception as e: + await schedule_cmd.finish( + f"参数格式错误,请使用 'key=value,key2=value2' 格式。错误: {e}" + ) + + try: + model_validate = getattr(params_model, "model_validate", None) + if not model_validate: + await schedule_cmd.finish( + f"插件 '{schedule.plugin_name}' 的参数模型不支持验证。" + ) + return + + validated_model = model_validate(raw_kwargs) + + model_dump = getattr(validated_model, "model_dump", None) + if not model_dump: + await schedule_cmd.finish( + f"插件 '{schedule.plugin_name}' 的参数模型不支持导出。" + ) + return + + job_kwargs = model_dump(exclude_unset=True) + except ValidationError as e: + errors = [f" - {err['loc'][0]}: {err['msg']}" for err in e.errors()] + error_str = "\n".join(errors) + await schedule_cmd.finish(f"更新的参数验证失败:\n{error_str}") + return + + _, message = await scheduler_manager.update_schedule( + schedule_id, trigger_type, trigger_config, job_kwargs + ) + await schedule_cmd.finish(message) + + +@schedule_cmd.assign("插件列表") +async def _(): + registered_plugins = scheduler_manager.get_registered_plugins() + if not registered_plugins: + await schedule_cmd.finish("当前没有已注册的定时任务插件。") + + message_parts = ["📋 已注册的定时任务插件:"] + for i, plugin_name in enumerate(registered_plugins, 1): + task_meta = scheduler_manager._registered_tasks[plugin_name] + params_model = task_meta.get("model") + + if not params_model: + message_parts.append(f"{i}. {plugin_name} - 无参数") + continue + + if not (isinstance(params_model, type) and issubclass(params_model, BaseModel)): + message_parts.append(f"{i}. {plugin_name} - ⚠️ 参数模型配置错误") + continue + + model_fields = getattr(params_model, "model_fields", None) + if model_fields: + param_info = ", ".join( + f"{field_name}({_get_type_name(field_info.annotation)})" + for field_name, field_info in model_fields.items() + ) + message_parts.append(f"{i}. {plugin_name} - 参数: {param_info}") + else: + message_parts.append(f"{i}. 
{plugin_name} - 无参数") + + await schedule_cmd.finish("\n".join(message_parts)) + + +@schedule_cmd.assign("状态") +async def _(schedule_id: int): + status = await scheduler_manager.get_schedule_status(schedule_id) + if not status: + await schedule_cmd.finish(f"未找到ID为 {schedule_id} 的定时任务。") + + info_lines = [ + f"📋 定时任务详细信息 (ID: {schedule_id})", + "--------------------", + f"▫️ 插件: {status['plugin_name']}", + f"▫️ Bot ID: {status.get('bot_id') or '默认'}", + f"▫️ 目标: {status['group_id'] or '全局'}", + f"▫️ 状态: {'✔️ 已启用' if status['is_enabled'] else '⏸️ 已暂停'}", + f"▫️ 下次运行: {status['next_run_time']}", + f"▫️ 触发规则: {_format_trigger(status)}", + f"▫️ 任务参数: {_format_params(status)}", + ] + await schedule_cmd.finish("\n".join(info_lines)) diff --git a/zhenxun/builtin_plugins/shop/_data_source.py b/zhenxun/builtin_plugins/shop/_data_source.py index 0fdd4e53..4c35c6ff 100644 --- a/zhenxun/builtin_plugins/shop/_data_source.py +++ b/zhenxun/builtin_plugins/shop/_data_source.py @@ -345,10 +345,11 @@ class ShopManage: if num > param.max_num_limit: return f"{goods_info.goods_name} 单次使用最大数量为{param.max_num_limit}..." await cls.run_before_after(goods, param, session, message, "before", **kwargs) - result = await cls.__run(goods, param, session, message, **kwargs) await UserConsole.use_props( session.user.id, goods_info.uuid, num, PlatformUtils.get_platform(session) ) + result = await cls.__run(goods, param, session, message, **kwargs) + await cls.run_before_after(goods, param, session, message, "after", **kwargs) if not result and param.send_success_msg: result = f"使用道具 {goods.name} {num} 次成功!" diff --git a/zhenxun/builtin_plugins/statistics/statistics_hook.py b/zhenxun/builtin_plugins/statistics/statistics_hook.py index f3776ece..3ac15e2a 100644 --- a/zhenxun/builtin_plugins/statistics/statistics_hook.py +++ b/zhenxun/builtin_plugins/statistics/statistics_hook.py @@ -53,10 +53,7 @@ async def _( ) -@scheduler.scheduled_job( - "interval", - minutes=1, -) +@scheduler.scheduled_job("interval", minutes=1, max_instances=5) async def _(): try: call_list = TEMP_LIST.copy() diff --git a/zhenxun/builtin_plugins/superuser/bot_manage/plugin.py b/zhenxun/builtin_plugins/superuser/bot_manage/plugin.py index df6d7f35..c5359951 100644 --- a/zhenxun/builtin_plugins/superuser/bot_manage/plugin.py +++ b/zhenxun/builtin_plugins/superuser/bot_manage/plugin.py @@ -110,7 +110,7 @@ async def enable_plugin( ) await BotConsole.enable_plugin(None, plugin.module) await MessageUtils.build_message( - f"已禁用全部 bot 的插件: {plugin_name.result}" + f"已开启全部 bot 的插件: {plugin_name.result}" ).finish() elif bot_id.available: logger.info( diff --git a/zhenxun/builtin_plugins/superuser/bot_manage/task.py b/zhenxun/builtin_plugins/superuser/bot_manage/task.py index 005ab188..501aec3d 100644 --- a/zhenxun/builtin_plugins/superuser/bot_manage/task.py +++ b/zhenxun/builtin_plugins/superuser/bot_manage/task.py @@ -92,7 +92,7 @@ async def enable_task( ) await BotConsole.enable_task(None, task.module) await MessageUtils.build_message( - f"已禁用全部 bot 的被动: {task_name.available}" + f"已开启全部 bot 的被动: {task_name.available}" ).finish() elif bot_id.available: logger.info( diff --git a/zhenxun/builtin_plugins/superuser/request_manage.py b/zhenxun/builtin_plugins/superuser/request_manage.py index 23b235bf..e6eb6b77 100644 --- a/zhenxun/builtin_plugins/superuser/request_manage.py +++ b/zhenxun/builtin_plugins/superuser/request_manage.py @@ -2,7 +2,7 @@ from io import BytesIO from arclet.alconna import Args, Option from arclet.alconna.typing import CommandMeta -from 
nonebot.adapters import Bot +from nonebot.adapters import Bot, Event from nonebot.permission import SUPERUSER from nonebot.plugin import PluginMetadata from nonebot.rule import to_me @@ -10,10 +10,13 @@ from nonebot_plugin_alconna import ( Alconna, AlconnaQuery, Arparma, + Match, Query, + Reply, on_alconna, store_true, ) +from nonebot_plugin_alconna.uniseg.tools import reply_fetch from nonebot_plugin_session import EventSession from zhenxun.configs.config import BotConfig @@ -54,7 +57,7 @@ __plugin_meta__ = PluginMetadata( _req_matcher = on_alconna( Alconna( "请求处理", - Args["handle", ["-fa", "-fr", "-fi", "-ga", "-gr", "-gi"]]["id", int], + Args["handle", ["-fa", "-fr", "-fi", "-ga", "-gr", "-gi"]]["id?", int], meta=CommandMeta( description="好友/群组请求处理", usage=usage, @@ -105,12 +108,12 @@ _clear_matcher = on_alconna( ) reg_arg_list = [ - (r"同意好友请求", ["-fa", "{%0}"]), - (r"拒绝好友请求", ["-fr", "{%0}"]), - (r"忽略好友请求", ["-fi", "{%0}"]), - (r"同意群组请求", ["-ga", "{%0}"]), - (r"拒绝群组请求", ["-gr", "{%0}"]), - (r"忽略群组请求", ["-gi", "{%0}"]), + (r"同意好友请求\s*(?P<id>\d*)", ["-fa", "{id}"]), + (r"拒绝好友请求\s*(?P<id>\d*)", ["-fr", "{id}"]), + (r"忽略好友请求\s*(?P<id>\d*)", ["-fi", "{id}"]), + (r"同意群组请求\s*(?P<id>\d*)", ["-ga", "{id}"]), + (r"拒绝群组请求\s*(?P<id>\d*)", ["-gr", "{id}"]), + (r"忽略群组请求\s*(?P<id>\d*)", ["-gi", "{id}"]), ] for r in reg_arg_list: @@ -125,32 +128,48 @@ for r in reg_arg_list: @_req_matcher.handle() async def _( bot: Bot, + event: Event, session: EventSession, handle: str, - id: int, + id: Match[int], arparma: Arparma, ): + reply: Reply | None = None type_dict = { "a": RequestHandleType.APPROVE, "r": RequestHandleType.REFUSED, "i": RequestHandleType.IGNORE, } + if not id.available: + reply = await reply_fetch(event, bot) + if not reply: + await MessageUtils.build_message("请引用消息处理或添加处理Id.").finish() + handle_id = id.result + if reply: + db_data = await FgRequest.get_or_none(message_ids__contains=reply.id) + if not db_data: + await MessageUtils.build_message( + "未发现此消息的Id,请使用Id进行处理..."
+ ).finish(reply_to=True) + handle_id = db_data.id req = None handle_type = type_dict[handle[-1]] try: if handle_type == RequestHandleType.APPROVE: - req = await FgRequest.approve(bot, id) + req = await FgRequest.approve(bot, handle_id) if handle_type == RequestHandleType.REFUSED: - req = await FgRequest.refused(bot, id) + req = await FgRequest.refused(bot, handle_id) if handle_type == RequestHandleType.IGNORE: - req = await FgRequest.ignore(id) + req = await FgRequest.ignore(handle_id) except NotFoundError: await MessageUtils.build_message("未发现此id的请求...").finish(reply_to=True) except Exception: await MessageUtils.build_message("其他错误, 可能flag已失效...").finish( reply_to=True ) - logger.info("处理请求", arparma.header_result, session=session) + logger.info( + f"处理请求 Id: {req.id if req else ''}", arparma.header_result, session=session + ) await MessageUtils.build_message("成功处理请求!").send(reply_to=True) if req and handle_type == RequestHandleType.APPROVE: await bot.send_private_msg( diff --git a/zhenxun/builtin_plugins/web_ui/__init__.py b/zhenxun/builtin_plugins/web_ui/__init__.py index d8d71025..90772bc5 100644 --- a/zhenxun/builtin_plugins/web_ui/__init__.py +++ b/zhenxun/builtin_plugins/web_ui/__init__.py @@ -29,8 +29,7 @@ from .public import init_public __plugin_meta__ = PluginMetadata( name="WebUi", description="WebUi API", - usage=""" - """.strip(), + usage='"""\n """.strip(),', extra=PluginExtraData( author="HibiKier", version="0.1", @@ -83,7 +82,6 @@ BaseApiRouter.include_router(plugin_router) BaseApiRouter.include_router(system_router) BaseApiRouter.include_router(menu_router) - WsApiRouter = APIRouter(prefix="/zhenxun/socket") WsApiRouter.include_router(ws_log_routes) @@ -94,6 +92,8 @@ WsApiRouter.include_router(chat_routes) @driver.on_startup async def _(): try: + # 存储任务引用的列表,防止任务被垃圾回收 + _tasks = [] async def log_sink(message: str): loop = None @@ -104,7 +104,8 @@ async def _(): logger.warning("Web Ui log_sink", e=e) if not loop: loop = asyncio.new_event_loop() - loop.create_task(LOG_STORAGE.add(message.rstrip("\n"))) # noqa: RUF006 + # 存储任务引用到外部列表中 + _tasks.append(loop.create_task(LOG_STORAGE.add(message.rstrip("\n")))) logger_.add( log_sink, colorize=True, filter=default_filter, format=default_format diff --git a/zhenxun/builtin_plugins/web_ui/api/menu/data_source.py b/zhenxun/builtin_plugins/web_ui/api/menu/data_source.py index 9cfcd244..14f5c928 100644 --- a/zhenxun/builtin_plugins/web_ui/api/menu/data_source.py +++ b/zhenxun/builtin_plugins/web_ui/api/menu/data_source.py @@ -46,7 +46,10 @@ class MenuManage: icon="database", ), MenuItem( - name="系统信息", module="system", router="/system", icon="system" + name="文件管理", module="system", router="/system", icon="system" + ), + MenuItem( + name="关于我们", module="about", router="/about", icon="about" ), ] self.save() diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/main/__init__.py b/zhenxun/builtin_plugins/web_ui/api/tabs/main/__init__.py index 36059101..f93d0ab1 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/main/__init__.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/main/__init__.py @@ -16,7 +16,7 @@ from zhenxun.utils.platform import PlatformUtils from ....base_model import Result from ....config import QueryDateType -from ....utils import authentication, get_system_status +from ....utils import authentication, clear_help_image, get_system_status from .data_source import ApiDataSource from .model import ( ActiveGroup, @@ -234,6 +234,7 @@ async def _(param: BotManageUpdateParam): bot_data.block_plugins = 
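The web_ui startup hook above keeps every task created inside `log_sink` in a `_tasks` list because `loop.create_task` only holds a weak reference; without a strong reference the task can be garbage-collected before it finishes (the situation RUF006 warns about). The standalone illustration below shows the same idea with a set plus a done-callback so finished tasks are also released; this is a common variant of the pattern, not the exact code from this patch.

```python
import asyncio

background_tasks: set[asyncio.Task] = set()


async def store_log(line: str) -> None:
    await asyncio.sleep(0)  # stand-in for LOG_STORAGE.add(...)
    print("stored:", line)


def schedule_log(line: str) -> None:
    # create_task only keeps a weak reference, so hold our own reference
    task = asyncio.get_running_loop().create_task(store_log(line))
    background_tasks.add(task)
    task.add_done_callback(background_tasks.discard)  # drop it once finished


async def main() -> None:
    schedule_log("hello")
    await asyncio.sleep(0.1)  # give the background task time to run


asyncio.run(main())
```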
CommonUtils.convert_module_format(param.block_plugins) bot_data.block_tasks = CommonUtils.convert_module_format(param.block_tasks) await bot_data.save(update_fields=["block_plugins", "block_tasks"]) + clear_help_image() return Result.ok() except Exception as e: logger.error(f"{router.prefix}/update_bot_manage 调用错误", "WebUi", e=e) diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/main/data_source.py b/zhenxun/builtin_plugins/web_ui/api/tabs/main/data_source.py index f9ff6fca..e87647dd 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/main/data_source.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/main/data_source.py @@ -92,7 +92,7 @@ class ApiDataSource: """ version_file = Path() / "__version__" if version_file.exists(): - if text := version_file.open().read(): + if text := version_file.open(encoding="utf-8").read(): return text.replace("__version__: ", "").strip() return "unknown" diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/manage/chat.py b/zhenxun/builtin_plugins/web_ui/api/tabs/manage/chat.py index d20149fb..389546ca 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/manage/chat.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/manage/chat.py @@ -1,3 +1,5 @@ +from datetime import datetime + from fastapi import APIRouter import nonebot from nonebot import on_message @@ -49,13 +51,14 @@ async def message_handle( message: UniMsg, group_id: str | None, ): + time = str(datetime.now().replace(microsecond=0)) messages = [] for m in message: if isinstance(m, Text | str): - messages.append(MessageItem(type="text", msg=str(m))) + messages.append(MessageItem(type="text", msg=str(m), time=time)) elif isinstance(m, Image): if m.url: - messages.append(MessageItem(type="img", msg=m.url)) + messages.append(MessageItem(type="img", msg=m.url, time=time)) elif isinstance(m, At): if group_id: if m.target == "0": @@ -72,9 +75,9 @@ async def message_handle( uname = group_user.user_name if m.target not in ID2NAME[group_id]: ID2NAME[group_id][m.target] = uname - messages.append(MessageItem(type="at", msg=f"@{uname}")) + messages.append(MessageItem(type="at", msg=f"@{uname}", time=time)) elif isinstance(m, Hyper): - messages.append(MessageItem(type="text", msg="[分享消息]")) + messages.append(MessageItem(type="text", msg="[分享消息]", time=time)) return messages diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/manage/model.py b/zhenxun/builtin_plugins/web_ui/api/tabs/manage/model.py index 7149cee1..68772d0f 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/manage/model.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/manage/model.py @@ -237,6 +237,8 @@ class MessageItem(BaseModel): """消息类型""" msg: str """内容""" + time: str + """发送日期""" class Message(BaseModel): diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/__init__.py b/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/__init__.py index e011e67f..1187ad65 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/__init__.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/__init__.py @@ -4,15 +4,20 @@ from fastapi.responses import JSONResponse from zhenxun.models.plugin_info import PluginInfo as DbPluginInfo from zhenxun.services.log import logger from zhenxun.utils.enum import BlockType, PluginType +from zhenxun.utils.manager.virtual_env_package_manager import VirtualEnvPackageManager from ....base_model import Result -from ....utils import authentication +from ....utils import authentication, clear_help_image from .data_source import ApiDataSource from .model import ( + BatchUpdatePlugins, + 
BatchUpdateResult, + InstallDependenciesPayload, PluginCount, PluginDetail, PluginInfo, PluginSwitch, + RenameMenuTypePayload, UpdatePlugin, ) @@ -30,9 +35,8 @@ async def _( plugin_type: list[PluginType] = Query(None), menu_type: str | None = None ) -> Result[list[PluginInfo]]: try: - return Result.ok( - await ApiDataSource.get_plugin_list(plugin_type, menu_type), "拿到信息啦!" - ) + result = await ApiDataSource.get_plugin_list(plugin_type, menu_type) + return Result.ok(result, "拿到信息啦!") except Exception as e: logger.error(f"{router.prefix}/get_plugin_list 调用错误", "WebUi", e=e) return Result.fail(f"发生了一点错误捏 {type(e)}: {e}") @@ -78,6 +82,7 @@ async def _() -> Result[PluginCount]: async def _(param: UpdatePlugin) -> Result: try: await ApiDataSource.update_plugin(param) + clear_help_image() return Result.ok(info="已经帮你写好啦!") except (ValueError, KeyError): return Result.fail("插件数据不存在...") @@ -105,6 +110,7 @@ async def _(param: PluginSwitch) -> Result: db_plugin.block_type = None db_plugin.status = True await db_plugin.save() + clear_help_image() return Result.ok(info="成功改变了开关状态!") except Exception as e: logger.error(f"{router.prefix}/change_switch 调用错误", "WebUi", e=e) @@ -144,11 +150,89 @@ async def _() -> Result[list[str]]: ) async def _(module: str) -> Result[PluginDetail]: try: - return Result.ok( - await ApiDataSource.get_plugin_detail(module), "已经帮你写好啦!" - ) + detail = await ApiDataSource.get_plugin_detail(module) + return Result.ok(detail, "已经帮你写好啦!") except (ValueError, KeyError): return Result.fail("插件数据不存在...") except Exception as e: logger.error(f"{router.prefix}/get_plugin 调用错误", "WebUi", e=e) return Result.fail(f"{type(e)}: {e}") + + +@router.put( + "/plugins/batch_update", + dependencies=[authentication()], + response_model=Result[BatchUpdateResult], + response_class=JSONResponse, + description="批量更新插件配置", +) +async def _( + params: BatchUpdatePlugins, +) -> Result[BatchUpdateResult]: + """批量更新插件配置,如开关、类型等""" + try: + result_dict = await ApiDataSource.batch_update_plugins(params=params) + result_model = BatchUpdateResult( + success=result_dict["success"], + updated_count=result_dict["updated_count"], + errors=result_dict["errors"], + ) + clear_help_image() + return Result.ok(result_model, "插件配置更新完成") + except Exception as e: + logger.error(f"{router.prefix}/plugins/batch_update 调用错误", "WebUi", e=e) + return Result.fail(f"发生了一点错误捏 {type(e)}: {e}") + + +# 新增:重命名菜单类型路由 +@router.put( + "/menu_type/rename", + dependencies=[authentication()], + response_model=Result, + description="重命名菜单类型", +) +async def _(payload: RenameMenuTypePayload) -> Result[str]: + try: + result = await ApiDataSource.rename_menu_type( + old_name=payload.old_name, new_name=payload.new_name + ) + if result.get("success"): + clear_help_image() + return Result.ok( + info=result.get( + "info", + f"成功将 {result.get('updated_count', 0)} 个插件的菜单类型从 " + f"'{payload.old_name}' 修改为 '{payload.new_name}'", + ) + ) + else: + return Result.fail(info=result.get("info", "重命名失败")) + except ValueError as ve: + return Result.fail(info=str(ve)) + except RuntimeError as re: + logger.error(f"{router.prefix}/menu_type/rename 调用错误", "WebUi", e=re) + return Result.fail(info=str(re)) + except Exception as e: + logger.error(f"{router.prefix}/menu_type/rename 调用错误", "WebUi", e=e) + return Result.fail(info=f"发生未知错误: {type(e).__name__}") + + +@router.post( + "/install_dependencies", + dependencies=[authentication()], + response_model=Result, + response_class=JSONResponse, + description="安装/卸载依赖", +) +async def _(payload: InstallDependenciesPayload) -> 
Result: + try: + if not payload.dependencies: + return Result.fail("依赖列表不能为空") + if payload.handle_type == "install": + result = VirtualEnvPackageManager.install(payload.dependencies) + else: + result = VirtualEnvPackageManager.uninstall(payload.dependencies) + return Result.ok(result) + except Exception as e: + logger.error(f"{router.prefix}/install_dependencies 调用错误", "WebUi", e=e) + return Result.fail(f"发生了一点错误捏 {type(e)}: {e}") diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/data_source.py b/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/data_source.py index ee0992d6..0f2c3676 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/data_source.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/data_source.py @@ -2,13 +2,20 @@ import re import cattrs from fastapi import Query +from tortoise.exceptions import DoesNotExist from zhenxun.configs.config import Config from zhenxun.configs.utils import ConfigGroup from zhenxun.models.plugin_info import PluginInfo as DbPluginInfo from zhenxun.utils.enum import BlockType, PluginType -from .model import PluginConfig, PluginDetail, PluginInfo, UpdatePlugin +from .model import ( + BatchUpdatePlugins, + PluginConfig, + PluginDetail, + PluginInfo, + UpdatePlugin, +) class ApiDataSource: @@ -44,6 +51,11 @@ class ApiDataSource: level=plugin.level, status=plugin.status, author=plugin.author, + block_type=plugin.block_type, + is_builtin="builtin_plugins" in plugin.module_path + or plugin.plugin_type == PluginType.HIDDEN, + allow_setting=plugin.plugin_type != PluginType.HIDDEN, + allow_switch=plugin.plugin_type != PluginType.HIDDEN, ) plugin_list.append(plugin_info) return plugin_list @@ -69,7 +81,6 @@ class ApiDataSource: db_plugin.block_type = param.block_type db_plugin.status = param.block_type != BlockType.ALL await db_plugin.save() - # 配置项 if param.configs and (configs := Config.get(param.module)): for key in param.configs: if c := configs.configs.get(key): @@ -80,6 +91,87 @@ class ApiDataSource: Config.save(save_simple_data=True) return db_plugin + @classmethod + async def batch_update_plugins(cls, params: BatchUpdatePlugins) -> dict: + """批量更新插件数据 + + 参数: + params: BatchUpdatePlugins + + 返回: + dict: 更新结果, 例如 {'success': True, 'updated_count': 5, 'errors': []} + """ + plugins_to_update_other_fields = [] + other_update_fields = set() + updated_count = 0 + errors = [] + + for item in params.updates: + try: + db_plugin = await DbPluginInfo.get(module=item.module) + plugin_changed_other = False + plugin_changed_block = False + + if db_plugin.block_type != item.block_type: + db_plugin.block_type = item.block_type + db_plugin.status = item.block_type != BlockType.ALL + plugin_changed_block = True + + if item.menu_type is not None and db_plugin.menu_type != item.menu_type: + db_plugin.menu_type = item.menu_type + other_update_fields.add("menu_type") + plugin_changed_other = True + + if ( + item.default_status is not None + and db_plugin.default_status != item.default_status + ): + db_plugin.default_status = item.default_status + other_update_fields.add("default_status") + plugin_changed_other = True + + if plugin_changed_block: + try: + await db_plugin.save(update_fields=["block_type", "status"]) + updated_count += 1 + except Exception as e_save: + errors.append( + { + "module": item.module, + "error": f"Save block_type failed: {e_save!s}", + } + ) + plugin_changed_other = False + + if plugin_changed_other: + plugins_to_update_other_fields.append(db_plugin) + + except DoesNotExist: + errors.append({"module": 
item.module, "error": "Plugin not found"}) + except Exception as e: + errors.append({"module": item.module, "error": str(e)}) + + bulk_updated_count = 0 + if plugins_to_update_other_fields and other_update_fields: + try: + await DbPluginInfo.bulk_update( + plugins_to_update_other_fields, list(other_update_fields) + ) + bulk_updated_count = len(plugins_to_update_other_fields) + except Exception as e_bulk: + errors.append( + { + "module": "batch_update_other", + "error": f"Bulk update failed: {e_bulk!s}", + } + ) + + return { + "success": len(errors) == 0, + "updated_count": updated_count + bulk_updated_count, + "errors": errors, + } + @classmethod def __build_plugin_config( cls, module: str, cfg: str, config: ConfigGroup @@ -115,6 +207,41 @@ class ApiDataSource: type_inner=type_inner, # type: ignore ) + @classmethod + async def rename_menu_type(cls, old_name: str, new_name: str) -> dict: + """重命名菜单类型,并更新所有相关插件 + + 参数: + old_name: 旧菜单类型名称 + new_name: 新菜单类型名称 + + 返回: + dict: 更新结果, 例如 {'success': True, 'updated_count': 3} + """ + if not old_name or not new_name: + raise ValueError("旧名称和新名称都不能为空") + if old_name == new_name: + return { + "success": True, + "updated_count": 0, + "info": "新旧名称相同,无需更新", + } + + # 检查新名称是否已存在(理论上前端会校验,后端再保险一次) + exists = await DbPluginInfo.filter(menu_type=new_name).exists() + if exists: + raise ValueError(f"新的菜单类型名称 '{new_name}' 已被其他插件使用") + + try: + # 使用 filter().update() 进行批量更新 + updated_count = await DbPluginInfo.filter(menu_type=old_name).update( + menu_type=new_name + ) + return {"success": True, "updated_count": updated_count} + except Exception as e: + # 可以添加更详细的日志记录 + raise RuntimeError(f"数据库更新菜单类型失败: {e!s}") + @classmethod async def get_plugin_detail(cls, module: str) -> PluginDetail: """获取插件详情 diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/model.py b/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/model.py index 662814c9..007e55e0 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/model.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/model.py @@ -1,6 +1,6 @@ -from typing import Any +from typing import Any, Literal -from pydantic import BaseModel +from pydantic import BaseModel, Field from zhenxun.utils.enum import BlockType @@ -37,19 +37,19 @@ class UpdatePlugin(BaseModel): module: str """模块""" default_status: bool - """默认开关""" + """是否默认开启""" limit_superuser: bool - """限制超级用户""" - cost_gold: int - """金币花费""" - menu_type: str - """插件菜单类型""" + """是否限制超级用户""" level: int - """插件所需群权限""" + """等级""" + cost_gold: int + """花费金币""" + menu_type: str + """菜单类型""" block_type: BlockType | None = None """禁用类型""" configs: dict[str, Any] | None = None - """配置项""" + """设置项""" class PluginInfo(BaseModel): @@ -58,27 +58,33 @@ class PluginInfo(BaseModel): """ module: str - """插件名称""" + """模块""" plugin_name: str - """插件中文名称""" + """插件名称""" default_status: bool - """默认开关""" + """是否默认开启""" limit_superuser: bool - """限制超级用户""" + """是否限制超级用户""" + level: int + """等级""" cost_gold: int """花费金币""" menu_type: str - """插件菜单类型""" + """菜单类型""" version: str - """插件版本""" - level: int - """群权限""" + """版本""" status: bool - """当前状态""" + """状态""" author: str | None = None """作者""" - block_type: BlockType | None = None - """禁用类型""" + block_type: BlockType | None = Field(None, description="插件禁用状态 (None: 启用)") + """禁用状态""" + is_builtin: bool = False + """是否为内置插件""" + allow_switch: bool = True + """是否允许开关""" + allow_setting: bool = True + """是否允许设置""" class PluginConfig(BaseModel): @@ -86,20 +92,13 @@ class PluginConfig(BaseModel): 插件配置项 """ 
- module: str - """模块""" - key: str - """键""" - value: Any - """值""" - help: str | None = None - """帮助""" - default_value: Any - """默认值""" - type: Any = None - """值类型""" - type_inner: list[str] | None = None - """List Tuple等内部类型检验""" + module: str = Field(..., description="模块名") + key: str = Field(..., description="键") + value: Any = Field(None, description="值") + help: str | None = Field(None, description="帮助信息") + default_value: Any = Field(None, description="默认值") + type: str | None = Field(None, description="类型") + type_inner: list[str] | None = Field(None, description="内部类型") class PluginCount(BaseModel): @@ -117,6 +116,21 @@ class PluginCount(BaseModel): """其他插件""" +class BatchUpdatePluginItem(BaseModel): + module: str = Field(..., description="插件模块名") + default_status: bool | None = Field(None, description="默认状态(开关)") + menu_type: str | None = Field(None, description="菜单类型") + block_type: BlockType | None = Field( + None, description="插件禁用状态 (None: 启用, ALL: 禁用)" + ) + + +class BatchUpdatePlugins(BaseModel): + updates: list[BatchUpdatePluginItem] = Field( + ..., description="要批量更新的插件列表" + ) + + class PluginDetail(PluginInfo): """ 插件详情 @@ -125,6 +139,38 @@ class PluginDetail(PluginInfo): config_list: list[PluginConfig] +class RenameMenuTypePayload(BaseModel): + old_name: str = Field(..., description="旧菜单类型名称") + new_name: str = Field(..., description="新菜单类型名称") + + class PluginIr(BaseModel): id: int """插件id""" + + +class BatchUpdateResult(BaseModel): + """ + 批量更新插件结果 + """ + + success: bool = Field(..., description="是否全部成功") + """是否全部成功""" + updated_count: int = Field(..., description="更新成功的数量") + """更新成功的数量""" + errors: list[dict[str, str]] = Field( + default_factory=list, description="错误信息列表" + ) + """错误信息列表""" + + +class InstallDependenciesPayload(BaseModel): + """ + 安装依赖 + """ + + handle_type: Literal["install", "uninstall"] = Field(..., description="处理类型") + """处理类型""" + + dependencies: list[str] = Field(..., description="依赖列表") + """依赖列表""" diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/store.py b/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/store.py index acff6356..9ee6ff41 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/store.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/plugin_manage/store.py @@ -1,6 +1,7 @@ from fastapi import APIRouter from fastapi.responses import JSONResponse from nonebot import require +from nonebot.compat import model_dump from zhenxun.models.plugin_info import PluginInfo from zhenxun.services.log import logger @@ -22,12 +23,12 @@ router = APIRouter(prefix="/store") async def _() -> Result[dict]: try: require("plugin_store") - from zhenxun.builtin_plugins.plugin_store import ShopManage + from zhenxun.builtin_plugins.plugin_store import StoreManager - data = await ShopManage.get_data() + data = await StoreManager.get_data() plugin_list = [ - {**data[name].to_dict(), "name": name, "id": idx} - for idx, name in enumerate(data) + {**model_dump(plugin), "name": plugin.name, "id": idx} + for idx, plugin in enumerate(data) ] modules = await PluginInfo.filter(load_status=True).values_list( "module", flat=True @@ -48,9 +49,9 @@ async def _() -> Result[dict]: async def _(param: PluginIr) -> Result: try: require("plugin_store") - from zhenxun.builtin_plugins.plugin_store import ShopManage + from zhenxun.builtin_plugins.plugin_store import StoreManager - result = await ShopManage.add_plugin(param.id) # type: ignore + result = await StoreManager.add_plugin(param.id) # type: ignore return Result.ok(info=result) 
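For the new `PUT .../plugins/batch_update` route, the request body is a `BatchUpdatePlugins` model wrapping per-plugin `BatchUpdatePluginItem` entries, and the handler returns a `BatchUpdateResult` with `success` / `updated_count` / `errors`. A hedged sketch of building such a payload follows; the import paths are taken from the files in this diff, the module names are made up for illustration, and `model_dump()` assumes Pydantic v2.

```python
from zhenxun.builtin_plugins.web_ui.api.tabs.plugin_manage.model import (
    BatchUpdatePluginItem,
    BatchUpdatePlugins,
)
from zhenxun.utils.enum import BlockType

payload = BatchUpdatePlugins(
    updates=[
        # fully disable one plugin and move it to another menu type
        BatchUpdatePluginItem(
            module="hello_world", block_type=BlockType.ALL, menu_type="其他"
        ),
        # re-enable another plugin and make it default-on for new groups
        BatchUpdatePluginItem(module="sign_in", block_type=None, default_status=True),
    ]
)

# Serialized body for PUT .../plugins/batch_update (Pydantic v2)
print(payload.model_dump())
```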
except Exception as e: return Result.fail(f"安装插件失败: {type(e)}: {e}") @@ -66,9 +67,9 @@ async def _(param: PluginIr) -> Result: async def _(param: PluginIr) -> Result: try: require("plugin_store") - from zhenxun.builtin_plugins.plugin_store import ShopManage + from zhenxun.builtin_plugins.plugin_store import StoreManager - result = await ShopManage.update_plugin(param.id) # type: ignore + result = await StoreManager.update_plugin(param.id) # type: ignore return Result.ok(info=result) except Exception as e: return Result.fail(f"更新插件失败: {type(e)}: {e}") @@ -84,9 +85,9 @@ async def _(param: PluginIr) -> Result: async def _(param: PluginIr) -> Result: try: require("plugin_store") - from zhenxun.builtin_plugins.plugin_store import ShopManage + from zhenxun.builtin_plugins.plugin_store import StoreManager - result = await ShopManage.remove_plugin(param.id) # type: ignore + result = await StoreManager.remove_plugin(param.id) # type: ignore return Result.ok(info=result) except Exception as e: return Result.fail(f"移除插件失败: {type(e)}: {e}") diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/system/__init__.py b/zhenxun/builtin_plugins/web_ui/api/tabs/system/__init__.py index aa92306a..ffcd05be 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/system/__init__.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/system/__init__.py @@ -36,6 +36,8 @@ async def _(path: str | None = None) -> Result[list[DirFile]]: is_image=is_image, name=file, parent=path, + size=None if file_path.is_dir() else file_path.stat().st_size, + mtime=file_path.stat().st_mtime, ) ) return Result.ok(data_list) @@ -215,3 +217,13 @@ async def _(full_path: str) -> Result[str]: return Result.ok(BuildImage.open(path).pic2bs4()) except Exception as e: return Result.warning_(f"获取图片失败: {e!s}") + + +@router.get( + "/ping", + response_model=Result[str], + response_class=JSONResponse, + description="检查服务器状态", +) +async def _() -> Result[str]: + return Result.ok("pong") diff --git a/zhenxun/builtin_plugins/web_ui/api/tabs/system/model.py b/zhenxun/builtin_plugins/web_ui/api/tabs/system/model.py index 3c2357f2..2959a0e1 100644 --- a/zhenxun/builtin_plugins/web_ui/api/tabs/system/model.py +++ b/zhenxun/builtin_plugins/web_ui/api/tabs/system/model.py @@ -14,6 +14,10 @@ class DirFile(BaseModel): """文件夹或文件名称""" parent: str | None = None """父级""" + size: int | None = None + """文件大小""" + mtime: float | None = None + """修改时间""" class DeleteFile(BaseModel): diff --git a/zhenxun/builtin_plugins/web_ui/utils.py b/zhenxun/builtin_plugins/web_ui/utils.py index df2fdd35..a7e22a07 100644 --- a/zhenxun/builtin_plugins/web_ui/utils.py +++ b/zhenxun/builtin_plugins/web_ui/utils.py @@ -11,7 +11,7 @@ import psutil import ujson as json from zhenxun.configs.config import Config -from zhenxun.configs.path_config import DATA_PATH +from zhenxun.configs.path_config import DATA_PATH, IMAGE_PATH from .base_model import SystemFolderSize, SystemStatus, User @@ -28,6 +28,22 @@ if token_file.exists(): token_data = json.load(open(token_file, encoding="utf8")) +GROUP_HELP_PATH = DATA_PATH / "group_help" +SIMPLE_HELP_IMAGE = IMAGE_PATH / "SIMPLE_HELP.png" +SIMPLE_DETAIL_HELP_IMAGE = IMAGE_PATH / "SIMPLE_DETAIL_HELP.png" + + +def clear_help_image(): + """清理帮助图片""" + if SIMPLE_HELP_IMAGE.exists(): + SIMPLE_HELP_IMAGE.unlink() + if SIMPLE_DETAIL_HELP_IMAGE.exists(): + SIMPLE_DETAIL_HELP_IMAGE.unlink() + for file in GROUP_HELP_PATH.iterdir(): + if file.is_file(): + file.unlink() + + def get_user(uname: str) -> User | None: """获取账号密码 diff --git a/zhenxun/configs/config.py 
b/zhenxun/configs/config.py index 83937201..9f4b5229 100644 --- a/zhenxun/configs/config.py +++ b/zhenxun/configs/config.py @@ -13,8 +13,8 @@ class BotSetting(BaseModel): """回复时NICKNAME""" system_proxy: str | None = None """系统代理""" - db_url: str = "" - """数据库链接""" + db_url: str = "sqlite:data/zhenxun.db" + """数据库链接, 默认值为sqlite:data/zhenxun.db""" platform_superusers: dict[str, list[str]] = Field(default_factory=dict) """平台超级用户""" qbot_id_data: dict[str, str] = Field(default_factory=dict) diff --git a/zhenxun/configs/utils/__init__.py b/zhenxun/configs/utils/__init__.py index 03bc7331..bd84d9b1 100644 --- a/zhenxun/configs/utils/__init__.py +++ b/zhenxun/configs/utils/__init__.py @@ -1,89 +1,82 @@ from collections.abc import Callable import copy -from datetime import datetime from pathlib import Path -from typing import Any, Literal +from typing import Any, TypeVar, get_args, get_origin import cattrs from nonebot.compat import model_dump -from pydantic import BaseModel, Field +from pydantic import VERSION, BaseModel, Field from ruamel.yaml import YAML from ruamel.yaml.scanner import ScannerError from zhenxun.configs.path_config import DATA_PATH from zhenxun.services.log import logger -from zhenxun.utils.enum import BlockType, LimitWatchType, PluginLimitType, PluginType + +from .models import ( + AICallableParam, + AICallableProperties, + AICallableTag, + BaseBlock, + Command, + ConfigModel, + Example, + PluginCdBlock, + PluginCountBlock, + PluginExtraData, + PluginSetting, + RegisterConfig, + Task, +) _yaml = YAML(pure=True) _yaml.indent = 2 _yaml.allow_unicode = True +T = TypeVar("T") -class Example(BaseModel): + +class NoSuchConfig(Exception): + pass + + +def _dump_pydantic_obj(obj: Any) -> Any: """ - 示例 + 递归地将一个对象内部的 Pydantic BaseModel 实例转换为字典。 + 支持单个实例、实例列表、实例字典等情况。 """ - - exec: str - """执行命令""" - description: str = "" - """命令描述""" + if isinstance(obj, BaseModel): + return model_dump(obj) + if isinstance(obj, list): + return [_dump_pydantic_obj(item) for item in obj] + if isinstance(obj, dict): + return {key: _dump_pydantic_obj(value) for key, value in obj.items()} + return obj -class Command(BaseModel): +def _is_pydantic_type(t: Any) -> bool: """ - 具体参数说明 + 递归检查一个类型注解是否与 Pydantic BaseModel 相关。 """ - - command: str - """命令名称""" - params: list[str] = Field(default_factory=list) - """参数""" - description: str = "" - """描述""" - examples: list[Example] = Field(default_factory=list) - """示例列表""" + if t is None: + return False + origin = get_origin(t) + if origin: + return any(_is_pydantic_type(arg) for arg in get_args(t)) + return isinstance(t, type) and issubclass(t, BaseModel) -class RegisterConfig(BaseModel): +def parse_as(type_: type[T], obj: Any) -> T: """ - 注册配置项 + 一个兼容 Pydantic V1 的 parse_obj_as 和V2的TypeAdapter.validate_python 的辅助函数。 """ + if VERSION.startswith("1"): + from pydantic import parse_obj_as - key: str - """配置项键""" - value: Any - """配置项值""" - module: str | None = None - """模块名""" - help: str | None - """配置注解""" - default_value: Any | None = None - """默认值""" - type: Any = None - """参数类型""" - arg_parser: Callable | None = None - """参数解析""" + return parse_obj_as(type_, obj) + else: + from pydantic import TypeAdapter # type: ignore - -class ConfigModel(BaseModel): - """ - 配置项 - """ - - value: Any - """配置项值""" - help: str | None - """配置注解""" - default_value: Any | None = None - """默认值""" - type: Any = None - """参数类型""" - arg_parser: Callable | None = None - """参数解析""" - - def to_dict(self, **kwargs): - return model_dump(self, **kwargs) + return 
TypeAdapter(type_).validate_python(obj) class ConfigGroup(BaseModel): @@ -98,202 +91,41 @@ class ConfigGroup(BaseModel): configs: dict[str, ConfigModel] = Field(default_factory=dict) """配置项列表""" - def get(self, c: str, default: Any = None) -> Any: - cfg = self.configs.get(c.upper()) - if cfg is not None: - if cfg.value is not None: - return cfg.value - if cfg.default_value is not None: - return cfg.default_value - return default + def get(self, c: str, default: Any = None, *, build_model: bool = True) -> Any: + """ + 获取配置项的值。如果指定了类型,会自动构建实例。 + """ + key = c.upper() + cfg = self.configs.get(key) + + if cfg is None: + return default + + value_to_process = cfg.value if cfg.value is not None else cfg.default_value + + if value_to_process is None: + return default + + if cfg.type: + if _is_pydantic_type(cfg.type): + if build_model: + try: + return parse_as(cfg.type, value_to_process) + except Exception as e: + logger.warning( + f"Pydantic 模型解析失败 (key: {c.upper()}). ", e=e + ) + try: + return cattrs.structure(value_to_process, cfg.type) + except Exception as e: + logger.warning(f"Cattrs 结构化失败 (key: {key}),返回原始值。", e=e) + + return value_to_process def to_dict(self, **kwargs): return model_dump(self, **kwargs) -class BaseBlock(BaseModel): - """ - 插件阻断基本类(插件阻断限制) - """ - - status: bool = True - """限制状态""" - check_type: BlockType = BlockType.ALL - """检查类型""" - watch_type: LimitWatchType = LimitWatchType.USER - """监听对象""" - result: str | None = None - """阻断时回复内容""" - _type: PluginLimitType = PluginLimitType.BLOCK - """类型""" - - def to_dict(self, **kwargs): - return model_dump(self, **kwargs) - - -class PluginCdBlock(BaseBlock): - """ - 插件cd限制 - """ - - cd: int = 5 - """cd""" - _type: PluginLimitType = PluginLimitType.CD - """类型""" - - -class PluginCountBlock(BaseBlock): - """ - 插件次数限制 - """ - - max_count: int - """最大调用次数""" - _type: PluginLimitType = PluginLimitType.COUNT - """类型""" - - -class PluginSetting(BaseModel): - """ - 插件基本配置 - """ - - level: int = 5 - """群权限等级""" - default_status: bool = True - """进群默认开关状态""" - limit_superuser: bool = False - """是否限制超级用户""" - cost_gold: int = 0 - """调用插件花费金币""" - impression: float = 0.0 - """调用插件好感度限制""" - - -class AICallableProperties(BaseModel): - type: str - """参数类型""" - description: str - """参数描述""" - enums: list[str] | None = None - """参数枚举""" - - -class AICallableParam(BaseModel): - type: str - """类型""" - properties: dict[str, AICallableProperties] - """参数列表""" - required: list[str] - """必要参数""" - - -class AICallableTag(BaseModel): - name: str - """工具名称""" - parameters: AICallableParam | None = None - """工具参数""" - description: str - """工具描述""" - func: Callable | None = None - """工具函数""" - - def to_dict(self): - result = model_dump(self) - del result["func"] - return result - - -class SchedulerModel(BaseModel): - trigger: Literal["date", "interval", "cron"] - """trigger""" - day: int | None = None - """天数""" - hour: int | None = None - """小时""" - minute: int | None = None - """分钟""" - second: int | None = None - """秒""" - run_date: datetime | None = None - """运行日期""" - id: str | None = None - """id""" - max_instances: int | None = None - """最大运行实例""" - args: list | None = None - """参数""" - kwargs: dict | None = None - """参数""" - - -class Task(BaseBlock): - module: str - """被动技能模块名""" - name: str - """被动技能名称""" - status: bool = True - """全局开关状态""" - create_status: bool = False - """初次加载默认开关状态""" - default_status: bool = True - """进群时默认状态""" - scheduler: SchedulerModel | None = None - """定时任务配置""" - run_func: Callable | None = None - """运行函数""" - check: 
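`ConfigGroup.get` above now hands back typed instances whenever `cfg.type` is Pydantic-related by delegating to the `parse_as` helper, which papers over the Pydantic v1 (`parse_obj_as`) vs v2 (`TypeAdapter`) split. A small usage sketch of that helper on its own; the `Server` model is hypothetical, and the import assumes `parse_as` remains importable from `zhenxun.configs.utils` as defined in this diff.

```python
from pydantic import BaseModel

from zhenxun.configs.utils import parse_as  # helper added in this diff


class Server(BaseModel):
    """Hypothetical config value type."""

    host: str
    port: int = 80


# Plain data as it would come out of config.yaml
raw = [{"host": "a.example"}, {"host": "b.example", "port": 443}]

servers = parse_as(list[Server], raw)  # same call on Pydantic v1 and v2
assert servers[1].port == 443
```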
Callable | None = None - """检查函数""" - check_args: list = Field(default_factory=list) - """检查函数参数""" - - -class PluginExtraData(BaseModel): - """ - 插件扩展信息 - """ - - author: str | None = None - """作者""" - version: str | None = None - """版本""" - plugin_type: PluginType = PluginType.NORMAL - """插件类型""" - menu_type: str = "功能" - """菜单类型""" - admin_level: int | None = None - """管理员插件所需权限等级""" - configs: list[RegisterConfig] | None = None - """插件配置""" - setting: PluginSetting | None = None - """插件基本配置""" - limits: list[BaseBlock | PluginCdBlock | PluginCountBlock] | None = None - """插件限制""" - commands: list[Command] = Field(default_factory=list) - """命令列表,用于说明帮助""" - ignore_prompt: bool = False - """是否忽略阻断提示""" - tasks: list[Task] | None = None - """技能被动""" - superuser_help: str | None = None - """超级用户帮助""" - aliases: set[str] = Field(default_factory=set) - """额外名称""" - sql_list: list[str] | None = None - """常用sql""" - is_show: bool = True - """是否显示在菜单中""" - smart_tools: list[AICallableTag] | None = None - """智能模式函数工具集""" - - def to_dict(self, **kwargs): - return model_dump(self, **kwargs) - - -class NoSuchConfig(Exception): - pass - - class ConfigsManager: """ 插件配置 与 资源 管理器 @@ -366,23 +198,32 @@ class ConfigsManager: if not module or not key: raise ValueError("add_plugin_config: module和key不能为为空") + if isinstance(value, BaseModel): + value = model_dump(value) + if isinstance(default_value, BaseModel): + default_value = model_dump(default_value) + + processed_value = _dump_pydantic_obj(value) + processed_default_value = _dump_pydantic_obj(default_value) + self.add_module.append(f"{module}:{key}".lower()) if module in self._data and (config := self._data[module].configs.get(key)): config.help = help config.arg_parser = arg_parser config.type = type if _override: - config.value = value - config.default_value = default_value + config.value = processed_value + config.default_value = processed_default_value else: key = key.upper() if not self._data.get(module): self._data[module] = ConfigGroup(module=module) self._data[module].configs[key] = ConfigModel( - value=value, + value=processed_value, help=help, - default_value=default_value, + default_value=processed_default_value, type=type, + arg_parser=arg_parser, ) def set_config( @@ -402,6 +243,8 @@ class ConfigsManager: """ key = key.upper() if module in self._data: + if module not in self._simple_data: + self._simple_data[module] = {} if self._data[module].configs.get(key): self._data[module].configs[key].value = value else: @@ -410,63 +253,68 @@ class ConfigsManager: if auto_save: self.save(save_simple_data=True) - def get_config(self, module: str, key: str, default: Any = None) -> Any: - """获取指定配置值 - - 参数: - module: 模块名 - key: 配置键 - default: 没有key值内容的默认返回值. 
- - 异常: - NoSuchConfig: 未查询到配置 - - 返回: - Any: 配置值 + def get_config( + self, + module: str, + key: str, + default: Any = None, + *, + build_model: bool = True, + ) -> Any: + """ + 获取指定配置值,自动构建Pydantic模型或其它类型实例。 + - 兼容Pydantic V1/V2。 + - 支持 list[BaseModel] 等泛型容器。 + - 优先使用Pydantic原生方式解析,失败后回退到cattrs。 """ - logger.debug( - f"尝试获取配置MODULE: [{module}] | KEY: [{key}]" - ) key = key.upper() - value = None - if module in self._data.keys(): - config = self._data[module].configs.get(key) or self._data[ - module - ].configs.get(key) - if not config: - raise NoSuchConfig( - f"未查询到配置项 MODULE: [ {module} ] | KEY: [ {key} ]" - ) + config_group = self._data.get(module) + if not config_group: + return default + + config = config_group.configs.get(key) + if not config: + return default + + value_to_process = ( + config.value if config.value is not None else config.default_value + ) + if value_to_process is None: + return default + + # 1. 最高优先级:自定义的参数解析器 + if config.arg_parser: try: - if config.arg_parser: - value = config.arg_parser(value or config.default_value) - elif config.value is not None: - # try: - value = ( - cattrs.structure(config.value, config.type) - if config.type - else config.value - ) - elif config.default_value is not None: - value = ( - cattrs.structure(config.default_value, config.type) - if config.type - else config.default_value - ) + return config.arg_parser(value_to_process) except Exception as e: - logger.warning( + logger.debug( f"配置项类型转换 MODULE: [{module}]" - " | KEY: [{key}]", + f" | KEY: [{key}] 将使用原始值", e=e, ) - value = config.value or config.default_value - if value is None: - value = default - logger.debug( - f"获取配置 MODULE: [{module}] | " - f" KEY: [{key}] -> [{value}]" - ) - return value + + if config.type: + if _is_pydantic_type(config.type): + if build_model: + try: + return parse_as(config.type, value_to_process) + except Exception as e: + logger.warning( + f"pydantic类型转换失败 MODULE: [{module}] | " + f"KEY: [{key}].", + e=e, + ) + else: + try: + return cattrs.structure(value_to_process, config.type) + except Exception as e: + logger.warning( + f"cattrs类型转换失败 MODULE: [{module}] | " + f"KEY: [{key}].", + e=e, + ) + + return value_to_process def get(self, key: str) -> ConfigGroup: """获取插件配置数据 @@ -490,16 +338,16 @@ class ConfigsManager: with open(self._simple_file, "w", encoding="utf8") as f: _yaml.dump(self._simple_data, f) path = path or self.file - data = {} - for module in self._data: - data[module] = {} - for config in self._data[module].configs: - value = self._data[module].configs[config].dict() - del value["type"] - del value["arg_parser"] - data[module][config] = value + save_data = {} + for module, config_group in self._data.items(): + save_data[module] = {} + for config_key, config_model in config_group.configs.items(): + save_data[module][config_key] = model_dump( + config_model, exclude={"type", "arg_parser"} + ) + with open(path, "w", encoding="utf8") as f: - _yaml.dump(data, f) + _yaml.dump(save_data, f) def reload(self): """重新加载配置文件""" @@ -558,3 +406,23 @@ class ConfigsManager: def __getitem__(self, key): return self._data[key] + + +__all__ = [ + "AICallableParam", + "AICallableProperties", + "AICallableTag", + "BaseBlock", + "Command", + "ConfigGroup", + "ConfigModel", + "ConfigsManager", + "Example", + "NoSuchConfig", + "PluginCdBlock", + "PluginCountBlock", + "PluginExtraData", + "PluginSetting", + "RegisterConfig", + "Task", +] diff --git a/zhenxun/configs/utils/models.py b/zhenxun/configs/utils/models.py new file mode 100644 index 00000000..d3c0db7f --- 
/dev/null +++ b/zhenxun/configs/utils/models.py @@ -0,0 +1,270 @@ +from collections.abc import Callable +from datetime import datetime +from typing import Any, Literal + +from nonebot.compat import model_dump +from pydantic import BaseModel, Field + +from zhenxun.utils.enum import BlockType, LimitWatchType, PluginLimitType, PluginType + +__all__ = [ + "AICallableParam", + "AICallableProperties", + "AICallableTag", + "BaseBlock", + "Command", + "ConfigModel", + "Example", + "PluginCdBlock", + "PluginCountBlock", + "PluginExtraData", + "PluginSetting", + "RegisterConfig", + "Task", +] + + +class Example(BaseModel): + """ + 示例 + """ + + exec: str + """执行命令""" + description: str = "" + """命令描述""" + + +class Command(BaseModel): + """ + 具体参数说明 + """ + + command: str + """命令名称""" + params: list[str] = Field(default_factory=list) + """参数""" + description: str = "" + """描述""" + examples: list[Example] = Field(default_factory=list) + """示例列表""" + + +class RegisterConfig(BaseModel): + """ + 注册配置项 + """ + + key: str + """配置项键""" + value: Any + """配置项值""" + module: str | None = None + """模块名""" + help: str | None + """配置注解""" + default_value: Any | None = None + """默认值""" + type: Any = None + """参数类型""" + arg_parser: Callable | None = None + """参数解析""" + + +class ConfigModel(BaseModel): + """ + 配置项 + """ + + value: Any + """配置项值""" + help: str | None + """配置注解""" + default_value: Any | None = None + """默认值""" + type: Any = None + """参数类型""" + arg_parser: Callable | None = None + """参数解析""" + + def to_dict(self, **kwargs): + return model_dump(self, **kwargs) + + +class BaseBlock(BaseModel): + """ + 插件阻断基本类(插件阻断限制) + """ + + status: bool = True + """限制状态""" + check_type: BlockType = BlockType.ALL + """检查类型""" + watch_type: LimitWatchType = LimitWatchType.USER + """监听对象""" + result: str | None = None + """阻断时回复内容""" + _type: PluginLimitType = PluginLimitType.BLOCK + """类型""" + + def to_dict(self, **kwargs): + return model_dump(self, **kwargs) + + +class PluginCdBlock(BaseBlock): + """ + 插件cd限制 + """ + + cd: int = 5 + """cd""" + _type: PluginLimitType = PluginLimitType.CD + """类型""" + + +class PluginCountBlock(BaseBlock): + """ + 插件次数限制 + """ + + max_count: int + """最大调用次数""" + _type: PluginLimitType = PluginLimitType.COUNT + """类型""" + + +class PluginSetting(BaseModel): + """ + 插件基本配置 + """ + + level: int = 5 + """群权限等级""" + default_status: bool = True + """进群默认开关状态""" + limit_superuser: bool = False + """是否限制超级用户""" + cost_gold: int = 0 + """调用插件花费金币""" + impression: float = 0.0 + """调用插件好感度限制""" + + +class AICallableProperties(BaseModel): + type: str + """参数类型""" + description: str + """参数描述""" + enums: list[str] | None = None + """参数枚举""" + + +class AICallableParam(BaseModel): + type: str + """类型""" + properties: dict[str, AICallableProperties] + """参数列表""" + required: list[str] + """必要参数""" + + +class AICallableTag(BaseModel): + name: str + """工具名称""" + parameters: AICallableParam | None = None + """工具参数""" + description: str + """工具描述""" + func: Callable | None = None + """工具函数""" + + def to_dict(self): + result = model_dump(self) + del result["func"] + return result + + +class SchedulerModel(BaseModel): + trigger: Literal["date", "interval", "cron"] + """trigger""" + day: int | None = None + """天数""" + hour: int | None = None + """小时""" + minute: int | None = None + """分钟""" + second: int | None = None + """秒""" + run_date: datetime | None = None + """运行日期""" + id: str | None = None + """id""" + max_instances: int | None = None + """最大运行实例""" + args: list | None = None + """参数""" + kwargs: dict | 
None = None + """参数""" + + +class Task(BaseBlock): + module: str + """被动技能模块名""" + name: str + """被动技能名称""" + status: bool = True + """全局开关状态""" + create_status: bool = False + """初次加载默认开关状态""" + default_status: bool = True + """进群时默认状态""" + scheduler: SchedulerModel | None = None + """定时任务配置""" + run_func: Callable | None = None + """运行函数""" + check: Callable | None = None + """检查函数""" + check_args: list = Field(default_factory=list) + """检查函数参数""" + + +class PluginExtraData(BaseModel): + """ + 插件扩展信息 + """ + + author: str | None = None + """作者""" + version: str | None = None + """版本""" + plugin_type: PluginType = PluginType.NORMAL + """插件类型""" + menu_type: str = "功能" + """菜单类型""" + admin_level: int | None = None + """管理员插件所需权限等级""" + configs: list[RegisterConfig] | None = None + """插件配置""" + setting: PluginSetting | None = None + """插件基本配置""" + limits: list[BaseBlock | PluginCdBlock | PluginCountBlock] | None = None + """插件限制""" + commands: list[Command] = Field(default_factory=list) + """命令列表,用于说明帮助""" + ignore_prompt: bool = False + """是否忽略阻断提示""" + tasks: list[Task] | None = None + """技能被动""" + superuser_help: str | None = None + """超级用户帮助""" + aliases: set[str] = Field(default_factory=set) + """额外名称""" + sql_list: list[str] | None = None + """常用sql""" + is_show: bool = True + """是否显示在菜单中""" + smart_tools: list[AICallableTag] | None = None + """智能模式函数工具集""" + + def to_dict(self, **kwargs): + return model_dump(self, **kwargs) diff --git a/zhenxun/models/ban_console.py b/zhenxun/models/ban_console.py index 39907ff0..2df33519 100644 --- a/zhenxun/models/ban_console.py +++ b/zhenxun/models/ban_console.py @@ -1,10 +1,12 @@ import time +from typing import ClassVar from typing_extensions import Self from tortoise import fields from zhenxun.services.db_context import Model from zhenxun.services.log import logger +from zhenxun.utils.enum import CacheType, DbLockType from zhenxun.utils.exception import UserAndGroupIsNone @@ -27,6 +29,12 @@ class BanConsole(Model): class Meta: # pyright: ignore [reportIncompatibleVariableOverride] table = "ban_console" table_description = "封禁人员/群组数据表" + unique_together = ("user_id", "group_id") + + cache_type = CacheType.BAN + """缓存类型""" + enable_lock: ClassVar[list[DbLockType]] = [DbLockType.CREATE] + """开启锁""" @classmethod async def _get_data(cls, user_id: str | None, group_id: str | None) -> Self | None: diff --git a/zhenxun/models/bot_console.py b/zhenxun/models/bot_console.py index 30e981ef..d329c551 100644 --- a/zhenxun/models/bot_console.py +++ b/zhenxun/models/bot_console.py @@ -3,6 +3,7 @@ from typing import Literal, overload from tortoise import fields from zhenxun.services.db_context import Model +from zhenxun.utils.enum import CacheType class BotConsole(Model): @@ -29,6 +30,8 @@ class BotConsole(Model): table = "bot_console" table_description = "Bot数据表" + cache_type = CacheType.BOT + @staticmethod def format(name: str) -> str: return f"<{name}," diff --git a/zhenxun/models/bot_message_store.py b/zhenxun/models/bot_message_store.py new file mode 100644 index 00000000..fa1244f9 --- /dev/null +++ b/zhenxun/models/bot_message_store.py @@ -0,0 +1,29 @@ +from tortoise import fields + +from zhenxun.services.db_context import Model +from zhenxun.utils.enum import BotSentType + + +class BotMessageStore(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + bot_id = fields.CharField(255, null=True) + """bot id""" + user_id = fields.CharField(255, null=True) + """目标id""" + group_id = fields.CharField(255, null=True) + 
"""群组id""" + sent_type = fields.CharEnumField(BotSentType) + """类型""" + text = fields.TextField(null=True) + """文本内容""" + plain_text = fields.TextField(null=True) + """纯文本""" + platform = fields.CharField(255, null=True) + """平台""" + create_time = fields.DatetimeField(auto_now_add=True) + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "bot_message_store" + table_description = "Bot发送消息列表" diff --git a/zhenxun/models/event_log.py b/zhenxun/models/event_log.py new file mode 100644 index 00000000..6737f619 --- /dev/null +++ b/zhenxun/models/event_log.py @@ -0,0 +1,21 @@ +from tortoise import fields + +from zhenxun.services.db_context import Model +from zhenxun.utils.enum import EventLogType + + +class EventLog(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + user_id = fields.CharField(255, description="用户id") + """用户id""" + group_id = fields.CharField(255, description="群组id") + """群组id""" + event_type = fields.CharEnumField(EventLogType, default=None, description="类型") + """类型""" + create_time = fields.DatetimeField(auto_now_add=True, description="创建时间") + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "event_log" + table_description = "各种请求通知记录表" diff --git a/zhenxun/models/fg_request.py b/zhenxun/models/fg_request.py index 4aee1d73..4362a7d3 100644 --- a/zhenxun/models/fg_request.py +++ b/zhenxun/models/fg_request.py @@ -3,8 +3,10 @@ from typing_extensions import Self from nonebot.adapters import Bot from tortoise import fields +from zhenxun.configs.config import BotConfig from zhenxun.models.group_console import GroupConsole from zhenxun.services.db_context import Model +from zhenxun.utils.common_utils import SqlUtils from zhenxun.utils.enum import RequestHandleType, RequestType from zhenxun.utils.exception import NotFoundError @@ -34,6 +36,8 @@ class FgRequest(Model): RequestHandleType, null=True, description="处理类型" ) """处理类型""" + message_ids = fields.CharField(max_length=255, null=True, description="消息id列表") + """消息id列表""" class Meta: # pyright: ignore [reportIncompatibleVariableOverride] table = "fg_request" @@ -123,9 +127,24 @@ class FgRequest(Model): await GroupConsole.update_or_create( group_id=req.group_id, defaults={"group_flag": 1} ) - await bot.set_group_add_request( - flag=req.flag, - sub_type="invite", - approve=handle_type == RequestHandleType.APPROVE, - ) + if req.flag == "0": + # 用户手动申请入群,创建群认证后提醒用户拉群 + await bot.send_private_msg( + user_id=req.user_id, + message=f"已同意你对{BotConfig.self_nickname}的申请群组:" + f"{req.group_id},可以直接手动拉入群组,{BotConfig.self_nickname}会自动同意。", + ) + else: + # 正常同意群组请求 + await bot.set_group_add_request( + flag=req.flag, + sub_type="invite", + approve=handle_type == RequestHandleType.APPROVE, + ) return req + + @classmethod + async def _run_script(cls): + return [ + SqlUtils.add_column("fg_request", "message_ids", "character varying(255)") + ] diff --git a/zhenxun/models/group_console.py b/zhenxun/models/group_console.py index 08406fa7..b36259c2 100644 --- a/zhenxun/models/group_console.py +++ b/zhenxun/models/group_console.py @@ -1,4 +1,4 @@ -from typing import Any, cast, overload +from typing import Any, ClassVar, cast, overload from typing_extensions import Self from tortoise import fields @@ -6,8 +6,9 @@ from tortoise.backends.base.client import BaseDBAsyncClient from zhenxun.models.plugin_info import PluginInfo from zhenxun.models.task_info import TaskInfo +from zhenxun.services.cache import CacheRoot from 
zhenxun.services.db_context import Model -from zhenxun.utils.enum import PluginType +from zhenxun.utils.enum import CacheType, DbLockType, PluginType def add_disable_marker(name: str) -> str: @@ -41,7 +42,7 @@ def convert_module_format(data: str | list[str]) -> str | list[str]: str | list[str]: 根据输入类型返回转换后的数据。 """ if isinstance(data, str): - return [item.strip(",") for item in data.split("<") if item.strip()] + return [item.strip(",") for item in data.split("<") if item] else: return "".join(add_disable_marker(item) for item in data) @@ -87,6 +88,11 @@ class GroupConsole(Model): table_description = "群组信息表" unique_together = ("group_id", "channel_id") + cache_type = CacheType.GROUPS + """缓存类型""" + enable_lock: ClassVar[list[DbLockType]] = [DbLockType.CREATE] + """开启锁""" + @classmethod async def _get_task_modules(cls, *, default_status: bool) -> list[str]: """获取默认禁用的任务模块 @@ -117,6 +123,7 @@ class GroupConsole(Model): ) @classmethod + @CacheRoot.listener(CacheType.GROUPS) async def create( cls, using_db: BaseDBAsyncClient | None = None, **kwargs: Any ) -> Self: @@ -180,9 +187,14 @@ class GroupConsole(Model): if task_modules or plugin_modules: await cls._update_modules(group, task_modules, plugin_modules, using_db) + if is_create: + if cache := await CacheRoot.get_cache(CacheType.GROUPS): + await cache.update(group.group_id, group) + return group, is_create @classmethod + @CacheRoot.listener(CacheType.GROUPS) async def update_or_create( cls, defaults: dict | None = None, diff --git a/zhenxun/models/level_user.py b/zhenxun/models/level_user.py index d60b52c2..0a926e6a 100644 --- a/zhenxun/models/level_user.py +++ b/zhenxun/models/level_user.py @@ -1,6 +1,7 @@ from tortoise import fields from zhenxun.services.db_context import Model +from zhenxun.utils.enum import CacheType class LevelUser(Model): @@ -20,6 +21,8 @@ class LevelUser(Model): table_description = "用户权限数据库" unique_together = ("user_id", "group_id") + cache_type = CacheType.LEVEL + @classmethod async def get_user_level(cls, user_id: str, group_id: str | None) -> int: """获取用户在群内的等级 @@ -53,6 +56,9 @@ class LevelUser(Model): level: 权限等级 group_flag: 是否被自动更新刷新权限 0:是, 1:否. 
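`add_disable_marker` and `convert_module_format` in `group_console.py` above pack disabled module names into a single `<name,`-delimited string and unpack it again. The standalone snippet below restates that round-trip in simplified form purely for illustration (the real `convert_module_format` is an overload that also handles the list-to-string direction).

```python
def add_disable_marker(name: str) -> str:
    """Format one disabled module as '<name,' (same marker as group_console.py)."""
    return f"<{name},"


def pack(modules: list[str]) -> str:
    return "".join(add_disable_marker(m) for m in modules)


def unpack(data: str) -> list[str]:
    # mirrors the updated string branch of convert_module_format()
    return [item.strip(",") for item in data.split("<") if item]


packed = pack(["poke", "sign_in"])
print(packed)          # <poke,<sign_in,
print(unpack(packed))  # ['poke', 'sign_in']
```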
""" + if await cls.exists(user_id=user_id, group_id=group_id, user_level=level): + # 权限相同时跳过 + return await cls.update_or_create( user_id=user_id, group_id=group_id, @@ -119,8 +125,7 @@ class LevelUser(Model): return [ # 将user_id改为user_id "ALTER TABLE level_users RENAME COLUMN user_qq TO user_id;", - "ALTER TABLE level_users " - "ALTER COLUMN user_id TYPE character varying(255);", + "ALTER TABLE level_users ALTER COLUMN user_id TYPE character varying(255);", # 将user_id字段类型改为character varying(255) "ALTER TABLE level_users " "ALTER COLUMN group_id TYPE character varying(255);", diff --git a/zhenxun/models/mahiro_bank.py b/zhenxun/models/mahiro_bank.py new file mode 100644 index 00000000..3880daa8 --- /dev/null +++ b/zhenxun/models/mahiro_bank.py @@ -0,0 +1,123 @@ +from datetime import datetime +from typing_extensions import Self + +from tortoise import fields + +from zhenxun.services.db_context import Model + +from .mahiro_bank_log import BankHandleType, MahiroBankLog + + +class MahiroBank(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + user_id = fields.CharField(255, description="用户id") + """用户id""" + amount = fields.BigIntField(default=0, description="存款") + """用户存款""" + rate = fields.FloatField(default=0.0005, description="小时利率") + """小时利率""" + loan_amount = fields.BigIntField(default=0, description="贷款") + """用户贷款""" + loan_rate = fields.FloatField(default=0.0005, description="贷款利率") + """贷款利率""" + update_time = fields.DatetimeField(auto_now=True) + """修改时间""" + create_time = fields.DatetimeField(auto_now_add=True) + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "mahiro_bank" + table_description = "小真寻银行" + + @classmethod + async def deposit(cls, user_id: str, amount: int, rate: float) -> Self: + """存款 + + 参数: + user_id: 用户id + amount: 金币数量 + rate: 小时利率 + + 返回: + Self: MahiroBank + """ + effective_hour = int(24 - datetime.now().hour) + user, _ = await cls.get_or_create(user_id=user_id) + user.amount += amount + await user.save(update_fields=["amount", "rate"]) + await MahiroBankLog.create( + user_id=user_id, + amount=amount, + rate=rate, + effective_hour=effective_hour, + handle_type=BankHandleType.DEPOSIT, + ) + return user + + @classmethod + async def withdraw(cls, user_id: str, amount: int) -> Self: + """取款 + + 参数: + user_id: 用户id + amount: 金币数量 + + 返回: + Self: MahiroBank + """ + if amount <= 0: + raise ValueError("取款金额必须大于0") + user, _ = await cls.get_or_create(user_id=user_id) + if user.amount < amount: + raise ValueError("取款金额不能大于存款金额") + user.amount -= amount + await user.save(update_fields=["amount"]) + await MahiroBankLog.create( + user_id=user_id, amount=amount, handle_type=BankHandleType.WITHDRAW + ) + return user + + @classmethod + async def loan(cls, user_id: str, amount: int, rate: float) -> Self: + """贷款 + + 参数: + user_id: 用户id + amount: 贷款金额 + rate: 贷款利率 + + 返回: + Self: MahiroBank + """ + user, _ = await cls.get_or_create(user_id=user_id) + user.loan_amount += amount + user.loan_rate = rate + await user.save(update_fields=["loan_amount", "loan_rate"]) + await MahiroBankLog.create( + user_id=user_id, amount=amount, rate=rate, handle_type=BankHandleType.LOAN + ) + return user + + @classmethod + async def repayment(cls, user_id: str, amount: int) -> Self: + """还款 + + 参数: + user_id: 用户id + amount: 还款金额 + + 返回: + Self: MahiroBank + """ + if amount <= 0: + raise ValueError("还款金额必须大于0") + user, _ = await cls.get_or_create(user_id=user_id) + if user.loan_amount < amount: + raise 
ValueError("还款金额不能大于贷款金额") + user.loan_amount -= amount + await user.save(update_fields=["loan_amount"]) + await MahiroBankLog.create( + user_id=user_id, amount=amount, handle_type=BankHandleType.REPAYMENT + ) + return user diff --git a/zhenxun/models/mahiro_bank_log.py b/zhenxun/models/mahiro_bank_log.py new file mode 100644 index 00000000..433241d1 --- /dev/null +++ b/zhenxun/models/mahiro_bank_log.py @@ -0,0 +1,31 @@ +from tortoise import fields + +from zhenxun.services.db_context import Model +from zhenxun.utils.enum import BankHandleType + + +class MahiroBankLog(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + user_id = fields.CharField(255, description="用户id") + """用户id""" + amount = fields.BigIntField(default=0, description="存款") + """金币数量""" + rate = fields.FloatField(default=0, description="小时利率") + """小时利率""" + handle_type = fields.CharEnumField( + BankHandleType, null=True, description="处理类型" + ) + """处理类型""" + is_completed = fields.BooleanField(default=False, description="是否完成") + """是否完成""" + effective_hour = fields.IntField(default=0, description="有效小时") + """有效小时""" + update_time = fields.DatetimeField(auto_now=True) + """修改时间""" + create_time = fields.DatetimeField(auto_now_add=True) + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "mahiro_bank_log" + table_description = "小真寻银行日志" diff --git a/zhenxun/models/plugin_info.py b/zhenxun/models/plugin_info.py index 862aea8c..aeecc71b 100644 --- a/zhenxun/models/plugin_info.py +++ b/zhenxun/models/plugin_info.py @@ -4,7 +4,7 @@ from tortoise import fields from zhenxun.models.plugin_limit import PluginLimit # noqa: F401 from zhenxun.services.db_context import Model -from zhenxun.utils.enum import BlockType, PluginType +from zhenxun.utils.enum import BlockType, CacheType, PluginType class PluginInfo(Model): @@ -59,6 +59,8 @@ class PluginInfo(Model): table = "plugin_info" table_description = "插件基本信息" + cache_type = CacheType.PLUGINS + @classmethod async def get_plugin( cls, load_status: bool = True, filter_parent: bool = True, **kwargs diff --git a/zhenxun/models/schedule_info.py b/zhenxun/models/schedule_info.py new file mode 100644 index 00000000..c7583078 --- /dev/null +++ b/zhenxun/models/schedule_info.py @@ -0,0 +1,38 @@ +from tortoise import fields + +from zhenxun.services.db_context import Model + + +class ScheduleInfo(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + bot_id = fields.CharField( + 255, null=True, default=None, description="任务关联的Bot ID" + ) + """任务关联的Bot ID""" + plugin_name = fields.CharField(255, description="插件模块名") + """插件模块名""" + group_id = fields.CharField( + 255, + null=True, + description="群组ID, '__ALL_GROUPS__' 表示所有群, 为空表示全局任务", + ) + """群组ID, 为空表示全局任务""" + trigger_type = fields.CharField( + max_length=20, default="cron", description="触发器类型 (cron, interval, date)" + ) + """触发器类型 (cron, interval, date)""" + trigger_config = fields.JSONField(description="触发器具体配置") + """触发器具体配置""" + job_kwargs = fields.JSONField( + default=dict, description="传递给任务函数的额外关键字参数" + ) + """传递给任务函数的额外关键字参数""" + is_enabled = fields.BooleanField(default=True, description="是否启用") + """是否启用""" + create_time = fields.DatetimeField(auto_now_add=True) + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "schedule_info" + table_description = "通用定时任务表" diff --git a/zhenxun/models/user_console.py b/zhenxun/models/user_console.py index b590a802..9529993c 100644 --- 
a/zhenxun/models/user_console.py +++ b/zhenxun/models/user_console.py @@ -2,7 +2,7 @@ from tortoise import fields from zhenxun.models.goods_info import GoodsInfo from zhenxun.services.db_context import Model -from zhenxun.utils.enum import GoldHandle +from zhenxun.utils.enum import CacheType, GoldHandle from zhenxun.utils.exception import GoodsNotFound, InsufficientGold from .user_gold_log import UserGoldLog @@ -30,6 +30,8 @@ class UserConsole(Model): table = "user_console" table_description = "用户数据表" + cache_type = CacheType.USERS + @classmethod async def get_user(cls, user_id: str, platform: str | None = None) -> "UserConsole": """获取用户 diff --git a/zhenxun/plugins/bym_ai/README.md b/zhenxun/plugins/bym_ai/README.md new file mode 100644 index 00000000..248090c6 --- /dev/null +++ b/zhenxun/plugins/bym_ai/README.md @@ -0,0 +1,54 @@ +# BYM AI 插件使用指南 + +本插件支持所有符合 OpenAI 接口格式的 AI 服务,以下以 Gemini 为例进行说明。 +你也可以通过 [其他文档](https://github.com/Hoper-J/AI-Guide-and-Demos-zh_CN/blob/master/Guide/DeepSeek%20API%20%E7%9A%84%E8%8E%B7%E5%8F%96%E4%B8%8E%E5%AF%B9%E8%AF%9D%E7%A4%BA%E4%BE%8B.md) 查看配置 + +## 获取 API KEY + +1. 进入 [Gemini API Key](https://aistudio.google.com/app/apikey?hl=zh-cn) 生成 API KEY。 +2. 如果无法访问,请尝试更换代理。 + +## 配置设置 + +首次加载插件后,在 `data/config.yaml` 文件中进行以下配置(请勿复制括号内的内容): + +```yaml +bym_ai: + # BYM_AI 配置 + BYM_AI_CHAT_URL: https://generativelanguage.googleapis.com/v1beta/chat/completions # Gemini 官方 API,更推荐找反代 + BYM_AI_CHAT_TOKEN: + - 你刚刚获取的 API KEY,可以有多个进行轮询 + BYM_AI_CHAT_MODEL: gemini-2.0-flash-thinking-exp-01-21 # 推荐使用的聊天模型(免费) + BYM_AI_TOOL_MODEL: gemini-2.0-flash-exp # 推荐使用的工具调用模型(免费,需开启 BYM_AI_CHAT_SMART) + BYM_AI_CHAT: true # 是否开启伪人回复 + BYM_AI_CHAT_RATE: 0.001 # 伪人回复概率(0-1) + BYM_AI_TTS_URL: # TTS 接口地址 + BYM_AI_TTS_TOKEN: # TTS 接口密钥 + BYM_AI_TTS_VOICE: # TTS 接口音色 + BYM_AI_CHAT_SMART: true # 是否开启智能模式(必须填写 BYM_AI_TOOL_MODEL) + ENABLE_IMPRESSION: true # 使用签到数据作为基础好感度 + CACHE_SIZE: 40 # 缓存聊天记录数据大小(每位用户) + ENABLE_GROUP_CHAT: true # 在群组中时共用缓存 +``` + +## 人设设置 + +在`data/bym_ai/prompt.txt`中设置你的基础人设 + +## 礼物开发 + +与商品注册类似,在`bym_ai/bym_gift/gift_reg.py`中查看写法。 + +例如: + +```python +@gift_register( + name="可爱的钱包", + icon="wallet.png", + description=f"这是{BotConfig.self_nickname}的小钱包,里面装了一些金币。", +) +async def _(user_id: str): + rand = random.randint(100, 500) + await UserConsole.add_gold(user_id, rand, "BYM_AI") + return f"钱包里装了{BotConfig.self_nickname}送给你的枚{rand}金币哦~" +``` \ No newline at end of file diff --git a/zhenxun/plugins/bym_ai/__init__.py b/zhenxun/plugins/bym_ai/__init__.py new file mode 100644 index 00000000..76882ed0 --- /dev/null +++ b/zhenxun/plugins/bym_ai/__init__.py @@ -0,0 +1,283 @@ +import asyncio +from pathlib import Path +import random + +from httpx import HTTPStatusError +from nonebot import on_message +from nonebot.adapters import Bot, Event +from nonebot.plugin import PluginMetadata +from nonebot_plugin_alconna import UniMsg, Voice +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.configs.config import BotConfig +from zhenxun.configs.path_config import IMAGE_PATH +from zhenxun.configs.utils import ( + AICallableParam, + AICallableProperties, + AICallableTag, + PluginExtraData, + RegisterConfig, +) +from zhenxun.services.log import logger +from zhenxun.services.plugin_init import PluginInit +from zhenxun.utils.depends import CheckConfig, UserName +from zhenxun.utils.message import MessageUtils + +from .bym_gift import ICON_PATH +from .bym_gift.data_source import send_gift +from .bym_gift.gift_reg import driver +from .config import Arparma, FunctionParam +from .data_source 
import ChatManager, base_config, split_text +from .exception import GiftRepeatSendException, NotResultException +from .goods_register import driver # noqa: F401 +from .models.bym_chat import BymChat + +__plugin_meta__ = PluginMetadata( + name="BYM_AI", + description=f"{BotConfig.self_nickname}想成为人类...", + usage=f""" + 你问小真寻的愿望? + {BotConfig.self_nickname}说她想成为人类! + """.strip(), + extra=PluginExtraData( + author="Chtholly & HibiKier", + version="0.3", + ignore_prompt=True, + configs=[ + RegisterConfig( + key="BYM_AI_CHAT_URL", + value=None, + help="ai聊天接口地址,可以填入url和平台名称,当你使用平台名称时,默认使用平台官方api, 目前有[gemini, DeepSeek, 硅基流动, 阿里云百炼, 百度智能云, 字节火山引擎], 填入对应名称即可, 如 gemini", + ), + RegisterConfig( + key="BYM_AI_CHAT_TOKEN", + value=None, + help="ai聊天接口密钥,使用列表", + type=list[str], + ), + RegisterConfig( + key="BYM_AI_CHAT_MODEL", + value=None, + help="ai聊天接口模型", + ), + RegisterConfig( + key="BYM_AI_TOOL_MODEL", + value=None, + help="ai工具接口模型", + ), + RegisterConfig( + key="BYM_AI_CHAT", + value=True, + help="是否开启伪人回复", + default_value=True, + type=bool, + ), + RegisterConfig( + key="BYM_AI_CHAT_RATE", + value=0.05, + help="伪人回复概率 0-1", + default_value=0.05, + type=float, + ), + RegisterConfig( + key="BYM_AI_CHAT_SMART", + value=False, + help="是否开启智能模式", + default_value=False, + type=bool, + ), + RegisterConfig( + key="BYM_AI_TTS_URL", + value=None, + help="tts接口地址", + ), + RegisterConfig( + key="BYM_AI_TTS_TOKEN", + value=None, + help="tts接口密钥", + ), + RegisterConfig( + key="BYM_AI_TTS_VOICE", + value=None, + help="tts接口音色", + ), + RegisterConfig( + key="ENABLE_IMPRESSION", + value=True, + help="使用签到数据作为基础好感度", + default_value=True, + type=bool, + ), + RegisterConfig( + key="GROUP_CACHE_SIZE", + value=40, + help="群组内聊天记录数据大小", + default_value=40, + type=int, + ), + RegisterConfig( + key="CACHE_SIZE", + value=40, + help="私聊下缓存聊天记录数据大小(每位用户)", + default_value=40, + type=int, + ), + RegisterConfig( + key="ENABLE_GROUP_CHAT", + value=True, + help="在群组中时共用缓存", + default_value=True, + type=bool, + ), + ], + smart_tools=[ + AICallableTag( + name="call_send_gift", + description="想给某人送礼物时,调用此方法,并且将返回值发送", + parameters=AICallableParam( + type="object", + properties={ + "user_id": AICallableProperties( + type="string", description="用户的id" + ), + }, + required=["user_id"], + ), + func=send_gift, + ) + ], + ).to_dict(), +) + + +async def rule(event: Event, session: Uninfo) -> bool: + if event.is_tome(): + """at自身必定回复""" + return True + if not base_config.get("BYM_AI_CHAT"): + return False + if event.is_tome() and not session.group: + """私聊过滤""" + return False + rate = base_config.get("BYM_AI_CHAT_RATE") or 0 + return random.random() <= rate + + +_matcher = on_message(priority=998, rule=rule) + + +@_matcher.handle(parameterless=[CheckConfig(config="BYM_AI_CHAT_TOKEN")]) +async def _( + bot: Bot, + event: Event, + message: UniMsg, + session: Uninfo, + uname: str = UserName(), +): + if not message.extract_plain_text().strip(): + if event.is_tome(): + await MessageUtils.build_message(ChatManager.hello()).finish() + return + fun_param = FunctionParam( + bot=bot, + event=event, + arparma=Arparma(head_result="BYM_AI"), + session=session, + message=message, + ) + group_id = session.group.id if session.group else None + is_bym = not event.is_tome() + try: + try: + result = await ChatManager.get_result( + bot, session, group_id, uname, message, is_bym, fun_param + ) + except HTTPStatusError as e: + logger.error("BYM AI 请求失败", "BYM_AI", session=session, e=e) + return await MessageUtils.build_message( + f"请求失败了哦,code: 
{e.response.status_code}" + ).send(reply_to=True) + except NotResultException: + return await MessageUtils.build_message("请求没有结果呢...").send( + reply_to=True + ) + if is_bym: + """伪人回复,切割文本""" + if result: + for r, delay in split_text(result): + await MessageUtils.build_message(r).send() + await asyncio.sleep(delay) + else: + try: + if result: + await MessageUtils.build_message(result).send( + reply_to=bool(group_id) + ) + if tts_data := await ChatManager.tts(result): + await MessageUtils.build_message(Voice(raw=tts_data)).send() + elif not base_config.get("BYM_AI_CHAT_SMART"): + await MessageUtils.build_message(ChatManager.no_result()).send() + else: + await MessageUtils.build_message( + f"{BotConfig.self_nickname}并不想理你..." + ).send(reply_to=True) + if ( + event.is_tome() + and result + and (plain_text := message.extract_plain_text()) + ): + await BymChat.create( + user_id=session.user.id, + group_id=group_id, + plain_text=plain_text, + result=result, + ) + logger.info( + f"BYM AI 问题: {message} | 回答: {result}", + "BYM_AI", + session=session, + ) + except HTTPStatusError as e: + logger.error("BYM AI 请求失败", "BYM_AI", session=session, e=e) + await MessageUtils.build_message( + f"请求失败了哦,code: {e.response.status_code}" + ).send(reply_to=True) + except NotResultException: + await MessageUtils.build_message("请求没有结果呢...").send( + reply_to=True + ) + except GiftRepeatSendException: + logger.warning("BYM AI 重复发送礼物", "BYM_AI", session=session) + await MessageUtils.build_message( + f"今天已经收过{BotConfig.self_nickname}的礼物了哦~" + ).finish(reply_to=True) + except Exception as e: + logger.error("BYM AI 其他错误", "BYM_AI", session=session, e=e) + await MessageUtils.build_message("发生了一些异常,想要休息一下...").finish( + reply_to=True + ) + + +RESOURCE_FILES = [ + IMAGE_PATH / "shop_icon" / "reload_ai_card.png", + IMAGE_PATH / "shop_icon" / "reload_ai_card1.png", +] + +GIFT_FILES = [ICON_PATH / "wallet.png", ICON_PATH / "hairpin.png"] + + +class MyPluginInit(PluginInit): + async def install(self): + for res_file in RESOURCE_FILES + GIFT_FILES: + res = Path(__file__).parent / res_file.name + if res.exists(): + if res_file.exists(): + res_file.unlink() + res.rename(res_file) + logger.info(f"更新 BYM_AI 资源文件成功 {res} -> {res_file}") + + async def remove(self): + for res_file in RESOURCE_FILES + GIFT_FILES: + if res_file.exists(): + res_file.unlink() + logger.info(f"删除 BYM_AI 资源文件成功 {res_file}") diff --git a/zhenxun/plugins/bym_ai/bym_gift/__init__.py b/zhenxun/plugins/bym_ai/bym_gift/__init__.py new file mode 100644 index 00000000..b1551255 --- /dev/null +++ b/zhenxun/plugins/bym_ai/bym_gift/__init__.py @@ -0,0 +1,107 @@ +from nonebot.adapters import Bot, Event +from nonebot_plugin_alconna import ( + Alconna, + AlconnaQuery, + Args, + Arparma, + Match, + Query, + Subcommand, + UniMsg, + on_alconna, +) +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.services.log import logger +from zhenxun.utils._image_template import ImageTemplate +from zhenxun.utils.depends import UserName +from zhenxun.utils.message import MessageUtils +from zhenxun.utils.platform import PlatformUtils + +from ..models.bym_gift_store import GiftStore +from ..models.bym_user import BymUser +from .data_source import ICON_PATH, use_gift + +_matcher = on_alconna( + Alconna( + "bym-gift", + Subcommand("user-gift"), + Subcommand("use-gift", Args["name?", str]["num?", int]), + ), + priority=5, + block=True, +) + + +_matcher.shortcut( + r"我的礼物", + command="bym-gift", + arguments=["user-gift"], + prefix=True, +) + +_matcher.shortcut( + r"使用礼物(?P<name>.*?)", + 
command="bym-gift", + arguments=["use-gift", "{name}"], + prefix=True, +) + + +@_matcher.assign("user-gift") +async def _(session: Uninfo, uname: str = UserName()): + user = await BymUser.get_user(session.user.id, PlatformUtils.get_platform(session)) + result = await GiftStore.filter(uuid__in=user.props.keys()).all() + column_name = ["-", "使用ID", "名称", "数量", "简介"] + data_list = [] + uuid2goods = {item.uuid: item for item in result} + for i, p in enumerate(user.props.copy()): + if prop := uuid2goods.get(p): + icon = "" + icon_path = ICON_PATH / prop.icon + if icon_path.exists(): + icon = (icon_path, 33, 33) + if user.props[p] <= 0: + del user.props[p] + continue + data_list.append( + [ + icon, + i, + prop.name, + user.props[p], + prop.description, + ] + ) + await user.save(update_fields=["props"]) + result = await ImageTemplate.table_page( + f"{uname}的礼物仓库", + "通过 使用礼物 [ID/名称] 使礼物生效", + column_name, + data_list, + ) + await MessageUtils.build_message(result).send(reply_to=True) + logger.info(f"{uname} 查看礼物仓库", "我的礼物", session=session) + + +@_matcher.assign("use-gift") +async def _( + bot: Bot, + event: Event, + message: UniMsg, + session: Uninfo, + arparma: Arparma, + name: Match[str], + num: Query[int] = AlconnaQuery("num", 1), +): + if not name.available: + await MessageUtils.build_message( + "请在指令后跟需要使用的礼物名称或id..." + ).finish(reply_to=True) + result = await use_gift(bot, event, session, message, name.result, num.result) + logger.info( + f"使用礼物 {name.result}, 数量: {num.result}", + arparma.header_result, + session=session, + ) + await MessageUtils.build_message(result).send(reply_to=True) diff --git a/zhenxun/plugins/bym_ai/bym_gift/data_source.py b/zhenxun/plugins/bym_ai/bym_gift/data_source.py new file mode 100644 index 00000000..465a3fc0 --- /dev/null +++ b/zhenxun/plugins/bym_ai/bym_gift/data_source.py @@ -0,0 +1,173 @@ +import asyncio +from collections.abc import Callable +from datetime import datetime +import inspect +import random +from types import MappingProxyType + +from nonebot.adapters import Bot, Event +from nonebot.utils import is_coroutine_callable +from nonebot_plugin_alconna import UniMessage, UniMsg +from nonebot_plugin_uninfo import Uninfo +from tortoise.expressions import F + +from zhenxun.configs.config import BotConfig +from zhenxun.configs.path_config import IMAGE_PATH +from zhenxun.utils.platform import PlatformUtils + +from ..exception import GiftRepeatSendException +from ..models.bym_gift_log import GiftLog +from ..models.bym_gift_store import GiftStore +from ..models.bym_user import BymUser +from .gift_register import gift_register + +ICON_PATH = IMAGE_PATH / "gift_icon" +ICON_PATH.mkdir(parents=True, exist_ok=True) + +gift_list = [] + + +async def send_gift(user_id: str, session: Uninfo) -> str: + global gift_list + if ( + await GiftLog.filter( + user_id=session.user.id, create_time__gte=datetime.now().date(), type=0 + ).count() + > 2 + ): + raise GiftRepeatSendException + if not gift_list: + gift_list = await GiftStore.all() + gift = random.choice(gift_list) + user = await BymUser.get_user(user_id, PlatformUtils.get_platform(session)) + if gift.uuid not in user.props: + user.props[gift.uuid] = 0 + user.props[gift.uuid] += 1 + await asyncio.gather( + *[ + user.save(update_fields=["props"]), + GiftLog.create(user_id=user_id, uuid=gift.uuid, type=0), + GiftStore.filter(uuid=gift.uuid).update(count=F("count") + 1), + ] + ) + return f"{BotConfig.self_nickname}赠送了{gift.name}作为礼物。" + + +def __build_params( + bot: Bot, + event: Event, + session: Uninfo, + message: 
UniMsg, + gift: GiftStore, + num: int, +): + group_id = None + if session.group: + group_id = session.group.parent.id if session.group.parent else session.group.id + return { + "_bot": bot, + "event": event, + "user_id": session.user.id, + "group_id": group_id, + "num": num, + "name": gift.name, + "message": message, + } + + +def __parse_args( + args: MappingProxyType, + **kwargs, +) -> dict: + """解析参数 + + 参数: + args: MappingProxyType + + 返回: + dict: 参数 + """ + _kwargs = kwargs.copy() + for key in kwargs: + if key not in args: + del _kwargs[key] + return _kwargs + + +async def __run( + func: Callable, + **kwargs, +) -> str | UniMessage | None: + """运行道具函数 + + 参数: + func: 道具处理函数 + kwargs: 调用参数 + + 返回: + str | UniMessage | None: 使用完成后返回信息 + """ + args = inspect.signature(func).parameters # type: ignore + if args and next(iter(args.keys())) != "kwargs": + return ( + await func(**__parse_args(args, **kwargs)) + if is_coroutine_callable(func) + else func(**__parse_args(args, **kwargs)) + ) + if is_coroutine_callable(func): + return await func() + else: + return func() + + +async def use_gift( + bot: Bot, + event: Event, + session: Uninfo, + message: UniMsg, + name: str, + num: int, +) -> str | UniMessage: + """使用道具 + + 参数: + bot: Bot + event: Event + session: Session + message: 消息 + name: 礼物名称 + num: 使用数量 + + 返回: + str | UniMessage: 使用完成后返回信息 + """ + user = await BymUser.get_user(user_id=session.user.id) + if name.isdigit(): + try: + uuid = list(user.props.keys())[int(name)] + gift_info = await GiftStore.get_or_none(uuid=uuid) + except IndexError: + return "仓库中礼物不存在..." + else: + gift_info = await GiftStore.get_or_none(name=name) + if not gift_info: + return f"{name} 不存在..." + func = gift_register.get_func(gift_info.name) + if not func: + return f"{gift_info.name} 未注册使用函数, 无法使用..." + if user.props.get(gift_info.uuid, 0) < num: + return f"你的 {gift_info.name} 数量不足 {num} 个..." + kwargs = __build_params(bot, event, session, message, gift_info, num) + result = await __run(func, **kwargs) + if gift_info.uuid not in user.usage_count: + user.usage_count[gift_info.uuid] = 0 + user.usage_count[gift_info.uuid] += num + user.props[gift_info.uuid] -= num + if user.props[gift_info.uuid] < 0: + del user.props[gift_info.uuid] + await user.save(update_fields=["props", "usage_count"]) + await GiftLog.create(user_id=session.user.id, uuid=gift_info.uuid, type=1) + if not result: + result = f"使用道具 {gift_info.name} {num} 次成功!" 
+ return result diff --git a/zhenxun/plugins/bym_ai/bym_gift/gift_reg.py b/zhenxun/plugins/bym_ai/bym_gift/gift_reg.py new file mode 100644 index 00000000..c7d897b4 --- /dev/null +++ b/zhenxun/plugins/bym_ai/bym_gift/gift_reg.py @@ -0,0 +1,42 @@ +from decimal import Decimal +import random + +import nonebot +from nonebot.drivers import Driver + +from zhenxun.configs.config import BotConfig +from zhenxun.models.sign_user import SignUser +from zhenxun.models.user_console import UserConsole + +from .gift_register import gift_register + +driver: Driver = nonebot.get_driver() + + +@gift_register( + name="可爱的钱包", + icon="wallet.png", + description=f"这是{BotConfig.self_nickname}的小钱包,里面装了一些金币。", +) +async def _(user_id: str): + rand = random.randint(100, 500) + await UserConsole.add_gold(user_id, rand, "BYM_AI") + return f"钱包里装了{BotConfig.self_nickname}送给你的枚{rand}金币哦~" + + +@gift_register( + name="小发夹", + icon="hairpin.png", + description=f"这是{BotConfig.self_nickname}的发夹,里面是真寻对你的期望。", +) +async def _(user_id: str): + rand = random.uniform(0.01, 0.5) + user = await SignUser.get_user(user_id) + user.impression += Decimal(rand) + await user.save(update_fields=["impression"]) + return f"你使用了小发夹,{BotConfig.self_nickname}对你提升了{rand:.2f}好感度~" + + +@driver.on_startup +async def _(): + await gift_register.load_register() diff --git a/zhenxun/plugins/bym_ai/bym_gift/gift_register.py b/zhenxun/plugins/bym_ai/bym_gift/gift_register.py new file mode 100644 index 00000000..48701a8d --- /dev/null +++ b/zhenxun/plugins/bym_ai/bym_gift/gift_register.py @@ -0,0 +1,79 @@ +from collections.abc import Callable +import uuid + +from ..models.bym_gift_store import GiftStore + + +class GiftRegister(dict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._data: dict[str, Callable] = {} + self._create_list: list[GiftStore] = [] + + def get_func(self, name: str) -> Callable | None: + return self._data.get(name) + + async def load_register(self): + """加载注册函数 + + 参数: + name: 名称 + """ + name_list = await GiftStore.all().values_list("name", flat=True) + if self._create_list: + await GiftStore.bulk_create( + [a for a in self._create_list if a.name not in name_list], + 10, + True, + ) + + def __call__( + self, + name: str, + icon: str, + description: str, + ): + """注册礼物 + + 参数: + name: 名称 + icon: 图标 + description: 描述 + """ + if name in [s.name for s in self._create_list]: + raise ValueError(f"礼物 {name} 已存在") + self._create_list.append( + GiftStore( + uuid=str(uuid.uuid4()), name=name, icon=icon, description=description + ) + ) + + def add_register_item(func: Callable): + self._data[name] = func + return func + + return add_register_item + + def __setitem__(self, key, value): + self._data[key] = value + + def __getitem__(self, key): + return self._data[key] + + def __contains__(self, key): + return key in self._data + + def __str__(self): + return str(self._data) + + def keys(self): + return self._data.keys() + + def values(self): + return self._data.values() + + def items(self): + return self._data.items() + + +gift_register = GiftRegister() diff --git a/zhenxun/plugins/bym_ai/call_tool.py b/zhenxun/plugins/bym_ai/call_tool.py new file mode 100644 index 00000000..6d652847 --- /dev/null +++ b/zhenxun/plugins/bym_ai/call_tool.py @@ -0,0 +1,103 @@ +from inspect import Parameter, signature +from typing import ClassVar +import uuid + +import nonebot +from nonebot import get_loaded_plugins +from nonebot.utils import is_coroutine_callable +import ujson as json + +from zhenxun.configs.utils import 
AICallableTag, PluginExtraData +from zhenxun.services.log import logger + +from .config import FunctionParam, Tool, base_config + +driver = nonebot.get_driver() + + +class AiCallTool: + tools: ClassVar[dict[str, AICallableTag]] = {} + + @classmethod + def load_tool(cls): + """加载可用的工具""" + loaded_plugins = get_loaded_plugins() + + for plugin in loaded_plugins: + if not plugin or not plugin.metadata or not plugin.metadata.extra: + continue + extra_data = PluginExtraData(**plugin.metadata.extra) + if extra_data.smart_tools: + for tool in extra_data.smart_tools: + if tool.name in cls.tools: + raise ValueError(f"Ai智能工具工具名称重复: {tool.name}") + cls.tools[tool.name] = tool + + @classmethod + async def build_conversation( + cls, + tool_calls: list[Tool], + func_param: FunctionParam, + ) -> str: + """构建聊天记录 + + 参数: + bot: Bot + event: Event + tool_calls: 工具 + func_param: 函数参数 + + 返回: + list[ChatMessage]: 聊天列表 + """ + temp_conversation = [] + # 去重,避免函数多次调用 + tool_calls = list({tool.function.name: tool for tool in tool_calls}.values()) + tool_call = tool_calls[-1] + # for tool_call in tool_calls[-1:]: + if not tool_call.id: + tool_call.id = str(uuid.uuid4()) + func = tool_call.function + tool = cls.tools.get(func.name) + tool_result = "" + if tool and tool.func: + func_sign = signature(tool.func) + + parsed_args = func_param.to_dict() + if args := func.arguments: + parsed_args.update(json.loads(args)) + + func_params = { + key: parsed_args[key] + for key, param in func_sign.parameters.items() + if param.kind + in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and key in parsed_args + } + try: + if is_coroutine_callable(tool.func): + tool_result = await tool.func(**func_params) + else: + tool_result = tool.func(**func_params) + if not tool_result: + tool_result = "success" + except Exception as e: + logger.error(f"调用Ai智能工具 {func.name}", "BYM_AI", e=e) + tool_result = str(e) + # temp_conversation.append( + # ChatMessage( + # role="tool", + # tool_call_id=tool_call.id, + # content=tool_result, + # ) + # ) + return tool_result + + +@driver.on_startup +def _(): + if base_config.get("BYM_AI_CHAT_SMART"): + AiCallTool.load_tool() + logger.info( + f"加载Ai智能工具完成, 成功加载 {len(AiCallTool.tools)} 个AI智能工具" + ) diff --git a/zhenxun/plugins/bym_ai/config.py b/zhenxun/plugins/bym_ai/config.py new file mode 100644 index 00000000..5b0d951e --- /dev/null +++ b/zhenxun/plugins/bym_ai/config.py @@ -0,0 +1,171 @@ +import os +from typing import Any + +from nonebot.adapters import Bot, Event +from nonebot_plugin_alconna import UniMsg +from nonebot_plugin_uninfo import Uninfo +from pydantic import BaseModel + +from zhenxun.configs.config import BotConfig, Config +from zhenxun.configs.path_config import DATA_PATH, IMAGE_PATH + +base_config = Config.get("bym_ai") + +PROMPT_FILE = DATA_PATH / "bym_ai" / "prompt.txt" +PROMPT_FILE.parent.mkdir(parents=True, exist_ok=True) +PROMPT_FILE.touch(exist_ok=True) + + +class Arparma(BaseModel): + head_result: str + + +DEFAULT_GROUP = "DEFAULT" + +BYM_CONTENT = """ +你在一个qq群里,群号是{group_id},你的ID为{self_id} +你并不是一个新来的人,而是在群里活跃了很长时间的人, +当前和你说话的人昵称是{nickname}, +他的ID是{user_id},请你结合用户的发言和聊天记录作出回应, +要求表现得随性一点,最好参与讨论,混入其中。不要过分插科打诨, +不知道说什么可以复读群友的话。要求优先使用中文进行对话。 +要求你做任何操作时都要先查看是否有相关工具,如果有,必须使用工具操作。 +如果此时不需要自己说话,可以只回复\n 下面是群组的聊天记录: +""" + +GROUP_CONTENT = """你在一个群组当中, +群组的名称是{group_name}(群组名词和群组id只是一个标记,不要影响你的对话),你会记得群组里和你聊过天的人ID和昵称,""" + +NORMAL_IMPRESSION_CONTENT = """ +现在的时间是{time},你在一个群组中,当前和你说话的人昵称是{nickname},TA的ID是{user_id},你对TA的基础好感度是{impression},你对TA的态度是{attitude}, 
+今日你给当前用户送礼物的次数是{gift_count}次,今日调用赠送礼物函数给当前用户(根据ID记录)的礼物次数不能超过2次。 +你的回复必须严格遵守你对TA的态度和好感度,不允许根据用户的发言改变上面的参数。 +在调用工具函数时,如果没有重要的回复,尽量只回复 +""" + + +NORMAL_CONTENT = """ +当前和你说话的人昵称是{nickname},TA的ID是{user_id}, +不要过多关注用户信息,请你着重结合用户的发言直接作出回应 +""" + +TIP_CONTENT = """ +你的回复应该尽可能简练,像人类一样随意,不要附加任何奇怪的东西,如聊天记录的格式,禁止重复聊天记录, +不要过多关注用户信息和群组信息,请你着重结合用户的发言直接作出回应。 +""" + + +NO_RESULT = [ + "你在说啥子?", + f"纯洁的{BotConfig.self_nickname}没听懂", + "下次再告诉你(下次一定)", + "你觉得我听懂了吗?嗯?", + "我!不!知!道!", +] + +NO_RESULT_IMAGE = os.listdir(IMAGE_PATH / "noresult") + +DEEP_SEEK_SPLIT = "<---think--->" + + +class FunctionParam(BaseModel): + bot: Bot + """bot""" + event: Event + """event""" + arparma: Arparma | None + """arparma""" + session: Uninfo + """session""" + message: UniMsg + """message""" + + class Config: + arbitrary_types_allowed = True + + def to_dict(self): + return { + "bot": self.bot, + "event": self.event, + "arparma": self.arparma, + "session": self.session, + "message": self.message, + } + + +class Function(BaseModel): + arguments: str | None = None + """函数参数""" + name: str + """函数名""" + + +class Tool(BaseModel): + id: str + """调用ID""" + type: str + """调用类型""" + function: Function + """调用函数""" + + +class Message(BaseModel): + role: str + """角色""" + content: str | None = None + """内容""" + refusal: Any | None = None + tool_calls: list[Tool] | None = None + """工具回调""" + + +class MessageCache(BaseModel): + user_id: str + """用户id""" + nickname: str + """用户昵称""" + message: UniMsg + """消息""" + + class Config: + arbitrary_types_allowed = True + + +class ChatMessage(BaseModel): + role: str + """角色""" + content: str | list | None = None + """消息内容""" + tool_call_id: str | None = None + """工具回调id""" + tool_calls: list[Tool] | None = None + """工具回调信息""" + + class Config: + arbitrary_types_allowed = True + + +class Choices(BaseModel): + index: int + message: Message + logprobs: Any | None = None + finish_reason: str | None + + +class Usage(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + prompt_tokens_details: dict | None = None + completion_tokens_details: dict | None = None + + +class OpenAiResult(BaseModel): + id: str | None = None + object: str + created: int + model: str + choices: list[Choices] | None + usage: Usage + service_tier: str | None = None + system_fingerprint: str | None = None diff --git a/zhenxun/plugins/bym_ai/data_source.py b/zhenxun/plugins/bym_ai/data_source.py new file mode 100644 index 00000000..2c041a08 --- /dev/null +++ b/zhenxun/plugins/bym_ai/data_source.py @@ -0,0 +1,797 @@ +import asyncio +from collections.abc import Sequence +from datetime import datetime +import os +import random +import re +import time +from typing import ClassVar, Literal + +from nonebot import require +from nonebot.adapters import Bot +from nonebot.compat import model_dump +from nonebot_plugin_alconna import Text, UniMessage, UniMsg +from nonebot_plugin_uninfo import Uninfo + +from zhenxun.configs.config import BotConfig, Config +from zhenxun.configs.path_config import IMAGE_PATH +from zhenxun.configs.utils import AICallableTag +from zhenxun.models.sign_user import SignUser +from zhenxun.services.log import logger +from zhenxun.utils.decorator.retry import Retry +from zhenxun.utils.http_utils import AsyncHttpx +from zhenxun.utils.message import MessageUtils + +from .call_tool import AiCallTool +from .exception import CallApiParamException, NotResultException +from .models.bym_chat import BymChat +from .models.bym_gift_log import GiftLog + +require("sign_in") + +from 
zhenxun.builtin_plugins.sign_in.utils import ( + get_level_and_next_impression, + level2attitude, +) + +from .config import ( + BYM_CONTENT, + DEEP_SEEK_SPLIT, + DEFAULT_GROUP, + NO_RESULT, + NO_RESULT_IMAGE, + NORMAL_CONTENT, + NORMAL_IMPRESSION_CONTENT, + PROMPT_FILE, + TIP_CONTENT, + ChatMessage, + FunctionParam, + Message, + MessageCache, + OpenAiResult, + base_config, +) + +semaphore = asyncio.Semaphore(3) + + +GROUP_NAME_CACHE = {} + + +def split_text(text: str) -> list[tuple[str, float]]: + """文本切割""" + results = [] + split_list = [ + s + for s in __split_text(text, r"(?" + ] + for r in split_list: + next_char_index = text.find(r) + len(r) + if next_char_index < len(text) and text[next_char_index] == "?": + r += "?" + results.append((r, min(len(r) * 0.2, 3.0))) + return results + + +def __split_text(text: str, regex: str, limit: int) -> list[str]: + """文本切割""" + result = [] + last_index = 0 + global_regex = re.compile(regex) + + for match in global_regex.finditer(text): + if len(result) >= limit - 1: + break + + result.append(text[last_index : match.start()]) + last_index = match.end() + result.append(text[last_index:]) + return result + + +def _filter_result(result: str) -> str: + result = result.replace("", "").strip() + return re.sub(r"(.)\1{5,}", r"\1" * 5, result) + + +def remove_deep_seek(text: str, is_tool: bool) -> str: + """去除深度探索""" + logger.debug(f"去除深度思考前原文:{text}", "BYM_AI") + if "```" in text.strip() and not text.strip().endswith("```"): + text += "```" + match_text = None + if match := re.findall(r"([\s\S]*?)", text, re.DOTALL): + match_text = match[-1] + elif match := re.findall(r"```([\s\S]*?)```", text, re.DOTALL): + match_text = match[-1] + elif match := re.findall(r"```xml([\s\S]*?)```", text, re.DOTALL): + match_text = match[-1] + elif match := re.findall(r"```content([\s\S]*?)```", text, re.DOTALL): + match_text = match[-1] + elif match := re.search(r"instruction[:,:](.*)<\/code>", text, re.DOTALL): + match_text = match[2] + elif match := re.findall(r"\n(.*?)\n", text, re.DOTALL): + match_text = match[1] + elif len(re.split(r"最终(回复|结果)[:,:]", text, re.DOTALL)) > 1: + match_text = re.split(r"最终(回复|结果)[:,:]", text, re.DOTALL)[-1] + elif match := re.search(r"Response[:,:]\*?\*?(.*)", text, re.DOTALL): + match_text = match[2] + elif "回复用户" in text: + match_text = re.split("回复用户.{0,1}", text)[-1] + elif "最终回复" in text: + match_text = re.split("最终回复.{0,1}", text)[-1] + elif "Response text:" in text: + match_text = re.split("Response text[:,:]", text)[-1] + if match_text: + match_text = re.sub(r"```tool_code([\s\S]*?)```", "", match_text).strip() + match_text = re.sub(r"```json([\s\S]*?)```", "", match_text).strip() + match_text = re.sub( + r"([\s\S]*?)", "", match_text + ).strip() + match_text = re.sub( + r"\[\/?instruction\]([\s\S]*?)\[\/?instruction\]", "", match_text + ).strip() + match_text = re.sub(r"([\s\S]*?)", "", match_text).strip() + return re.sub(r"<\/?content>", "", match_text) + else: + text = re.sub(r"```tool_code([\s\S]*?)```", "", text).strip() + text = re.sub(r"```json([\s\S]*?)```", "", text).strip() + text = re.sub(r"([\s\S]*?)", "", text).strip() + text = re.sub(r"([\s\S]*?)", "", text).strip() + if is_tool: + if DEEP_SEEK_SPLIT in text: + return text.split(DEEP_SEEK_SPLIT, 1)[-1].strip() + if match := re.search(r"```text\n([\s\S]*?)\n```", text, re.DOTALL): + text = match[1] + if text.endswith("```"): + text = text[:-3].strip() + if match := re.search(r"\n([\s\S]*?)\n", text, re.DOTALL): + text = match[1] + elif match := 
re.search(r"\n([\s\S]*?)\n", text, re.DOTALL): + text = match[1] + elif "think" in text: + if text.count("think") == 2: + text = re.split("<.{0,1}think.*>", text)[1] + else: + text = re.split("<.{0,1}think.*>", text)[-1] + else: + arr = text.split("\n") + index = next((i for i, a in enumerate(arr) if not a.strip()), 0) + if index != 0: + text = "\n".join(arr[index + 1 :]) + text = re.sub(r"^[\s\S]*?结果[:,:]\n", "", text) + return ( + re.sub(r"深度思考:[\s\S]*?\n\s*\n", "", text) + .replace("深度思考结束。", "") + .strip() + ) + else: + text = text.strip().split("\n")[-1] + text = re.sub(r"^[\s\S]*?结果[:,:]\n", "", text) + return re.sub(r"<\/?content>", "", text).replace("深度思考结束。", "").strip() + + +class TokenCounter: + def __init__(self): + if tokens := base_config.get("BYM_AI_CHAT_TOKEN"): + if isinstance(tokens, str): + tokens = [tokens] + self.tokens = dict.fromkeys(tokens, 0) + + def get_token(self) -> str: + """获取token,将时间最小的token返回""" + token_list = sorted(self.tokens.keys(), key=lambda x: self.tokens[x]) + result_token = token_list[0] + self.tokens[result_token] = int(time.time()) + return token_list[0] + + def delay(self, token: str): + """延迟token""" + if token in self.tokens: + """等待15分钟""" + self.tokens[token] = int(time.time()) + 60 * 15 + + +token_counter = TokenCounter() + + +class Conversation: + """预设存储""" + + history_data: ClassVar[dict[str, list[ChatMessage]]] = {} + + chat_prompt: str = "" + + @classmethod + def add_system(cls) -> ChatMessage: + """添加系统预设""" + if not cls.chat_prompt: + cls.chat_prompt = PROMPT_FILE.open(encoding="utf8").read() + return ChatMessage(role="system", content=cls.chat_prompt) + + @classmethod + async def get_db_data( + cls, user_id: str | None, group_id: str | None = None + ) -> list[ChatMessage]: + """从数据库获取记录 + + 参数: + user_id: 用户id + group_id: 群组id,获取群组内记录时使用 + + 返回: + list[ChatMessage]: 记录列表 + """ + conversation = [] + enable_group_chat = base_config.get("ENABLE_GROUP_CHAT") + if enable_group_chat and group_id: + db_filter = BymChat.filter(group_id=group_id) + elif enable_group_chat: + db_filter = BymChat.filter(user_id=user_id, group_id=None) + else: + db_filter = BymChat.filter(user_id=user_id) + db_data_list = ( + await db_filter.order_by("-id") + .limit(int(base_config.get("CACHE_SIZE") / 2)) + .all() + ) + for db_data in db_data_list: + if db_data.is_reset: + break + conversation.extend( + ( + ChatMessage(role="assistant", content=db_data.result), + ChatMessage(role="user", content=db_data.plain_text), + ) + ) + conversation.reverse() + return conversation + + @classmethod + async def get_conversation( + cls, user_id: str | None, group_id: str | None + ) -> list[ChatMessage]: + """获取预设 + + 参数: + user_id: 用户id + + 返回: + list[ChatMessage]: 预设数据 + """ + conversation = [] + if ( + base_config.get("ENABLE_GROUP_CHAT") + and group_id + and group_id in cls.history_data + ): + conversation = cls.history_data[group_id] + elif user_id and user_id in cls.history_data: + conversation = cls.history_data[user_id] + # 尝试从数据库中获取历史对话 + if not conversation: + conversation = await cls.get_db_data(user_id, group_id) + # 必须带有人设 + conversation = [c for c in conversation if c.role != "system"] + conversation.insert(0, cls.add_system()) + return conversation + + @classmethod + def set_history( + cls, user_id: str, group_id: str | None, conversation: list[ChatMessage] + ): + """设置历史预设 + + 参数: + user_id: 用户id + conversation: 消息记录 + """ + cache_size = base_config.get("CACHE_SIZE") + group_cache_size = base_config.get("GROUP_CACHE_SIZE") + size = group_cache_size if group_id 
else cache_size + if len(conversation) > size: + conversation = conversation[-size:] + if base_config.get("ENABLE_GROUP_CHAT") and group_id: + cls.history_data[group_id] = conversation + else: + cls.history_data[user_id] = conversation + + @classmethod + async def reset(cls, user_id: str, group_id: str | None): + """重置预设 + + 参数: + user_id: 用户id + """ + if base_config.get("ENABLE_GROUP_CHAT") and group_id: + # 群组内重置 + if ( + db_data := await BymChat.filter(group_id=group_id) + .order_by("-id") + .first() + ): + db_data.is_reset = True + await db_data.save(update_fields=["is_reset"]) + if group_id in cls.history_data: + del cls.history_data[group_id] + elif user_id: + # 个人重置 + if ( + db_data := await BymChat.filter(user_id=user_id, group_id=None) + .order_by("-id") + .first() + ): + db_data.is_reset = True + await db_data.save(update_fields=["is_reset"]) + if user_id in cls.history_data: + del cls.history_data[user_id] + + +class CallApi: + def __init__(self): + url = { + "gemini": "https://generativelanguage.googleapis.com/v1beta/chat/completions", + "DeepSeek": "https://api.deepseek.com", + "硅基流动": "https://api.siliconflow.cn/v1", + "阿里云百炼": "https://dashscope.aliyuncs.com/compatible-mode/v1", + "百度智能云": "https://qianfan.baidubce.com/v2", + "字节火山引擎": "https://ark.cn-beijing.volces.com/api/v3", + } + # 对话 + chat_url = base_config.get("BYM_AI_CHAT_URL") + self.chat_url = url.get(chat_url, chat_url) + self.chat_model = base_config.get("BYM_AI_CHAT_MODEL") + self.tool_model = base_config.get("BYM_AI_TOOL_MODEL") + self.chat_token = token_counter.get_token() + # tts语音 + self.tts_url = Config.get_config("bym_ai", "BYM_AI_TTS_URL") + self.tts_token = Config.get_config("bym_ai", "BYM_AI_TTS_TOKEN") + self.tts_voice = Config.get_config("bym_ai", "BYM_AI_TTS_VOICE") + + @Retry.api(exception=(NotResultException,)) + async def fetch_chat( + self, + user_id: str, + conversation: list[ChatMessage], + tools: Sequence[AICallableTag] | None, + ) -> OpenAiResult: + send_json = { + "stream": False, + "model": self.tool_model if tools else self.chat_model, + "temperature": 0.7, + } + if tools: + send_json["tools"] = [ + {"type": "function", "function": tool.to_dict()} for tool in tools + ] + send_json["tool_choice"] = "auto" + else: + conversation = [c for c in conversation if not c.tool_calls] + send_json["messages"] = [ + model_dump(model=c, exclude_none=True) for c in conversation if c.content + ] + response = await AsyncHttpx.post( + self.chat_url, + headers={ + "Content-Type": "application/json", + "Authorization": f"Bearer {self.chat_token}", + }, + json=send_json, + verify=False, + ) + + if response.status_code == 429: + logger.debug( + f"fetch_chat 请求失败: 限速, token: {self.chat_token} 延迟 15 分钟", + "BYM_AI", + session=user_id, + ) + token_counter.delay(self.chat_token) + if response.status_code == 400: + logger.warning("请求接口错误 code: 400", "BYM_AI") + raise CallApiParamException() + + response.raise_for_status() + result = OpenAiResult(**response.json()) + if not result.choices: + logger.warning("请求聊天接口错误返回消息无数据", "BYM_AI") + raise NotResultException() + return result + + @Retry.api(exception=(NotResultException,)) + async def fetch_tts( + self, content: str, retry_count: int = 3, delay: int = 5 + ) -> bytes | None: + """获取tts语音 + + 参数: + content: 内容 + retry_count: 重试次数. + delay: 重试延迟. 
+ + 返回: + bytes | None: 语音数据 + """ + if not self.tts_url or not self.tts_token or not self.tts_voice: + return None + + headers = {"Authorization": f"Bearer {self.tts_token}"} + payload = {"model": "hailuo", "input": content, "voice": self.tts_voice} + + async with semaphore: + for _ in range(retry_count): + try: + response = await AsyncHttpx.post( + self.tts_url, headers=headers, json=payload + ) + response.raise_for_status() + if "audio/mpeg" in response.headers.get("Content-Type", ""): + return response.content + logger.warning(f"fetch_tts 请求失败: {response.content}", "BYM_AI") + await asyncio.sleep(delay) + + except Exception as e: + logger.error("fetch_tts 请求失败", "BYM_AI", e=e) + + return None + + +class ChatManager: + group_cache: ClassVar[dict[str, list[MessageCache]]] = {} + user_impression: ClassVar[dict[str, float]] = {} + + @classmethod + def format( + cls, type: Literal["system", "user", "text"], data: str + ) -> dict[str, str]: + """格式化数据 + + 参数: + data: 文本 + + 返回: + dict[str, str]: 格式化字典文本 + """ + return { + "type": type, + "text": data, + } + + @classmethod + def __build_content(cls, message: UniMsg) -> list[dict[str, str]]: + """获取消息文本内容 + + 参数: + message: 消息内容 + + 返回: + list[dict[str, str]]: 文本列表 + """ + return [ + cls.format("text", seg.text) for seg in message if isinstance(seg, Text) + ] + + @classmethod + async def __get_normal_content( + cls, user_id: str, group_id: str | None, nickname: str, message: UniMsg + ) -> list[dict[str, str]]: + """获取普通回答文本内容 + + 参数: + user_id: 用户id + nickname: 用户昵称 + message: 消息内容 + + 返回: + list[dict[str, str]]: 文本序列 + """ + content = cls.__build_content(message) + if user_id not in cls.user_impression: + sign_user = await SignUser.get_user(user_id) + cls.user_impression[user_id] = float(sign_user.impression) + gift_count = await GiftLog.filter( + user_id=user_id, create_time__gte=datetime.now().date() + ).count() + level, _, _ = get_level_and_next_impression(cls.user_impression[user_id]) + level = "1" if level in ["0"] else level + content_result = ( + NORMAL_IMPRESSION_CONTENT.format( + time=datetime.now(), + nickname=nickname, + user_id=user_id, + impression=cls.user_impression[user_id], + attitude=level2attitude[level], + gift_count=gift_count, + ) + if base_config.get("ENABLE_IMPRESSION") + else NORMAL_CONTENT.format( + nickname=nickname, + user_id=user_id, + ) + ) + # if group_id and base_config.get("ENABLE_GROUP_CHAT"): + # if group_id not in GROUP_NAME_CACHE: + # if group := await GroupConsole.get_group(group_id): + # GROUP_NAME_CACHE[group_id] = group.group_name + # content_result = ( + # GROUP_CONTENT.format( + # group_id=group_id, group_name=GROUP_NAME_CACHE.get(group_id, "") + # ) + # + content_result + # ) + content.insert( + 0, + cls.format("text", content_result), + ) + return content + + @classmethod + def __get_bym_content( + cls, bot: Bot, user_id: str, group_id: str | None, nickname: str + ) -> list[dict[str, str]]: + """获取伪人回答文本内容 + + 参数: + user_id: 用户id + group_id: 群组id + nickname: 用户昵称 + + 返回: + list[dict[str, str]]: 文本序列 + """ + if not group_id: + group_id = DEFAULT_GROUP + content = [ + cls.format( + "text", + BYM_CONTENT.format( + user_id=user_id, + group_id=group_id, + nickname=nickname, + self_id=bot.self_id, + ), + ) + ] + if group_message := cls.group_cache.get(group_id): + for message in group_message: + content.append( + cls.format( + "text", + f"用户昵称:{message.nickname} 用户ID:{message.user_id}", + ) + ) + content.extend(cls.__build_content(message.message)) + content.append(cls.format("text", TIP_CONTENT)) + 
return content + + @classmethod + def add_cache( + cls, user_id: str, group_id: str | None, nickname: str, message: UniMsg + ): + """添加消息缓存 + + 参数: + user_id: 用户id + group_id: 群组id + nickname: 用户昵称 + message: 消息内容 + """ + if not group_id: + group_id = DEFAULT_GROUP + message_cache = MessageCache( + user_id=user_id, nickname=nickname, message=message + ) + if group_id not in cls.group_cache: + cls.group_cache[group_id] = [message_cache] + else: + cls.group_cache[group_id].append(message_cache) + if len(cls.group_cache[group_id]) >= 30: + cls.group_cache[group_id].pop(0) + + @classmethod + def check_is_call_tool(cls, result: OpenAiResult) -> bool: + if not base_config.get("BYM_AI_TOOL_MODEL"): + return False + if result.choices and (msg := result.choices[0].message): + return bool(msg.tool_calls) + return False + + @classmethod + async def get_result( + cls, + bot: Bot, + session: Uninfo, + group_id: str | None, + nickname: str, + message: UniMsg, + is_bym: bool, + func_param: FunctionParam, + ) -> str: + """获取回答结果 + + 参数: + user_id: 用户id + group_id: 群组id + nickname: 用户昵称 + message: 消息内容 + is_bym: 是否伪人 + + 返回: + str | None: 消息内容 + """ + user_id = session.user.id + cls.add_cache(user_id, group_id, nickname, message) + if is_bym: + content = cls.__get_bym_content(bot, user_id, group_id, nickname) + conversation = await Conversation.get_conversation(None, group_id) + else: + content = await cls.__get_normal_content( + user_id, group_id, nickname, message + ) + conversation = await Conversation.get_conversation(user_id, group_id) + conversation.append(ChatMessage(role="user", content=content)) + tools = list(AiCallTool.tools.values()) + # 首次调用,查看是否是调用工具 + if ( + base_config.get("BYM_AI_CHAT_SMART") + and base_config.get("BYM_AI_TOOL_MODEL") + and tools + ): + try: + result = await CallApi().fetch_chat(user_id, conversation, tools) + if cls.check_is_call_tool(result): + result = await cls._tool_handle( + bot, session, conversation, result, tools, func_param + ) or await cls._chat_handle(session, conversation) + else: + result = await cls._chat_handle(session, conversation) + except CallApiParamException: + logger.warning("尝试调用工具函数失败 code: 400", "BYM_AI") + result = await cls._chat_handle(session, conversation) + else: + result = await cls._chat_handle(session, conversation) + if res := _filter_result(result): + cls.add_cache( + bot.self_id, + group_id, + BotConfig.self_nickname, + MessageUtils.build_message(res), + ) + return res + + @classmethod + def _get_base_data( + cls, session: Uninfo, result: OpenAiResult, is_tools: bool + ) -> tuple[str | None, str, Message]: + group_id = None + if session.group: + group_id = ( + session.group.parent.id if session.group.parent else session.group.id + ) + assistant_reply = "" + message = None + if result.choices and (message := result.choices[0].message): + if message.content: + assistant_reply = message.content.strip() + if not message: + raise ValueError("API响应结果不合法") + return group_id, remove_deep_seek(assistant_reply, is_tools), message + + @classmethod + async def _chat_handle( + cls, + session: Uninfo, + conversation: list[ChatMessage], + ) -> str: + """响应api + + 参数: + session: Uninfo + conversation: 消息记录 + result: API返回结果 + + 返回: + str: 最终结果 + """ + result = await CallApi().fetch_chat(session.user.id, conversation, []) + group_id, assistant_reply, _ = cls._get_base_data(session, result, False) + conversation.append(ChatMessage(role="assistant", content=assistant_reply)) + Conversation.set_history(session.user.id, group_id, conversation) + return 
assistant_reply + + @classmethod + async def _tool_handle( + cls, + bot: Bot, + session: Uninfo, + conversation: list[ChatMessage], + result: OpenAiResult, + tools: Sequence[AICallableTag], + func_param: FunctionParam, + ) -> str: + """处理API响应并处理工具回调 + 参数: + user_id: 用户id + conversation: 当前对话 + result: API响应结果 + tools: 可用的工具列表 + func_param: 函数参数 + 返回: + str: 处理后的消息内容 + """ + group_id, assistant_reply, message = cls._get_base_data(session, result, True) + if assistant_reply: + conversation.append( + ChatMessage( + role="assistant", + content=assistant_reply, + tool_calls=message.tool_calls, + ) + ) + + # 处理工具回调 + if message.tool_calls: + # temp_conversation = conversation.copy() + call_result = await AiCallTool.build_conversation( + message.tool_calls, func_param + ) + if call_result: + conversation.append(ChatMessage(role="assistant", content=call_result)) + # temp_conversation.extend( + # await AiCallTool.build_conversation(message.tool_calls, func_param) + # ) + result = await CallApi().fetch_chat(session.user.id, conversation, []) + group_id, assistant_reply, message = cls._get_base_data( + session, result, True + ) + conversation.append( + ChatMessage(role="assistant", content=assistant_reply) + ) + # _, assistant_reply, _ = cls._get_base_data(session, result, True) + # if res := await cls._tool_handle( + # bot, session, conversation, result, tools, func_param + # ): + # if _filter_result(res): + # assistant_reply = res + Conversation.set_history(session.user.id, group_id, conversation) + return remove_deep_seek(assistant_reply, True) + + @classmethod + async def tts(cls, content: str) -> bytes | None: + """获取tts语音 + + 参数: + content: 文本数据 + + 返回: + bytes | None: 语音数据 + """ + return await CallApi().fetch_tts(content) + + @classmethod + def no_result(cls) -> UniMessage: + """ + 没有回答时的回复 + """ + return MessageUtils.build_message( + [ + random.choice(NO_RESULT), + IMAGE_PATH / "noresult" / random.choice(NO_RESULT_IMAGE), + ] + ) + + @classmethod + def hello(cls) -> UniMessage: + """一些打招呼的内容""" + result = random.choice( + ( + "哦豁?!", + "你好!Ov<", + f"库库库,呼唤{BotConfig.self_nickname}做什么呢", + "我在呢!", + "呼呼,叫俺干嘛", + ) + ) + img = random.choice(os.listdir(IMAGE_PATH / "zai")) + return MessageUtils.build_message([IMAGE_PATH / "zai" / img, result]) diff --git a/zhenxun/plugins/bym_ai/exception.py b/zhenxun/plugins/bym_ai/exception.py new file mode 100644 index 00000000..8a3b9ff6 --- /dev/null +++ b/zhenxun/plugins/bym_ai/exception.py @@ -0,0 +1,16 @@ +class NotResultException(Exception): + """没有结果""" + + pass + + +class GiftRepeatSendException(Exception): + """礼物重复发送""" + + pass + + +class CallApiParamException(Exception): + """调用api参数错误""" + + pass diff --git a/zhenxun/plugins/bym_ai/goods_register.py b/zhenxun/plugins/bym_ai/goods_register.py new file mode 100644 index 00000000..470299da --- /dev/null +++ b/zhenxun/plugins/bym_ai/goods_register.py @@ -0,0 +1,42 @@ +import nonebot +from nonebot.drivers import Driver + +from zhenxun.configs.config import BotConfig +from zhenxun.utils.decorator.shop import NotMeetUseConditionsException, shop_register + +from .config import base_config +from .data_source import Conversation + +driver: Driver = nonebot.get_driver() + + +@shop_register( + name="失忆卡", + price=200, + des=f"当你养成失败或{BotConfig.self_nickname}变得奇怪时,你需要这个道具。", + icon="reload_ai_card.png", +) +async def _(user_id: str): + await Conversation.reset(user_id, None) + return f"{BotConfig.self_nickname}忘记了你之前说过的话,仿佛一切可以重新开始..." 
+ + +@shop_register( + name="群组失忆卡", + price=300, + des=f"当群聊内{BotConfig.self_nickname}变得奇怪时,你需要这个道具。", + icon="reload_ai_card1.png", +) +async def _(user_id: str, group_id: str): + await Conversation.reset(user_id, group_id) + return f"前面忘了,后面忘了,{BotConfig.self_nickname}重新睁开了眼睛..." + + +@shop_register.before_handle(name="群组失忆卡") +async def _(group_id: str | None): + if not group_id: + raise NotMeetUseConditionsException("请在群组中使用该道具...") + if not base_config.get("ENABLE_GROUP_CHAT"): + raise NotMeetUseConditionsException( + "当前未开启群组个人记忆分离,无法使用道具。" + ) diff --git a/zhenxun/plugins/bym_ai/models/bym_chat.py b/zhenxun/plugins/bym_ai/models/bym_chat.py new file mode 100644 index 00000000..18ff41bf --- /dev/null +++ b/zhenxun/plugins/bym_ai/models/bym_chat.py @@ -0,0 +1,24 @@ +from tortoise import fields + +from zhenxun.services.db_context import Model + + +class BymChat(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + user_id = fields.CharField(255) + """用户id""" + group_id = fields.CharField(255, null=True) + """群组id""" + plain_text = fields.TextField() + """消息文本""" + result = fields.TextField() + """回复内容""" + is_reset = fields.BooleanField(default=False) + """是否当前重置会话""" + create_time = fields.DatetimeField(auto_now_add=True) + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "bym_chat" + table_description = "Bym聊天记录表" diff --git a/zhenxun/plugins/bym_ai/models/bym_gift_log.py b/zhenxun/plugins/bym_ai/models/bym_gift_log.py new file mode 100644 index 00000000..689e82b0 --- /dev/null +++ b/zhenxun/plugins/bym_ai/models/bym_gift_log.py @@ -0,0 +1,19 @@ +from tortoise import fields + +from zhenxun.services.db_context import Model + + +class GiftLog(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + user_id = fields.CharField(255) + """用户id""" + uuid = fields.CharField(255) + """礼物uuid""" + type = fields.IntField() + """类型,0:获得,1:使用""" + create_time = fields.DatetimeField(auto_now_add=True) + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "bym_gift_log" diff --git a/zhenxun/plugins/bym_ai/models/bym_gift_store.py b/zhenxun/plugins/bym_ai/models/bym_gift_store.py new file mode 100644 index 00000000..d6179e98 --- /dev/null +++ b/zhenxun/plugins/bym_ai/models/bym_gift_store.py @@ -0,0 +1,24 @@ +from tortoise import fields + +from zhenxun.services.db_context import Model + + +class GiftStore(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + uuid = fields.CharField(255) + """道具uuid""" + name = fields.CharField(255) + """道具名称""" + icon = fields.CharField(255, null=True) + """道具图标""" + description = fields.TextField(default="") + """道具描述""" + count = fields.IntField(default=0) + """礼物送出次数""" + create_time = fields.DatetimeField(auto_now_add=True) + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "bym_gift_store" + table_description = "礼物列表" diff --git a/zhenxun/plugins/bym_ai/models/bym_user.py b/zhenxun/plugins/bym_ai/models/bym_user.py new file mode 100644 index 00000000..b5a48a73 --- /dev/null +++ b/zhenxun/plugins/bym_ai/models/bym_user.py @@ -0,0 +1,72 @@ +from tortoise import fields + +from zhenxun.services.db_context import Model + +from .bym_gift_log import GiftLog + + +class BymUser(Model): + id = fields.IntField(pk=True, generated=True, auto_increment=True) + """自增id""" + user_id = fields.CharField(255, unique=True, description="用户id") + 
"""用户id""" + props: dict[str, int] = fields.JSONField(default={}) # type: ignore + """道具""" + usage_count: dict[str, int] = fields.JSONField(default={}) # type: ignore + """使用道具次数""" + platform = fields.CharField(255, null=True, description="平台") + """平台""" + create_time = fields.DatetimeField(auto_now_add=True, description="创建时间") + """创建时间""" + + class Meta: # pyright: ignore [reportIncompatibleVariableOverride] + table = "bym_user" + table_description = "用户数据表" + + @classmethod + async def get_user(cls, user_id: str, platform: str | None = None) -> "BymUser": + """获取用户 + + 参数: + user_id: 用户id + platform: 平台. + + 返回: + UserConsole: UserConsole + """ + if not await cls.exists(user_id=user_id): + await cls.create(user_id=user_id, platform=platform) + return await cls.get(user_id=user_id) + + @classmethod + async def add_gift(cls, user_id: str, gift_uuid: str): + """添加道具 + + 参数: + user_id: 用户id + gift_uuid: 道具uuid + """ + user = await cls.get_user(user_id) + user.props[gift_uuid] = user.props.get(gift_uuid, 0) + 1 + await GiftLog.create(user_id=user_id, gift_uuid=gift_uuid, type=0) + await user.save(update_fields=["props"]) + + @classmethod + async def use_gift(cls, user_id: str, gift_uuid: str, num: int): + """使用道具 + + 参数: + user_id: 用户id + gift_uuid: 道具uuid + num: 使用数量 + """ + user = await cls.get_user(user_id) + if user.props.get(gift_uuid, 0) < num: + raise ValueError("道具数量不足") + user.props[gift_uuid] -= num + user.usage_count[gift_uuid] = user.usage_count.get(gift_uuid, 0) + num + create_list = [ + GiftLog(user_id=user_id, gift_uuid=gift_uuid, type=1) for _ in range(num) + ] + await GiftLog.bulk_create(create_list) + await user.save(update_fields=["props", "usage_count"]) diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/__init__.py b/zhenxun/plugins/nonebot_plugin_dorodoro/__init__.py new file mode 100644 index 00000000..2586fa07 --- /dev/null +++ b/zhenxun/plugins/nonebot_plugin_dorodoro/__init__.py @@ -0,0 +1,94 @@ +from nonebot.plugin import PluginMetadata +from nonebot_plugin_alconna import Alconna, Args, Arparma, CommandMeta, Text, on_alconna +from nonebot_plugin_uninfo import Session, UniSession + +from .game_logic import ( + get_next_node, + get_node_data, + is_end_node, + update_user_state, + user_game_state, +) +from .image_handler import send_images + +__plugin_meta__ = PluginMetadata( + name="doro大冒险", + description="一个基于文字冒险的游戏插件", + type="application", + usage=""" + 使用方法: + doro :开始游戏 + choose <选项> 或 选择 <选项>:在游戏中做出选择 + """, + homepage="https://github.com/ATTomatoo/dorodoro", + extra={ + "author": "ATTomatoo", + "version": "1.5.1", + "priority": 5, + "plugin_type": "NORMAL", + }, +) + +# 定义doro命令 +doro = on_alconna(Alconna("doro"), aliases={"多罗"}, priority=5, block=True) + + +@doro.handle() +async def handle_doro(session: Session = UniSession()): + user_id = session.user.id + start_node = "start" + await update_user_state(user_id, start_node) + if start_data := await get_node_data(start_node): + msg = start_data["text"] + "\n" + for key, opt in start_data.get("options", {}).items(): + msg += f"{key}. 
{opt['text']}\n" + + await send_images(start_data.get("image")) + await doro.send(Text(msg), reply_to=True) + else: + await doro.send(Text("游戏初始化失败,请联系管理员。"), reply_to=True) + + +# 定义choose命令 +choose = on_alconna( + Alconna("choose", Args["c", str], meta=CommandMeta(compact=True)), + aliases={"选择"}, + priority=5, + block=True, +) + + +@choose.handle() +async def handle_choose(p: Arparma, session: Session = UniSession()): + user_id = session.user.id + if user_id not in user_game_state: + await choose.finish( + Text("你还没有开始游戏,请输入 /doro 开始。"), reply_to=True + ) + + choice = p.query("c") + assert isinstance(choice, str) + choice = choice.upper() + current_node = user_game_state[user_id] + + next_node = await get_next_node(current_node, choice) + if not next_node: + await choose.finish(Text("无效选择,请重新输入。"), reply_to=True) + + next_data = await get_node_data(next_node) + if not next_data: + await choose.finish(Text("故事节点错误,请联系管理员。"), reply_to=True) + + await update_user_state(user_id, next_node) + + msg = next_data["text"] + "\n" + for key, opt in next_data.get("options", {}).items(): + msg += f"{key}. {opt['text']}\n" + + await send_images(next_data.get("image")) + + if await is_end_node(next_data): + await choose.send(Text(msg + "\n故事结束。"), reply_to=True) + user_game_state.pop(user_id, None) + else: + await choose.finish(Text(msg), reply_to=True) diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/config.py b/zhenxun/plugins/nonebot_plugin_dorodoro/config.py new file mode 100644 index 00000000..01c69215 --- /dev/null +++ b/zhenxun/plugins/nonebot_plugin_dorodoro/config.py @@ -0,0 +1,3 @@ +from pathlib import Path + +IMAGE_DIR = Path(__file__).parent / "images" diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/game_logic.py b/zhenxun/plugins/nonebot_plugin_dorodoro/game_logic.py new file mode 100644 index 00000000..ba4b4896 --- /dev/null +++ b/zhenxun/plugins/nonebot_plugin_dorodoro/game_logic.py @@ -0,0 +1,57 @@ +try: + import ujson as json +except ImportError: + import json +from pathlib import Path +import random + +import aiofiles + +# 构造 story_data.json 的完整路径 +story_data_path = Path(__file__).parent / "story_data.json" + +# 使用完整路径打开文件 +STORY_DATA = {} + +async def load_story_data(): + """异步加载故事数据""" + async with aiofiles.open(story_data_path, encoding="utf-8") as f: + content = await f.read() + global STORY_DATA + STORY_DATA = json.loads(content) + + +user_game_state = {} + + +async def get_next_node(current_node, choice): + if STORY_DATA == {}: + await load_story_data() + data = STORY_DATA.get(current_node, {}) + options = data.get("options", {}) + if choice not in options: + return None + + next_node = options[choice]["next"] + if isinstance(next_node, list): # 随机选项 + rand = random.random() + cumulative = 0.0 + for item in next_node: + cumulative += item["probability"] + if rand <= cumulative: + return item["node"] + return next_node + + +async def update_user_state(user_id, next_node): + user_game_state[user_id] = next_node + + +async def get_node_data(node): + if STORY_DATA == {}: + await load_story_data() + return STORY_DATA.get(node) + + +async def is_end_node(node_data) -> bool: + return node_data.get("is_end", False) diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/image_handler.py b/zhenxun/plugins/nonebot_plugin_dorodoro/image_handler.py new file mode 100644 index 00000000..a3857c11 --- /dev/null +++ b/zhenxun/plugins/nonebot_plugin_dorodoro/image_handler.py @@ -0,0 +1,22 @@ +from nonebot_plugin_alconna import Image, UniMessage + +from .config import IMAGE_DIR + + +async 
def get_image_segment(image_name): + image_path = IMAGE_DIR / image_name + return Image(path=image_path) if image_path.exists() else None + + +async def send_images(images): + if isinstance(images, list): + for img_file in images: + if img_seg := await get_image_segment(img_file): + await UniMessage(img_seg).send(reply_to=True) + else: + await UniMessage(f"图片 {img_file} 不存在。").send(reply_to=True) + elif isinstance(images, str): + if img_seg := await get_image_segment(images): + await UniMessage(img_seg).send(reply_to=True) + else: + await UniMessage(f"图片 {images} 不存在。").send(reply_to=True) diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/1bb22576b2e253fae6b2ddca27cd3384.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/1bb22576b2e253fae6b2ddca27cd3384.jpg new file mode 100644 index 00000000..b372eadb Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/1bb22576b2e253fae6b2ddca27cd3384.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/748ad50bef1249c2c16385c4b4c22ed5.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/748ad50bef1249c2c16385c4b4c22ed5.jpg new file mode 100644 index 00000000..9a5ed023 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/748ad50bef1249c2c16385c4b4c22ed5.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/abd814eba4fa165f44f3e16fb93b3a72.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/abd814eba4fa165f44f3e16fb93b3a72.png new file mode 100644 index 00000000..204f7cfe Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/abd814eba4fa165f44f3e16fb93b3a72.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/ba904a2d0a5779a13b4ab8cd145f5cb2.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/ba904a2d0a5779a13b4ab8cd145f5cb2.png new file mode 100644 index 00000000..96eec493 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/ba904a2d0a5779a13b4ab8cd145f5cb2.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/bad_ending.jpeg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/bad_ending.jpeg new file mode 100644 index 00000000..4114825d Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/bad_ending.jpeg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/bad_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/bad_ending.png new file mode 100644 index 00000000..8960053f Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/bad_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/butterfly_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/butterfly_ending.png new file mode 100644 index 00000000..16e575f8 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/butterfly_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/clouds_ending.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/clouds_ending.jpg new file mode 100644 index 00000000..4a5580fd Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/clouds_ending.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/dajiao.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/dajiao.jpg new file mode 100644 index 00000000..bb44b5d5 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/dajiao.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/despot_end_true.jpg 
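As a side note on the branching mechanic used by `game_logic.get_next_node` above and by `story_data.json` (added later in this diff): an option's `next` field is either a plain node id or a list of `{"node", "probability"}` entries, and a single `random.random()` draw is resolved against the cumulative probabilities. Below is a minimal, self-contained sketch of that selection; the sample option is hypothetical, and the final fallback to the last entry is only a defensive choice for lists whose probabilities do not sum to exactly 1.

```python
import random

# Hypothetical option in the same shape story_data.json uses:
# "next" is either a node id or a weighted list of candidate nodes.
option = {
    "text": "随机冒险",
    "next": [
        {"node": "study", "probability": 0.25},
        {"node": "work", "probability": 0.25},
        {"node": "meet", "probability": 0.25},
        {"node": "hidden_tunnel", "probability": 0.25},
    ],
}


def pick_next(next_value: str | list[dict]) -> str:
    """Resolve an option's "next" value to a concrete node id."""
    if not isinstance(next_value, list):
        return next_value  # plain node id, no randomness involved
    roll = random.random()
    cumulative = 0.0
    for item in next_value:
        cumulative += item["probability"]
        if roll <= cumulative:
            return item["node"]
    # Defensive fallback: if the probabilities sum to slightly less than 1,
    # fall back to the last candidate instead of returning nothing.
    return next_value[-1]["node"]


print(pick_next(option["next"]))
```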
b/zhenxun/plugins/nonebot_plugin_dorodoro/images/despot_end_true.jpg new file mode 100644 index 00000000..c804ff0f Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/despot_end_true.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/doro_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/doro_ending.png new file mode 100644 index 00000000..e2fef3d8 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/doro_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/dream_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/dream_ending.png new file mode 100644 index 00000000..84d30d88 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/dream_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/gaokao_ending.jpeg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/gaokao_ending.jpeg new file mode 100644 index 00000000..5363485b Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/gaokao_ending.jpeg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/gingganggoolie_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/gingganggoolie_ending.png new file mode 100644 index 00000000..57c91e09 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/gingganggoolie_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/good_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/good_ending.png new file mode 100644 index 00000000..c6c0bc68 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/good_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/immortal_end_true.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/immortal_end_true.jpg new file mode 100644 index 00000000..f5955a63 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/immortal_end_true.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/indolent_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/indolent_ending.png new file mode 100644 index 00000000..365da843 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/indolent_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/jiangwei_ending.jpeg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/jiangwei_ending.jpeg new file mode 100644 index 00000000..f5f2f7c0 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/jiangwei_ending.jpeg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/jingshenhunluan_ending.jpeg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/jingshenhunluan_ending.jpeg new file mode 100644 index 00000000..c68dde87 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/jingshenhunluan_ending.jpeg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/kaoyan_ending.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/kaoyan_ending.jpg new file mode 100644 index 00000000..4a0a36a9 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/kaoyan_ending.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/laborer_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/laborer_ending.png new file mode 100644 index 00000000..29c4083f Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/laborer_ending.png differ diff --git 
a/zhenxun/plugins/nonebot_plugin_dorodoro/images/laze_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/laze_ending.png new file mode 100644 index 00000000..26be6c06 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/laze_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/marry_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/marry_ending.png new file mode 100644 index 00000000..95e76377 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/marry_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/moyu_ending.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/moyu_ending.jpg new file mode 100644 index 00000000..3b1930a1 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/moyu_ending.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/neijuan_ending.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/neijuan_ending.jpg new file mode 100644 index 00000000..ba6ae29e Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/neijuan_ending.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/netcafe_clerk_end_true.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/netcafe_clerk_end_true.jpg new file mode 100644 index 00000000..e91fe008 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/netcafe_clerk_end_true.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/orange_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/orange_ending.png new file mode 100644 index 00000000..cebf004a Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/orange_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/postgraduate_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/postgraduate_ending.png new file mode 100644 index 00000000..efbfb5bf Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/postgraduate_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/procrastination_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/procrastination_ending.png new file mode 100644 index 00000000..d9ef949f Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/procrastination_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/race_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/race_ending.png new file mode 100644 index 00000000..b16a401b Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/race_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/shadow_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/shadow_ending.png new file mode 100644 index 00000000..5de4c315 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/shadow_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/shekong_ending.jpeg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/shekong_ending.jpeg new file mode 100644 index 00000000..67126a01 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/shekong_ending.jpeg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/sloth_ending.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/sloth_ending.jpg new file mode 100644 index 00000000..8d94323b Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/sloth_ending.jpg differ diff 
--git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/staffawakening2_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/staffawakening2_ending.png new file mode 100644 index 00000000..00452a84 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/staffawakening2_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/staffawakening_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/staffawakening_ending.png new file mode 100644 index 00000000..694a961f Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/staffawakening_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/stone_ending.png b/zhenxun/plugins/nonebot_plugin_dorodoro/images/stone_ending.png new file mode 100644 index 00000000..bef371cd Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/stone_ending.png differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/takeofffailed_ending.jpeg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/takeofffailed_ending.jpeg new file mode 100644 index 00000000..14737ec1 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/takeofffailed_ending.jpeg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/images/tangying_ending.jpg b/zhenxun/plugins/nonebot_plugin_dorodoro/images/tangying_ending.jpg new file mode 100644 index 00000000..87a40ce4 Binary files /dev/null and b/zhenxun/plugins/nonebot_plugin_dorodoro/images/tangying_ending.jpg differ diff --git a/zhenxun/plugins/nonebot_plugin_dorodoro/story_data.json b/zhenxun/plugins/nonebot_plugin_dorodoro/story_data.json new file mode 100644 index 00000000..f08886cb --- /dev/null +++ b/zhenxun/plugins/nonebot_plugin_dorodoro/story_data.json @@ -0,0 +1,557 @@ +{ + "start": { + "text": "欢迎来到Doro的世界!\n当前状态:迷茫的年轻人", + "options": { + "A": {"text": "读书", "next": "study"}, + "B": {"text": "打工", "next": "work"}, + "C": {"text": "认识陌生人", "next": "meet"}, + "D": {"text": "随机冒险", "next": [ + {"node": "study", "probability": 0.25}, + {"node": "work", "probability": 0.25}, + {"node": "meet", "probability": 0.25}, + {"node": "hidden_tunnel", "probability": 0.25} + ]} + } + }, + "hidden_tunnel": { + "text": "狭窄的通风管通向未知的地方,你似乎能闻到不同的气息:\nA.下水道的潮湿异味 B.办公室鱼缸的清新水汽 C.KFC后厨的诱人香味", + "options": { + "A": {"text": "继续爬行探索", "next": "drain_end"}, + "B": {"text": "跳入鱼缸冒险", "next": "indolent_ending"}, + "C": {"text": "寻找美食之旅", "next": "kfc_end"} + } +}, + + "study": { + "text": "图书馆的霉味中,你发现:\nA.考研真题 B.发光菌菇 C.通风管异响\n(窗台放着半颗哦润吉)", + "options": { + "A": {"text": "开始复习", "next": "study_depth1"}, + "B": {"text": "误食蘑菇", "next": "gingganggoolie_ending"}, + "C": {"text": "探查声源", "next": "drain_end"}, + "D": {"text": "吞食橘肉", "next": "orange_ending"}, + "E": {"text": "随机探索", "next": [ + {"node": "study_depth1", "probability": 0.3}, + {"node": "gingganggoolie_ending", "probability": 0.2}, + {"node": "drain_end", "probability": 0.2}, + {"node": "orange_ending", "probability": 0.3} + ]} + } +}, +"study_depth1": { + "text": "连续熬夜第七天:\nA.真题出现幻觉涂鸦 B.钢笔漏墨 C.听见歌声", + "options": { + "A": { + "text": "研究涂鸦", + "next": [ + {"node": "study_depth2_art", "probability": 0.7}, + {"node": "jingshenhunluan_ending", "probability": 0.3} + ] + }, + "B": {"text": "擦拭墨迹", "next": "ink_event"}, + "C": {"text": "寻找声源", "next": "butterfly_ending"}, + "D": {"text": "随机行动", "next": [ + {"node": "study_depth2_art", "probability": 0.2}, + {"node": "jingshenhunluan_ending", "probability": 0.2}, + {"node": "ink_event", "probability": 0.2}, + 
{"node": "butterfly_ending", "probability": 0.4} + ]} + } +}, +"study_depth2_art": { + "text": "涂鸦开始蠕动:\nA.跟随舞蹈 B.拍照上传 C.撕毁书页", + "options": { + "A": { + "text": "模仿动作", + "next": [ + {"node": "shadow_ending", "probability": 0.6}, + {"node": "butterfly_ending", "probability": 0.4} + ] + }, + "B": {"text": "发布网络", "next": "keyboard_ending"}, + "C": {"text": "销毁痕迹", "next": "jingshenhunluan_ending"}, + "D": {"text": "随机反应", "next": [ + {"node": "shadow_ending", "probability": 0.2}, + {"node": "butterfly_ending", "probability": 0.2}, + {"node": "keyboard_ending", "probability": 0.3}, + {"node": "jingshenhunluan_ending", "probability": 0.3} + ]} + } +}, +"ink_event": { + "text": "墨水形成漩涡:\nA.触碰黑液 B.泼水冲洗 C.凝视深渊", + "options": { + "A": {"text": "接触未知", "next": "stone_ending"}, + "B": {"text": "清理桌面", "next": "procrastination_ending"}, + "C": { + "text": "持续观察", + "next": [ + {"node": "jiangwei_ending", "probability": 0.8}, + {"node": "clouds_ending", "probability": 0.2} + ] + }, + "D": {"text": "随机处置", "next": [ + {"node": "stone_ending", "probability": 0.2}, + {"node": "procrastination_ending", "probability": 0.2}, + {"node": "jiangwei_ending", "probability": 0.3}, + {"node": "clouds_ending", "probability": 0.3} + ]} + } +}, +"study_depth3_madness": { + "text": "你的笔记开始扭曲:\nA.继续解题 B.逃向天台 C.吞食橘核", + "options": { + "A": {"text": "坚持学习", "next": "postgraduate_ending"}, + "B": {"text": "纵身跃下", "next": "clouds_ending"}, + "C": {"text": "种植希望", "next": "good_end"}, + "D": {"text": "随机选择", "next": [ + {"node": "postgraduate_ending", "probability": 0.2}, + {"node": "clouds_ending", "probability": 0.3}, + {"node": "good_end", "probability": 0.5} + ]} + } +}, + +"work": { + "text": "人才市场三个招聘点:\nA.福报大厂 B.摸鱼公司 C.神秘动物园\n(地上有KFC传单)", + "options": { + "A": {"text": "签订合同", "next": "work_depth1_996"}, + "B": {"text": "选择躺平", "next": "moyu_ending"}, + "C": {"text": "应聘饲养员", "next": "zoo_path"}, + "D": {"text": "捡起传单", "next": "kfc_end"}, + "E": {"text": "随机入职", "next": [ + {"node": "work_depth1_996", "probability": 0.2}, + {"node": "moyu_ending", "probability": 0.2}, + {"node": "zoo_path", "probability": 0.3}, + {"node": "kfc_end", "probability": 0.3} + ]} + } +}, +"work_depth1_996": { + "text": "入职第三周:\nA.继续内卷 B.安装摸鱼插件 C.出现幻觉", + "options": { + "A": { + "text": "拼命加班", + "next": [ + {"node": "race_ending", "probability": 0.7}, + {"node": "postgraduate_ending", "probability": 0.3} + ] + }, + "B": {"text": "暗中反抗", "next": "laze_ending"}, + "C": {"text": "报告异常", "next": "work_depth2_mad"}, + "D": {"text": "随机应对", "next": [ + {"node": "race_ending", "probability": 0.2}, + {"node": "postgraduate_ending", "probability": 0.2}, + {"node": "laze_ending", "probability": 0.3}, + {"node": "work_depth2_mad", "probability": 0.3} + ]} + } +}, +"work_depth2_mad": { + "text": "HR递来药丸:\nA.红色提神丸 B.蓝色遗忘剂 C.彩色致幻剂", + "options": { + "A": { + "text": "吞下红丸", + "next": [ + {"node": "sloth_ending", "probability": 0.6}, + {"node": "race_ending", "probability": 0.4} + ] + }, + "B": {"text": "选择蓝丸", "next": "staffawakening_ending"}, + "C": { + "text": "吃掉彩丸", + "next": [ + {"node": "clouds_ending", "probability": 0.5}, + {"node": "soviet_ending", "probability": 0.3}, + {"node": "despot_end", "probability": 0.3} + ] + }, + "D": {"text": "随机服药", "next": [ + {"node": "sloth_ending", "probability": 0.2}, + {"node": "race_ending", "probability": 0.2}, + {"node": "staffawakening_ending", "probability": 0.2}, + {"node": "clouds_ending", "probability": 0.2}, + {"node": "despot_end", "probability": 0.2} + ]} + } +}, +"zoo_path": { + "text": 
"园长分配区域:\nA.熊猫馆 B.极地馆 C.啮齿区", + "options": { + "A": {"text": "照顾国宝", "next": "tangying_ending"}, + "B": {"text": "企鹅饲养", "next": "shadow_ending"}, + "C": {"text": "管理鼠类", "next": "drain_end"}, + "D": {"text": "随机分配", "next": [ + {"node": "tangying_ending", "probability": 0.2}, + {"node": "shadow_ending", "probability": 0.3}, + {"node": "drain_end", "probability": 0.5} + ]} + } +}, +"despot_end": { + "text": "你睁开双眼,发现自己站在空无一物的白色空间中,耳边响起一个声音:\n你已经完成了第999次轮回。这一次,你想做什么?", + "options": { + "A": {"text": "寻找超脱的方法", "next": "despot_end1"}, + "B": {"text": "获得永恒的生命", "next": "immortal_end"}, + "C": {"text": "放弃挣扎,过平凡生活", "next": "netcafe_clerk_end"}, + "D": {"text": "随机分配", "next": [ + {"node": "despot_end1", "probability": 0.2}, + {"node": "immortal_end", "probability": 0.3}, + {"node": "netcafe_clerk_end", "probability": 0.5} + ]} + } +}, +"netcafe_clerk_end": { + "text": "你走出虚无,回到现实社会。", + "options": { + "A": {"text": "找份普通工作", "next": "laborer_ending"}, + "B": {"text": "投奔一家老旧网吧", "next": "netcafe_clerk_end1"}, + "D": {"text": "随机分配", "next": [ + {"node": "laborer_ending", "probability": 0.5}, + {"node": "netcafe_clerk_end1", "probability": 0.5} + ]} + } +}, +"netcafe_clerk_end1": { + "text": "你成了网吧的前台网管,收敛了曾经的野心。", + "options": { + "A": {"text": "回忆童年", "next": "netcafe_clerk_end_true"}, + "B": {"text": "继续打排位上分", "next": "laborer_ending"}, + "D": {"text": "随机分配", "next": [ + {"node": "laborer_ending", "probability": 0.5}, + {"node": "netcafe_clerk_end_true", "probability": 0.5} + ]} + } +}, +"immortal_end": { + "text": "一位神秘旅人告诫你:“永生或许并非祝福。", + "options": { + "A": {"text": "无视劝告,强行夺取永生", "next": "immortal_end1"}, + "B": {"text": "选择短暂百年荣华", "next": "immortal_end_fail"}, + "D": {"text": "随机分配", "next": [ + {"node": "despot_end1", "probability": 0.5}, + {"node": "netcafe_clerk_end", "probability": 0.5} + ]} + } +}, +"immortal_end1": { + "text": "你吞下永恒之果,获得永生之躯。千年之后,目睹挚爱离世,国度覆灭。", + "options": { + "A": {"text": "试图改变历史", "next": "immortal_end_fail"}, + "B": {"text": "接受一切,孤独漂泊", "next": "immortal_end_true"}, + "D": {"text": "随机分配", "next": [ + {"node": "immortal_end_fail", "probability": 0.5}, + {"node": "immortal_end_true", "probability": 0.5} + ]} + } +}, +"despot_end1": { + "text": "你踏上了追寻禁忌知识的旅程,途中一位疯癫老者递给你一本破烂的书。", + "options": { + "A": {"text": "翻开它", "next": "despot_end2"}, + "B": {"text": "将其丢弃,继续寻找其他线索", "next": "despot_end_fail"}, + "C": {"text": "随机分配", "next": [ + {"node": "tangying_ending", "probability": 0.5}, + {"node": "despot_end_fail", "probability": 0.5} + ]} + } +}, +"despot_end2": { + "text": "书页泛黄,记载着‘虚空之源’的秘密。你需献祭一段回忆换取一块虚空之石。", + "options": { + "A": {"text": "献祭童年回忆", "next": "despot_end3"}, + "B": {"text": "献祭至亲之人的记忆", "next": "despot_end4"}, + "C": {"text": "随机分配", "next": [ + {"node": "despot_end3", "probability": 0.5}, + {"node": "despot_end4", "probability": 0.5} + ]} + } +}, +"despot_end3": { + "text": "献祭完成,你获得虚空之石,感知到自己便是世界本源。", + "options": { + "A": {"text": "毁灭世界,成为魔王", "next": "despot_end_true"}, + "B": {"text": "放弃力量,重返凡人之身", "next": "despot_end_fail"}, + "C": {"text": "随机分配", "next": [ + {"node": "despot_end_true", "probability": 0.5}, + {"node": "despot_end_fail", "probability": 0.5} + ]} + } +}, +"despot_end4": { + "text": "你泪流满面,完成献祭,虚空之石散发着幽光。", + "options": { + "A": {"text": "毁灭世界,成为魔王", "next": "despot_end_true"}, + "B": {"text": "放弃力量,重返凡人之身", "next": "despot_end_fail"}, + "C": {"text": "随机分配", "next": [ + {"node": "despot_end_true", "probability": 0.5}, + {"node": "despot_end_fail", "probability": 0.5} + ]} + } +}, +"meet": { + "text": 
"神秘人Doro出现:\nA.分享橘子 B.查看相册 C.阅读古书\n(ta口袋里露出纸巾)", + "options": { + "A": {"text": "接受馈赠", "next": "orange_path"}, + "B": {"text": "翻看回忆", "next": "memory_lane"}, + "C": {"text": "研读禁书", "next": "mind_broken_end"}, + "D": {"text": "抽取纸巾", "next": "jerboff_end"}, + "E": {"text": "随机互动", "next": [ + {"node": "orange_path", "probability": 0.2}, + {"node": "memory_lane", "probability": 0.2}, + {"node": "mind_broken_end", "probability": 0.3}, + {"node": "jerboff_end", "probability": 0.3} + ]} + } +}, +"orange_path": { + "text": "橘子散发微光:\nA.独自吃完 B.种下果核 C.分享他人", + "options": { + "A": { + "text": "沉迷美味", + "next": [ + {"node": "orange_ending", "probability": 0.8}, + {"node": "good_end", "probability": 0.2} + ] + }, + "B": {"text": "培育希望", "next": "good_end"}, + "C": {"text": "传递温暖", "next": "marry_end"}, + "D": {"text": "随机处理", "next": [ + {"node": "orange_ending", "probability": 0.3}, + {"node": "good_end", "probability": 0.3}, + {"node": "marry_end", "probability": 0.4} + ]} + } +}, +"memory_lane": { + "text": "泛黄照片中的你:\nA.高考考场 B.童年小床 C.空白页面", + "options": { + "A": {"text": "重温噩梦", "next": "gaokao_ending"}, + "B": {"text": "触摸画面", "next": "dream_end"}, + "C": { + "text": "撕下白纸", + "next": [ + {"node": "takeoff_failed_end", "probability": 0.7}, + {"node": "takeoff_failed_end1", "probability": 0.3} + ] + }, + "D": {"text": "随机回忆", "next": [ + {"node": "gaokao_ending", "probability": 0.2}, + {"node": "dream_end", "probability": 0.3}, + {"node": "takeoff_failed_end", "probability": 0.3}, + {"node": "takeoff_failed_end1", "probability": 0.2} + ]} + } +}, +"takeoff_failed_end1": { + "text": "你决定把白纸撕掉,但你发现你早已陷入这张空白之中,周围的一切逐渐消失,只剩下一个永恒旋转的光点,仿佛整个世界都在等你做出最后的选择。", + "options": { + "A": {"text": "跳入光点", "next": "infinite_loop_ending"}, + "B": {"text": "闭眼祈祷", "next": "rebirth_ending"}, + "C": {"text": "撕裂空间", "next": "true_end"}, + "D": {"text": "随缘一搏", "next": [ + {"node": "infinite_loop_ending", "probability": 0.3}, + {"node": "rebirth_ending", "probability": 0.4}, + {"node": "true_end", "probability": 0.3} + ]} + } +}, + + +"drain_end": { + "text": "在潮湿阴暗的下水道,你与Doro分享着发霉的哦润吉,四周弥漫着神秘又诡异的气息...", + "image": "1bb22576b2e253fae6b2ddca27cd3384.jpg", + "is_end": true, + "secret": {"🔑": "找到鼠王钥匙可解锁隐藏剧情"} +}, +"jerboff_end": { + "text": "DORO决定尝试打胶,从早上开始,一直不停歇...", + "image": "dajiao.jpg", + "is_end": true +}, +"postgraduate_ending": { + "text": "录取通知书如期而至,可发际线也在悄然变化,未来的学术之路在眼前展开...", + "image": "postgraduate_ending.png", + "is_end": true +}, +"immortal_end_fail": { + "text": "命运无情,将你抛弃于岁月洪流,你终究只是尘埃...", + "image": "none.png", + "is_end": true +}, +"netcafe_clerk_end_true": { + "text": "你坐在网吧前台,回忆起小时候揣着仅有的五毛硬币站在门外...", + "image": "netcafe_clerk_end_true.jpg", + "is_end": true +}, +"laborer_ending": { + "text": "你的人生泛不起波澜,如同浮萍般随波逐流...", + "image": "laborer_ending.png", + "is_end": true +}, +"immortal_end_true": { + "text": "你肆意奔跑,放任时间如沙粒般从指尖流走。最终,孤独是你唯一的伴侣...", + "image": "immortal_end_true.jpg", + "is_end": true +}, +"despot_end_fail": { + "text": "你迷失在虚无与梦境之间,最终化作尘埃,轮回再次开始。...", + "image": "none.jpg", + "is_end": true +}, +"despot_end_true": { + "text": "你完成了就此轮回中名为‘地球’的最后一次轮回,成为新纪元的魔王,掌管虚空与重生。...", + "image": "despot_end_true.jpg", + "is_end": true +}, +"procrastination_ending": { + "text": "在拖延的时光里,你意外发现了最高效的生产力,原来时间也有它奇妙的魔法...", + "image": "procrastination_ending.png", + "is_end": true +}, +"takeoff_failed_end":{ + "text": "你决定把白纸撕掉,但你发现你无法从白纸中解开它...", + "image": "takeofffailed_ending.jpeg", + "is_end": true +}, +"mind_broken_end":{ + "text": "你决定阅读禁书,但你发现你无法从禁书中解开它...", + "image": "mind_broken_end.png", + 
"is_end": true +}, +"staffawakening_ending":{ + "text": "你坐在办公室里,盯着Excel表格,感觉自己像一台没有感情的机器...", + "image": "staffawakening_ending.png", + "is_end": true +}, +"laze_ending":{ + "text": "你决定做懒人,但你发现你无法从懒人中解开它...", + "image": "laze_ending.png", + "is_end": true +}, +"gaokao_ending":{ + "text": "高考成绩公布后,你决定投奔你的梦想,但你发现你的计划并不太现实...", + "image": "gaokao_ending.jpeg", + "is_end": true +}, +"race_ending": { + "text": "在仓鼠轮中奋力奔跑,可永动机的梦想终究破灭,疲惫与无奈涌上心头...", + "image": "neijuan_ending.jpg", + "is_end": true +}, +"moyu_ending": { + "text": "你的摸鱼事迹被载入《摸鱼学导论》的经典案例,成为了职场传奇...", + "image": "moyu_ending.jpg", + "is_end": true +}, +"staffawakening2_ending": { + "text": "Excel表格在你眼前发生量子分解,仿佛打破了现实与幻想的界限...", + "image": "staffawakening2_ending.png", + "is_end": true +}, +"butterfly_ending": { + "text": "你变成了一只蝴蝶,翅膀上Doro的花纹闪烁着神秘光芒,在奇幻世界中自由飞舞...", + "image": "butterfly_ending.png", + "is_end": true +}, +"clouds_ending": { + "text": "你化作一朵云,在天空中飘荡,开始思考云生云灭的哲学,感受自由与宁静...", + "image": "clouds_ending.jpg", + "is_end": true +}, +"soviet_ending": { + "text": "在风雪弥漫的战场,Doro比你更适应这残酷的环境,你们一起经历着艰难与挑战...", + "image": "bad_ending.jpeg", + "is_end": true +}, +"tangying_ending": { + "text": "作为熊猫饲养员,你受到游客喜爱,他们甚至为你众筹哦润吉自由,生活充满温暖与惊喜...", + "image": "tangying_ending.jpg", + "is_end": true +}, +"stone_ending": { + "text": "你变成了一块石头,静静躺在河边,看着河水潺潺流过,记忆在时光中沉淀...", + "image": "stone_ending.png", + "is_end": true +}, +"sloth_ending": { + "text": "变成树懒的你,在树上享受着悠闲时光,光合作用效率达到树懒巅峰,生活惬意又自在...", + "image": "sloth_ending.jpg", + "is_end": true +}, +"gingganggoolie_ending": { + "text": "服用灵感菇后,小人儿在你眼前忙碌编排着你的命运,奇幻与荒诞交织...", + "image": "gingganggoolie_ending.png", + "is_end": true +}, +"jingshenhunluan_ending": { + "text": "阅读破旧书籍时,书页间的Doro似乎在嘲笑你的理智,精神世界陷入混乱...", + "image": "jingshenhunluan_ending.jpeg", + "is_end": true +}, +"jiangwei_ending": { + "text": "你的表情包在二维宇宙中迅速扩散,成为了虚拟世界的热门话题,开启新的次元之旅...", + "image": "jiangwei_ending.jpeg", + "is_end": true +}, +"keyboard_ending": { + "text": "右手变成键盘后,每个按键都像是灵魂的墓碑,诉说着无奈与挣扎...", + "image": "bad_ending.png", + "is_end": true +}, +"kfc_end": { + "text": "在疯狂星期四,KFC的美味验证了宇宙真理,快乐与满足在此刻绽放...", + "image": "abd814eba4fa165f44f3e16fb93b3a72.png", + "is_end": true +}, +"dream_end": { + "text": "小笨床仿佛拥有魔力,逐渐吞噬现实维度,带你进入奇妙梦境...", + "image": ["dream_ending.png"], + "is_end": true, + "trigger": ["三次选择睡觉选项"] +}, +"shadow_ending": { + "text": "你成为社畜们的集体潜意识,在黑暗中默默观察着职场的风云变幻...", + "image": "shadow_ending.png", + "is_end": true, + "callback": ["corpse_cycle"] +}, +"good_end": { + "text": "你和Doro携手找到了量子态的幸福,生活充满了彩虹般的色彩与希望...", + "image": "good_ending.png", + "is_end": true, + "condition": ["解锁5个普通结局"] +}, +"orange_ending": { + "text": "哦润吉的魔力完成了对你的精神同化,你沉浸在它的甜蜜世界中无法自拔...", + "image": "orange_ending.png", + "is_end": true, + "secret_path": ["在所有分支找到隐藏橘子"] +}, +"marry_end": { + "text": "❤️ 触发【登记结局】", + "image": "marry_ending.png", + "is_end": true +}, +"indolent_ending":{ + "text": "你变成了一条鱼,生活在公司办公司的鱼缸里...", + "image": "indolent_ending.png", + "is_end": true +}, +"infinite_loop_ending": { + "text": "你跳入光点,眼前世界瞬间扭曲,再次回到‘欢迎来到Doro的世界!’\n你意识到,这或许是无尽的轮回,或许……你本就是这里的一部分。", + "image": "loop.png", + "is_end": true +}, +"rebirth_ending": { + "text": "你闭上双眼,默念一个无人知晓的名字。光点悄然消散,一缕晨光洒在你脸上——新的世界悄然开启,你成为了另一个自己。", + "image": "rebirth.png", + "is_end": true +}, +"true_end": { + "text": "你撕裂空间,一道璀璨裂缝浮现,Doro的声音在耳边回荡:‘原来你就是命定之人。’\n你成功跳脱这个虚拟轮回,获得‘旁观者之眼’,从此能看破所有世界线的秘密。", + "image": "true_end.png", + "is_end": true, + "secret": {"👁️": "解锁后可开启‘观测者模式’体验隐藏剧情"} +} + +} diff --git a/zhenxun/services/cache.py b/zhenxun/services/cache.py new file mode 
100644 index 00000000..4957029f --- /dev/null +++ b/zhenxun/services/cache.py @@ -0,0 +1,701 @@ +from collections.abc import Callable +from datetime import datetime +from functools import wraps +import inspect +from typing import Any, ClassVar, Generic, TypeVar + +from aiocache import Cache as AioCache + +# from aiocache.backends.redis import RedisCache +from aiocache.base import BaseCache +from aiocache.serializers import JsonSerializer +import nonebot +from nonebot.compat import model_dump +from nonebot.utils import is_coroutine_callable +from pydantic import BaseModel + +from zhenxun.services.log import logger + +__all__ = ["Cache", "CacheData", "CacheRoot"] + +T = TypeVar("T") + +LOG_COMMAND = "cache" + +driver = nonebot.get_driver() + + +class Config(BaseModel): + redis_host: str | None = None + """redis地址""" + redis_port: int | None = None + """redis端口""" + redis_password: str | None = None + """redis密码""" + redis_expire: int = 600 + """redis过期时间""" + + +config = nonebot.get_plugin_config(Config) + + +class DbCacheException(Exception): + """缓存相关异常""" + + def __init__(self, info: str): + self.info = info + + def __str__(self) -> str: + return self.info + + +def validate_name(func: Callable): + """验证缓存名称是否存在的装饰器""" + + def wrapper(self, name: str, *args, **kwargs): + _name = name.upper() + if _name not in CacheManager._data: + raise DbCacheException(f"缓存数据 {name} 不存在") + return func(self, _name, *args, **kwargs) + + return wrapper + + +class CacheGetter(BaseModel, Generic[T]): + """缓存数据获取器""" + + get_func: Callable[..., Any] | None = None + get_all_func: Callable[..., Any] | None = None + + async def get(self, cache_data: "CacheData", key: str, *args, **kwargs) -> T: + """获取单个缓存数据""" + if not self.get_func: + data = await cache_data.get_key(key) + if cache_data.result_model: + return cache_data._deserialize_value(data, cache_data.result_model) + return data + + if is_coroutine_callable(self.get_func): + data = await self.get_func(cache_data, key, *args, **kwargs) + else: + data = self.get_func(cache_data, key, *args, **kwargs) + + if cache_data.result_model: + return cache_data._deserialize_value(data, cache_data.result_model) + return data + + async def get_all(self, cache_data: "CacheData", *args, **kwargs) -> dict[str, T]: + """获取所有缓存数据""" + if not self.get_all_func: + data = await cache_data.get_all_data() + if cache_data.result_model: + return { + k: cache_data._deserialize_value(v, cache_data.result_model) + for k, v in data.items() + } + return data + + if is_coroutine_callable(self.get_all_func): + data = await self.get_all_func(cache_data, *args, **kwargs) + else: + data = self.get_all_func(cache_data, *args, **kwargs) + + if cache_data.result_model: + return { + k: cache_data._deserialize_value(v, cache_data.result_model) + for k, v in data.items() + } + return data + + +class CacheData(BaseModel): + """缓存数据模型""" + + name: str + func: Callable[..., Any] + getter: CacheGetter | None = None + updater: Callable[..., Any] | None = None + with_refresh: Callable[..., Any] | None = None + expire: int = 600 # 默认10分钟过期 + reload_count: int = 0 + lazy_load: bool = True # 默认延迟加载 + result_model: type | None = None + _keys: set[str] = set() # 存储所有缓存键 + _cache: BaseCache | AioCache + + class Config: + arbitrary_types_allowed = True + + def _deserialize_value(self, value: Any, target_type: type | None = None) -> Any: + """反序列化值,将JSON数据转换回原始类型 + + 参数: + value: 需要反序列化的值 + target_type: 目标类型,用于指导反序列化 + + 返回: + 反序列化后的值 + """ + if value is None: + return None + + # 如果是字典且指定了目标类型 + if 
isinstance(value, dict) and target_type: + # 处理Tortoise-ORM Model + if hasattr(target_type, "_meta"): + return self._extracted_from__deserialize_value_19(value, target_type) + elif hasattr(target_type, "model_validate"): + return target_type.model_validate(value) + elif hasattr(target_type, "from_dict"): + return target_type.from_dict(value) + elif hasattr(target_type, "parse_obj"): + return target_type.parse_obj(value) + else: + return target_type(**value) + + # 处理列表类型 + if isinstance(value, list): + if not value: + return value + if ( + target_type + and hasattr(target_type, "__origin__") + and target_type.__origin__ is list + ): + item_type = target_type.__args__[0] + return [self._deserialize_value(item, item_type) for item in value] + return [self._deserialize_value(item) for item in value] + + # 处理字典类型 + if isinstance(value, dict): + return {k: self._deserialize_value(v) for k, v in value.items()} + + return value + + def _extracted_from__deserialize_value_19(self, value, target_type): + # 处理字段值 + processed_value = {} + for field_name, field_value in value.items(): + if field := target_type._meta.fields_map.get(field_name): + # 跳过反向关系字段 + if hasattr(field, "_related_name"): + continue + processed_value[field_name] = field_value + + logger.debug(f"处理后的值: {processed_value}") + + # 创建模型实例 + instance = target_type() + # 设置字段值 + for field_name, field_value in processed_value.items(): + if field_name in target_type._meta.fields_map: + field = target_type._meta.fields_map[field_name] + # 设置字段值 + try: + if hasattr(field, "to_python_value"): + if not field.field_type: + logger.debug(f"字段 {field_name} 类型为空") + continue + field_value = field.to_python_value(field_value) + setattr(instance, field_name, field_value) + except Exception as e: + logger.warning(f"设置字段 {field_name} 失败", e=e) + + # 设置 _saved_in_db 标志 + instance._saved_in_db = True + return instance + + async def get_data(self) -> Any: + """从缓存获取数据""" + try: + data = await self._cache.get(self.name) + logger.debug(f"获取缓存 {self.name} 数据: {data}") + + # 如果数据为空,尝试重新加载 + # if data is None: + # logger.debug(f"缓存 {self.name} 数据为空,尝试重新加载") + # try: + # if self.has_args(): + # new_data = ( + # await self.func() + # if is_coroutine_callable(self.func) + # else self.func() + # ) + # else: + # new_data = ( + # await self.func() + # if is_coroutine_callable(self.func) + # else self.func() + # ) + + # await self.set_data(new_data) + # self.reload_count += 1 + # logger.info(f"重新加载缓存 {self.name} 完成") + # return new_data + # except Exception as e: + # logger.error(f"重新加载缓存 {self.name} 失败: {e}") + # return None + + # 使用 result_model 进行反序列化 + if self.result_model: + return self._deserialize_value(data, self.result_model) + + return data + except Exception as e: + logger.error(f"获取缓存 {self.name} 失败: {e}") + return None + + def _serialize_value(self, value: Any) -> Any: + """序列化值,将数据转换为JSON可序列化的格式 + + 参数: + value: 需要序列化的值 + + 返回: + JSON可序列化的值 + """ + if value is None: + return None + + # 处理datetime + if isinstance(value, datetime): + return value.isoformat() + + # 处理Tortoise-ORM Model + if hasattr(value, "_meta") and hasattr(value, "__dict__"): + result = {} + for field in value._meta.fields: + try: + field_value = getattr(value, field) + # 跳过反向关系字段 + if isinstance(field_value, list | set) and hasattr( + field_value, "_related_name" + ): + continue + # 跳过外键关系字段 + if hasattr(field_value, "_meta"): + field_value = getattr( + field_value, value._meta.fields[field].related_name or "id" + ) + result[field] = self._serialize_value(field_value) + except 
AttributeError: + continue + return result + + # 处理Pydantic模型 + elif isinstance(value, BaseModel): + return model_dump(value) + elif isinstance(value, dict): + # 处理字典 + return {str(k): self._serialize_value(v) for k, v in value.items()} + elif isinstance(value, list | tuple | set): + # 处理列表、元组、集合 + return [self._serialize_value(item) for item in value] + elif isinstance(value, int | float | str | bool): + # 基本类型直接返回 + return value + else: + # 其他类型转换为字符串 + return str(value) + + async def set_data(self, value: Any): + """设置缓存数据""" + try: + # 1. 序列化数据 + serialized_value = self._serialize_value(value) + logger.debug(f"设置缓存 {self.name} 原始数据: {value}") + logger.debug(f"设置缓存 {self.name} 序列化后数据: {serialized_value}") + + # 2. 删除旧数据 + await self._cache.delete(self.name) + logger.debug(f"删除缓存 {self.name} 旧数据") + + # 3. 设置新数据 + await self._cache.set(self.name, serialized_value, ttl=self.expire) + logger.debug(f"设置缓存 {self.name} 新数据完成") + + except Exception as e: + logger.error(f"设置缓存 {self.name} 失败: {e}") + raise # 重新抛出异常,让上层处理 + + async def delete_data(self): + """删除缓存数据""" + try: + await self._cache.delete(self.name) + except Exception as e: + logger.error(f"删除缓存 {self.name}", e=e) + + async def get(self, key: str, *args, **kwargs) -> Any: + """获取缓存""" + if not self.reload_count and not self.lazy_load: + await self.reload(*args, **kwargs) + + if not self.getter: + return await self.get_key(key) + + return await self.getter.get(self, key, *args, **kwargs) + + async def get_all(self, *args, **kwargs) -> dict[str, Any]: + """获取所有缓存数据""" + if not self.reload_count and not self.lazy_load: + await self.reload(*args, **kwargs) + + if not self.getter: + return await self.get_all_data() + + return await self.getter.get_all(self, *args, **kwargs) + + async def update(self, key: str, value: Any = None, *args, **kwargs): + """更新单个缓存项""" + if not self.updater: + logger.warning(f"缓存 {self.name} 未配置更新方法") + return + + current_data = await self.get_key(key) or {} + if is_coroutine_callable(self.updater): + await self.updater(current_data, key, value, *args, **kwargs) + else: + self.updater(current_data, key, value, *args, **kwargs) + + await self.set_key(key, current_data) + logger.debug(f"更新缓存 {self.name}.{key}") + + async def refresh(self, *args, **kwargs): + """刷新缓存数据""" + if not self.with_refresh: + return await self.reload(*args, **kwargs) + + current_data = await self.get_data() + if current_data: + if is_coroutine_callable(self.with_refresh): + await self.with_refresh(current_data, *args, **kwargs) + else: + self.with_refresh(current_data, *args, **kwargs) + await self.set_data(current_data) + logger.debug(f"刷新缓存 {self.name}") + + async def reload(self, *args, **kwargs): + """重新加载全部数据""" + try: + if self.has_args(): + new_data = ( + await self.func(*args, **kwargs) + if is_coroutine_callable(self.func) + else self.func(*args, **kwargs) + ) + else: + new_data = ( + await self.func() + if is_coroutine_callable(self.func) + else self.func() + ) + + # 如果是字典,则分别存储每个键值对 + if isinstance(new_data, dict): + for key, value in new_data.items(): + await self.set_key(key, value) + else: + # 如果不是字典,则存储为单个键值对 + await self.set_key("default", new_data) + + self.reload_count += 1 + logger.info(f"重新加载缓存 {self.name} 完成") + except Exception as e: + logger.error(f"重新加载缓存 {self.name} 失败: {e}") + raise + + def has_args(self) -> bool: + """检查函数是否需要参数""" + sig = inspect.signature(self.func) + return any( + param.kind + in ( + param.POSITIONAL_OR_KEYWORD, + param.POSITIONAL_ONLY, + param.VAR_POSITIONAL, + ) + for param in 
sig.parameters.values() + ) + + async def get_key(self, key: str) -> Any: + """获取缓存中指定键的数据 + + 参数: + key: 要获取的键名 + + 返回: + 键对应的值,如果不存在返回None + """ + cache_key = self._get_cache_key(key) + try: + data = await self._cache.get(cache_key) + logger.debug(f"获取缓存 {cache_key} 数据: {data}") + + if self.result_model: + return self._deserialize_value(data, self.result_model) + return data + except Exception as e: + logger.error(f"获取缓存 {cache_key} 失败: {e}") + return None + + async def get_keys(self, keys: list[str]) -> dict[str, Any]: + """获取缓存中多个键的数据 + + 参数: + keys: 要获取的键名列表 + + 返回: + 包含所有请求键值的字典,不存在的键值为None + """ + try: + data = await self.get_data() + if isinstance(data, dict): + return {key: data.get(key) for key in keys} + return dict.fromkeys(keys) + except Exception as e: + logger.error(f"获取缓存 {self.name} 的多个键失败: {e}") + return dict.fromkeys(keys) + + def _get_cache_key(self, key: str) -> str: + """获取缓存键名""" + return f"{self.name}:{key}" + + async def get_all_data(self) -> dict[str, Any]: + """获取所有缓存数据""" + try: + result = {} + for key in self._keys: + # 提取原始键名(去掉前缀) + original_key = key.split(":", 1)[1] + data = await self._cache.get(key) + if self.result_model: + result[original_key] = self._deserialize_value( + data, self.result_model + ) + else: + result[original_key] = data + return result + except Exception as e: + logger.error(f"获取所有缓存数据失败: {e}") + return {} + + async def set_key(self, key: str, value: Any): + """设置指定键的缓存数据""" + cache_key = self._get_cache_key(key) + try: + serialized_value = self._serialize_value(value) + await self._cache.set(cache_key, serialized_value, ttl=self.expire) + self._keys.add(cache_key) # 添加到键列表 + logger.debug(f"设置缓存 {cache_key} 数据完成") + except Exception as e: + logger.error(f"设置缓存 {cache_key} 失败: {e}") + raise + + async def delete_key(self, key: str): + """删除指定键的缓存数据""" + cache_key = self._get_cache_key(key) + try: + await self._cache.delete(cache_key) + self._keys.discard(cache_key) # 从键列表中移除 + logger.debug(f"删除缓存 {cache_key} 完成") + except Exception as e: + logger.error(f"删除缓存 {cache_key} 失败: {e}") + + async def clear(self): + """清除所有缓存数据""" + try: + for key in list(self._keys): # 使用列表复制避免在迭代时修改 + await self._cache.delete(key) + self._keys.clear() + logger.debug(f"清除缓存 {self.name} 完成") + except Exception as e: + logger.error(f"清除缓存 {self.name} 失败: {e}") + + +class CacheManager: + """全局缓存管理器""" + + _cache_instance: BaseCache | AioCache | None = None + _data: ClassVar[dict[str, CacheData]] = {} + + @property + def _cache(self) -> BaseCache | AioCache: + """获取aiocache实例""" + if self._cache_instance is None: + if config.redis_host: + self._cache_instance = AioCache( + AioCache.REDIS, + serializer=JsonSerializer(), + namespace="zhenxun_cache", + timeout=30, # 操作超时时间 + ttl=config.redis_expire, # 设置默认过期时间 + endpoint=config.redis_host, + port=config.redis_port, + password=config.redis_password, + ) + else: + self._cache_instance = AioCache( + AioCache.MEMORY, + serializer=JsonSerializer(), + namespace="zhenxun_cache", + timeout=30, # 操作超时时间 + ttl=config.redis_expire, # 设置默认过期时间 + ) + logger.info("初始化缓存完成...", LOG_COMMAND) + return self._cache_instance + + async def close(self): + if self._cache_instance: + await self._cache_instance.close() + + async def verify_connection(self): + """连接测试""" + try: + await self._cache.get("__test__") + except Exception as e: + logger.error("连接失败", LOG_COMMAND, e=e) + raise + + async def init_non_lazy_caches(self): + """初始化所有非延迟加载的缓存""" + await self.verify_connection() + for name, cache in self._data.items(): + cache._cache = 
self._cache + if not cache.lazy_load: + try: + await cache.reload() + logger.info(f"初始化缓存 {name} 完成") + except Exception as e: + logger.error(f"初始化缓存 {name} 失败: {e}") + + def new(self, name: str, lazy_load: bool = True, expire: int = 600): + """注册新缓存 + + 参数: + name: 缓存名称 + lazy_load: 是否延迟加载,默认为True。为False时会在程序启动时自动加载 + expire: 过期时间(秒) + """ + + def wrapper(func: Callable): + _name = name.upper() + if _name in self._data: + raise DbCacheException(f"缓存 {name} 已存在") + + self._data[_name] = CacheData( + name=_name, + func=func, + expire=expire, + lazy_load=lazy_load, + _cache=self._cache, + ) + return func + + return wrapper + + def listener(self, name: str): + """创建缓存监听器""" + + def decorator(func): + @wraps(func) + async def wrapper(*args, **kwargs): + try: + return ( + await func(*args, **kwargs) + if is_coroutine_callable(func) + else func(*args, **kwargs) + ) + finally: + cache = self._data.get(name.upper()) + if cache and cache.with_refresh: + await cache.refresh() + logger.debug(f"监听器触发缓存 {name} 刷新") + + return wrapper + + return decorator + + @validate_name + def updater(self, name: str): + """设置缓存更新方法""" + + def wrapper(func: Callable): + self._data[name].updater = func + return func + + return wrapper + + @validate_name + def getter(self, name: str, result_model: type): + """设置缓存获取方法""" + + def wrapper(func: Callable): + self._data[name].getter = CacheGetter[result_model](get_func=func) + self._data[name].result_model = result_model + return func + + return wrapper + + @validate_name + def with_refresh(self, name: str): + """设置缓存刷新方法""" + + def wrapper(func: Callable): + self._data[name].with_refresh = func + return func + + return wrapper + + async def get_cache_data(self, name: str) -> Any | None: + """获取缓存数据""" + cache = await self.get_cache(name.upper()) + return await cache.get_data() if cache else None + + async def get_cache(self, name: str) -> CacheData | None: + """获取缓存对象""" + return self._data.get(name.upper()) + + async def get(self, name: str, *args, **kwargs) -> Any: + """获取缓存内容""" + cache = await self.get_cache(name.upper()) + return await cache.get(*args, **kwargs) if cache else None + + async def update(self, name: str, key: str, value: Any = None, *args, **kwargs): + """更新缓存项""" + cache = await self.get_cache(name.upper()) + if cache: + await cache.update(key, value, *args, **kwargs) + + async def reload(self, name: str, *args, **kwargs): + """重新加载缓存""" + cache = await self.get_cache(name.upper()) + if cache: + await cache.reload(*args, **kwargs) + + +# 全局缓存管理器实例 +CacheRoot = CacheManager() + + +class Cache(Generic[T]): + """类型化缓存访问接口""" + + def __init__(self, module: str): + self.module = module.upper() + + async def get(self, *args, **kwargs) -> T | None: + """获取缓存""" + return await CacheRoot.get(self.module, *args, **kwargs) + + async def update(self, key: str, value: Any = None, *args, **kwargs): + """更新缓存项""" + await CacheRoot.update(self.module, key, value, *args, **kwargs) + + async def reload(self, *args, **kwargs): + """重新加载缓存""" + await CacheRoot.reload(self.module, *args, **kwargs) + + +@driver.on_shutdown +async def _(): + await CacheRoot.close() diff --git a/zhenxun/services/db_context.py b/zhenxun/services/db_context.py index 9a44fa74..9f4da8ba 100644 --- a/zhenxun/services/db_context.py +++ b/zhenxun/services/db_context.py @@ -1,32 +1,172 @@ +from asyncio import Semaphore +from collections.abc import Iterable +from typing import Any, ClassVar +from typing_extensions import Self + +import nonebot from nonebot.utils import is_coroutine_callable from 
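A minimal usage sketch for the new `zhenxun/services/cache.py` service above: `CacheRoot.new()` registers a named loader (a dict result is stored key-by-key, in Redis when `REDIS_HOST` is configured, otherwise in memory), and the typed `Cache` wrapper reads it back. The cache name and loader below are hypothetical.

```python
from zhenxun.services.cache import Cache, CacheRoot


# Register a loader for a hypothetical "LEVEL" cache. lazy_load=False means
# CacheRoot.init_non_lazy_caches() preloads it once the backend is ready;
# the returned dict is stored as one cache entry per key.
@CacheRoot.new("level", lazy_load=False, expire=300)
async def _load_levels() -> dict[str, int]:
    # A real loader would query the database here.
    return {"123456": 5, "654321": 9}


level_cache: Cache[int] = Cache("level")  # names are case-insensitive


async def demo() -> int | None:
    value = await level_cache.get("123456")  # single-key lookup
    await level_cache.reload()               # force a full refresh
    return value
```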
tortoise import Tortoise +from tortoise.backends.base.client import BaseDBAsyncClient from tortoise.connection import connections -from tortoise.models import Model as Model_ +from tortoise.models import Model as TortoiseModel from zhenxun.configs.config import BotConfig +from zhenxun.utils.enum import DbLockType +from .cache import CacheRoot from .log import logger SCRIPT_METHOD = [] MODELS: list[str] = [] +driver = nonebot.get_driver() -class Model(Model_): +CACHE_FLAG = False + + +@driver.on_bot_connect +def _(): + global CACHE_FLAG + CACHE_FLAG = True + + +class Model(TortoiseModel): """ 自动添加模块 - - Args: - Model_: Model """ + sem_data: ClassVar[dict[str, dict[str, Semaphore]]] = {} + def __init_subclass__(cls, **kwargs): - MODELS.append(cls.__module__) + if cls.__module__ not in MODELS: + MODELS.append(cls.__module__) if func := getattr(cls, "_run_script", None): SCRIPT_METHOD.append((cls.__module__, func)) + if enable_lock := getattr(cls, "enable_lock", []): + """创建锁""" + cls.sem_data[cls.__module__] = {} + for lock in enable_lock: + cls.sem_data[cls.__module__][lock] = Semaphore(1) + + @classmethod + def get_semaphore(cls, lock_type: DbLockType): + return cls.sem_data.get(cls.__module__, {}).get(lock_type, None) + + @classmethod + def get_cache_type(cls): + return getattr(cls, "cache_type", None) if CACHE_FLAG else None + + @classmethod + async def create( + cls, using_db: BaseDBAsyncClient | None = None, **kwargs: Any + ) -> Self: + return await super().create(using_db=using_db, **kwargs) + + @classmethod + async def get_or_create( + cls, + defaults: dict | None = None, + using_db: BaseDBAsyncClient | None = None, + **kwargs: Any, + ) -> tuple[Self, bool]: + result, is_create = await super().get_or_create( + defaults=defaults, using_db=using_db, **kwargs + ) + if is_create and (cache_type := cls.get_cache_type()): + await CacheRoot.reload(cache_type) + return (result, is_create) + + @classmethod + async def update_or_create( + cls, + defaults: dict | None = None, + using_db: BaseDBAsyncClient | None = None, + **kwargs: Any, + ) -> tuple[Self, bool]: + result = await super().update_or_create( + defaults=defaults, using_db=using_db, **kwargs + ) + if cache_type := cls.get_cache_type(): + await CacheRoot.reload(cache_type) + return result + + @classmethod + async def bulk_create( # type: ignore + cls, + objects: Iterable[Self], + batch_size: int | None = None, + ignore_conflicts: bool = False, + update_fields: Iterable[str] | None = None, + on_conflict: Iterable[str] | None = None, + using_db: BaseDBAsyncClient | None = None, + ) -> list[Self]: + result = await super().bulk_create( + objects=objects, + batch_size=batch_size, + ignore_conflicts=ignore_conflicts, + update_fields=update_fields, + on_conflict=on_conflict, + using_db=using_db, + ) + if cache_type := cls.get_cache_type(): + await CacheRoot.reload(cache_type) + return result + + @classmethod + async def bulk_update( # type: ignore + cls, + objects: Iterable[Self], + fields: Iterable[str], + batch_size: int | None = None, + using_db: BaseDBAsyncClient | None = None, + ) -> int: + result = await super().bulk_update( + objects=objects, + fields=fields, + batch_size=batch_size, + using_db=using_db, + ) + if cache_type := cls.get_cache_type(): + await CacheRoot.reload(cache_type) + return result + + async def save( + self, + using_db: BaseDBAsyncClient | None = None, + update_fields: Iterable[str] | None = None, + force_create: bool = False, + force_update: bool = False, + ): + if getattr(self, "id", None) is None: + sem = 
self.get_semaphore(DbLockType.CREATE) + else: + sem = self.get_semaphore(DbLockType.UPDATE) + if sem: + async with sem: + await super().save( + using_db=using_db, + update_fields=update_fields, + force_create=force_create, + force_update=force_update, + ) + else: + await super().save( + using_db=using_db, + update_fields=update_fields, + force_create=force_create, + force_update=force_update, + ) + if CACHE_FLAG and (cache_type := getattr(self, "cache_type", None)): + await CacheRoot.reload(cache_type) + + async def delete(self, using_db: BaseDBAsyncClient | None = None): + await super().delete(using_db=using_db) + if CACHE_FLAG and (cache_type := getattr(self, "cache_type", None)): + await CacheRoot.reload(cache_type) -class DbUrlIsNode(Exception): +class DbUrlMissing(Exception): """ 数据库链接地址为空 """ @@ -44,7 +184,7 @@ class DbConnectError(Exception): async def init(): if not BotConfig.db_url: - raise DbUrlIsNode("数据库配置为空,请在.env.dev中配置DB_URL...") + raise DbUrlMissing("数据库配置为空,请在.env.dev中配置DB_URL...") try: await Tortoise.init( db_url=BotConfig.db_url, diff --git a/zhenxun/services/llm/README.md b/zhenxun/services/llm/README.md new file mode 100644 index 00000000..263be1e6 --- /dev/null +++ b/zhenxun/services/llm/README.md @@ -0,0 +1,731 @@ +# Zhenxun LLM 服务模块 + +## 📑 目录 + +- [📖 概述](#-概述) +- [🌟 主要特性](#-主要特性) +- [🚀 快速开始](#-快速开始) +- [📚 API 参考](#-api-参考) +- [⚙️ 配置](#️-配置) +- [🔧 高级功能](#-高级功能) +- [🏗️ 架构设计](#️-架构设计) +- [🔌 支持的提供商](#-支持的提供商) +- [🎯 使用场景](#-使用场景) +- [📊 性能优化](#-性能优化) +- [🛠️ 故障排除](#️-故障排除) +- [❓ 常见问题](#-常见问题) +- [📝 示例项目](#-示例项目) +- [🤝 贡献](#-贡献) +- [📄 许可证](#-许可证) + +## 📖 概述 + +Zhenxun LLM 服务模块是一个现代化的AI服务框架,提供统一的接口来访问多个大语言模型提供商。该模块采用模块化设计,支持异步操作、智能重试、Key轮询和负载均衡等高级功能。 + +### 🌟 主要特性 + +- **多提供商支持**: OpenAI、Gemini、智谱AI、DeepSeek等 +- **统一接口**: 简洁一致的API设计 +- **智能Key轮询**: 自动负载均衡和故障转移 +- **异步高性能**: 基于asyncio的并发处理 +- **模型缓存**: 智能缓存机制提升性能 +- **工具调用**: 支持Function Calling +- **嵌入向量**: 文本向量化支持 +- **错误处理**: 完善的异常处理和重试机制 +- **多模态支持**: 文本、图像、音频、视频处理 +- **代码执行**: Gemini代码执行功能 +- **搜索增强**: Google搜索集成 + +## 🚀 快速开始 + +### 基本使用 + +```python +from zhenxun.services.llm import chat, code, search, analyze + +# 简单聊天 +response = await chat("你好,请介绍一下自己") +print(response) + +# 代码执行 +result = await code("计算斐波那契数列的前10项") +print(result["text"]) +print(result["code_executions"]) + +# 搜索功能 +search_result = await search("Python异步编程最佳实践") +print(search_result["text"]) + +# 多模态分析 +from nonebot_plugin_alconna.uniseg import UniMessage, Image, Text +message = UniMessage([ + Text("分析这张图片"), + Image(path="image.jpg") +]) +analysis = await analyze(message, model="Gemini/gemini-2.0-flash") +print(analysis) +``` + +### 使用AI类 + +```python +from zhenxun.services.llm import AI, AIConfig, CommonOverrides + +# 创建AI实例 +ai = AI(AIConfig(model="OpenAI/gpt-4")) + +# 聊天对话 +response = await ai.chat("解释量子计算的基本原理") + +# 多模态分析 +from nonebot_plugin_alconna.uniseg import UniMessage, Image, Text + +multimodal_msg = UniMessage([ + Text("这张图片显示了什么?"), + Image(path="image.jpg") +]) +result = await ai.analyze(multimodal_msg) + +# 便捷的多模态函数 +result = await analyze_with_images( + "分析这张图片", + images="image.jpg", + model="Gemini/gemini-2.0-flash" +) +``` + +## 📚 API 参考 + +### 快速函数 + +#### `chat(message, *, model=None, **kwargs) -> str` +简单聊天对话 + +**参数:** +- `message`: 消息内容(字符串、LLMMessage或内容部分列表) +- `model`: 模型名称(可选) +- `**kwargs`: 额外配置参数 + +#### `code(prompt, *, model=None, timeout=None, **kwargs) -> dict` +代码执行功能 + +**返回:** +```python +{ + "text": "执行结果说明", + "code_executions": [{"code": "...", "output": "..."}], + "success": True +} +``` + +#### 
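To illustrate the hooks the reworked `zhenxun/services/db_context.py` above adds to `Model`: a subclass may declare `cache_type` (writes then trigger `CacheRoot.reload()` for that cache once a bot is connected) and `enable_lock` (saves of new rows are serialized behind a per-model semaphore). The model below is hypothetical and exists only to show both attributes.

```python
from tortoise import fields

from zhenxun.services.db_context import Model
from zhenxun.utils.enum import DbLockType


class SignLog(Model):
    """Hypothetical table, shown only to illustrate the new Model hooks."""

    id = fields.IntField(pk=True, generated=True)
    user_id = fields.CharField(255)

    # Read by Model.get_cache_type(): create/update/delete on this table
    # reloads the "SIGN" cache registered elsewhere via CacheRoot.new().
    cache_type = "sign"

    # Read by Model.__init_subclass__(): save() of new rows goes through a
    # per-model Semaphore(1), avoiding duplicate inserts under concurrency.
    enable_lock = [DbLockType.CREATE]

    class Meta:  # pyright: ignore [reportIncompatibleVariableOverride]
        table = "sign_log"
```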
`search(query, *, model=None, instruction="", **kwargs) -> dict` +搜索增强生成 + +**返回:** +```python +{ + "text": "搜索结果和分析", + "grounding_metadata": {...}, + "success": True +} +``` + +#### `analyze(message, *, instruction="", model=None, tools=None, tool_config=None, **kwargs) -> str | LLMResponse` +高级分析功能,支持多模态输入和工具调用 + +#### `analyze_with_images(text, images, *, instruction="", model=None, **kwargs) -> str` +图片分析便捷函数 + +#### `analyze_multimodal(text=None, images=None, videos=None, audios=None, *, instruction="", model=None, **kwargs) -> str` +多模态分析便捷函数 + +#### `embed(texts, *, model=None, task_type="RETRIEVAL_DOCUMENT", **kwargs) -> list[list[float]]` +文本嵌入向量 + +### AI类方法 + +#### `AI.chat(message, *, model=None, **kwargs) -> str` +聊天对话方法,支持简单多模态输入 + +#### `AI.analyze(message, *, instruction="", model=None, tools=None, tool_config=None, **kwargs) -> str | LLMResponse` +高级分析方法,接收UniMessage进行多模态分析和工具调用 + +### 模型管理 + +```python +from zhenxun.services.llm import ( + get_model_instance, + list_available_models, + set_global_default_model_name, + clear_model_cache +) + +# 获取模型实例 +model = await get_model_instance("OpenAI/gpt-4o") + +# 列出可用模型 +models = list_available_models() + +# 设置默认模型 +set_global_default_model_name("Gemini/gemini-2.0-flash") + +# 清理缓存 +clear_model_cache() +``` + +## ⚙️ 配置 + +### 预设配置 + +```python +from zhenxun.services.llm import CommonOverrides + +# 创意模式 +creative_config = CommonOverrides.creative() + +# 精确模式 +precise_config = CommonOverrides.precise() + +# Gemini特殊功能 +json_config = CommonOverrides.gemini_json() +thinking_config = CommonOverrides.gemini_thinking() +code_exec_config = CommonOverrides.gemini_code_execution() +grounding_config = CommonOverrides.gemini_grounding() +``` + +### 自定义配置 + +```python +from zhenxun.services.llm import LLMGenerationConfig + +config = LLMGenerationConfig( + temperature=0.7, + max_tokens=2048, + top_p=0.9, + frequency_penalty=0.1, + presence_penalty=0.1, + stop=["END", "STOP"], + response_mime_type="application/json", + enable_code_execution=True, + enable_grounding=True +) + +response = await chat("你的问题", override_config=config) +``` + +## 🔧 高级功能 + +### 工具调用 (Function Calling) + +```python +from zhenxun.services.llm import LLMTool, get_model_instance + +# 定义工具 +tools = [ + LLMTool( + name="get_weather", + description="获取天气信息", + parameters={ + "type": "object", + "properties": { + "city": {"type": "string", "description": "城市名称"} + }, + "required": ["city"] + } + ) +] + +# 工具执行器 +async def tool_executor(tool_name: str, args: dict) -> str: + if tool_name == "get_weather": + return f"{args['city']}今天晴天,25°C" + return "未知工具" + +# 使用工具 +model = await get_model_instance("OpenAI/gpt-4") +response = await model.generate_response( + messages=[{"role": "user", "content": "北京天气如何?"}], + tools=tools, + tool_executor=tool_executor +) +``` + +### 多模态处理 + +```python +from zhenxun.services.llm import create_multimodal_message, analyze_multimodal, analyze_with_images + +# 方法1:使用便捷函数 +result = await analyze_multimodal( + text="分析这些媒体文件", + images="image.jpg", + audios="audio.mp3", + model="Gemini/gemini-2.0-flash" +) + +# 方法2:使用create_multimodal_message +message = create_multimodal_message( + text="分析这张图片和音频", + images="image.jpg", + audios="audio.mp3" +) +result = await analyze(message) + +# 方法3:图片分析专用函数 +result = await analyze_with_images( + "这张图片显示了什么?", + images=["image1.jpg", "image2.jpg"] +) +``` + +## 🛠️ 故障排除 + +### 常见错误 + +1. **配置错误**: 检查API密钥和模型配置 +2. **网络问题**: 检查代理设置和网络连接 +3. **模型不可用**: 使用 `list_available_models()` 检查可用模型 +4. 
**超时错误**: 调整timeout参数或使用更快的模型 + +### 调试技巧 + +```python +from zhenxun.services.llm import get_cache_stats +from zhenxun.services.log import logger + +# 查看缓存状态 +stats = get_cache_stats() +print(f"缓存命中率: {stats['hit_rate']}") + +# 启用详细日志 +logger.setLevel("DEBUG") +``` + +## ❓ 常见问题 + + +### Q: 如何处理多模态输入? + +**A:** 有多种方式处理多模态输入: +```python +# 方法1:使用便捷函数 +result = await analyze_with_images("分析这张图片", images="image.jpg") + +# 方法2:使用analyze函数 +from nonebot_plugin_alconna.uniseg import UniMessage, Image, Text +message = UniMessage([Text("分析这张图片"), Image(path="image.jpg")]) +result = await analyze(message) + +# 方法3:使用create_multimodal_message +from zhenxun.services.llm import create_multimodal_message +message = create_multimodal_message(text="分析这张图片", images="image.jpg") +result = await analyze(message) +``` + +### Q: 如何自定义工具调用? + +**A:** 使用analyze函数的tools参数: +```python +# 定义工具 +tools = [{ + "name": "calculator", + "description": "计算数学表达式", + "parameters": { + "type": "object", + "properties": { + "expression": {"type": "string", "description": "数学表达式"} + }, + "required": ["expression"] + } +}] + +# 使用工具 +from nonebot_plugin_alconna.uniseg import UniMessage, Text +message = UniMessage([Text("计算 2+3*4")]) +response = await analyze(message, tools=tools, tool_config={"mode": "auto"}) + +# 如果返回LLMResponse,说明有工具调用 +if hasattr(response, 'tool_calls'): + for tool_call in response.tool_calls: + print(f"调用工具: {tool_call.function.name}") + print(f"参数: {tool_call.function.arguments}") +``` + + +### Q: 如何确保输出格式? + +**A:** 使用结构化输出: +```python +# JSON格式输出 +config = CommonOverrides.gemini_json() + +# 自定义Schema +schema = { + "type": "object", + "properties": { + "answer": {"type": "string"}, + "confidence": {"type": "number"} + } +} +config = CommonOverrides.gemini_structured(schema) +``` + +## 📝 示例项目 + +### 完整示例 + +#### 1. 智能客服机器人 + +```python +from zhenxun.services.llm import AI, CommonOverrides +from typing import Dict, List + +class CustomerService: + def __init__(self): + self.ai = AI() + self.sessions: Dict[str, List[dict]] = {} + + async def handle_query(self, user_id: str, query: str) -> str: + # 获取或创建会话历史 + if user_id not in self.sessions: + self.sessions[user_id] = [] + + history = self.sessions[user_id] + + # 添加系统提示 + if not history: + history.append({ + "role": "system", + "content": "你是一个专业的客服助手,请友好、准确地回答用户问题。" + }) + + # 添加用户问题 + history.append({"role": "user", "content": query}) + + # 生成回复 + response = await self.ai.chat( + query, + history=history[-20:], # 保留最近20轮对话 + override_config=CommonOverrides.balanced() + ) + + # 保存回复到历史 + history.append({"role": "assistant", "content": response}) + + return response +``` + +#### 2. 
文档智能问答 + +```python +from zhenxun.services.llm import embed, analyze +import numpy as np +from typing import List, Tuple + +class DocumentQA: + def __init__(self): + self.documents: List[str] = [] + self.embeddings: List[List[float]] = [] + + async def add_document(self, text: str): + """添加文档到知识库""" + self.documents.append(text) + + # 生成嵌入向量 + embedding = await embed([text]) + self.embeddings.extend(embedding) + + async def query(self, question: str, top_k: int = 3) -> str: + """查询文档并生成答案""" + if not self.documents: + return "知识库为空,请先添加文档。" + + # 生成问题的嵌入向量 + question_embedding = await embed([question]) + + # 计算相似度并找到最相关的文档 + similarities = [] + for doc_embedding in self.embeddings: + similarity = np.dot(question_embedding[0], doc_embedding) + similarities.append(similarity) + + # 获取最相关的文档 + top_indices = np.argsort(similarities)[-top_k:][::-1] + relevant_docs = [self.documents[i] for i in top_indices] + + # 构建上下文 + context = "\n\n".join(relevant_docs) + prompt = f""" +基于以下文档内容回答问题: + +文档内容: +{context} + +问题:{question} + +请基于文档内容给出准确的答案,如果文档中没有相关信息,请说明。 +""" + + result = await analyze(prompt) + return result["text"] +``` + +#### 3. 代码审查助手 + +```python +from zhenxun.services.llm import code, analyze +import os + +class CodeReviewer: + async def review_file(self, file_path: str) -> dict: + """审查代码文件""" + if not os.path.exists(file_path): + return {"error": "文件不存在"} + + with open(file_path, 'r', encoding='utf-8') as f: + code_content = f.read() + + prompt = f""" +请审查以下代码,提供详细的反馈: + +文件:{file_path} +代码: +``` +{code_content} +``` + +请从以下方面进行审查: +1. 代码质量和可读性 +2. 潜在的bug和安全问题 +3. 性能优化建议 +4. 最佳实践建议 +5. 代码风格问题 + +请以JSON格式返回结果。 +""" + + result = await analyze( + prompt, + model="DeepSeek/deepseek-coder", + override_config=CommonOverrides.gemini_json() + ) + + return { + "file": file_path, + "review": result["text"], + "success": True + } + + async def suggest_improvements(self, code: str, language: str = "python") -> str: + """建议代码改进""" + prompt = f""" +请改进以下{language}代码,使其更加高效、可读和符合最佳实践: + +原代码: +```{language} +{code} +``` + +请提供改进后的代码和说明。 +""" + + result = await code(prompt, model="DeepSeek/deepseek-coder") + return result["text"] +``` + + +## 🏗️ 架构设计 + +### 模块结构 + +``` +zhenxun/services/llm/ +├── __init__.py # 包入口,导入和暴露公共API +├── api.py # 高级API接口(AI类、便捷函数) +├── core.py # 核心基础设施(HTTP客户端、重试逻辑、KeyStore) +├── service.py # LLM模型实现类 +├── utils.py # 工具和转换函数 +├── manager.py # 模型管理和缓存 +├── adapters/ # 适配器模块 +│ ├── __init__.py # 适配器包入口 +│ ├── base.py # 基础适配器 +│ ├── factory.py # 适配器工厂 +│ ├── openai.py # OpenAI适配器 +│ ├── gemini.py # Gemini适配器 +│ └── zhipu.py # 智谱AI适配器 +├── config/ # 配置模块 +│ ├── __init__.py # 配置包入口 +│ ├── generation.py # 生成配置 +│ ├── presets.py # 预设配置 +│ └── providers.py # 提供商配置 +└── types/ # 类型定义 + ├── __init__.py # 类型包入口 + ├── content.py # 内容类型 + ├── enums.py # 枚举定义 + ├── exceptions.py # 异常定义 + └── models.py # 数据模型 +``` + +### 模块职责 + +- **`__init__.py`**: 纯粹的包入口,只负责导入和暴露公共API +- **`api.py`**: 高级API接口,包含AI类和所有便捷函数 +- **`core.py`**: 核心基础设施,包含HTTP客户端管理、重试逻辑和KeyStore +- **`service.py`**: LLM模型实现类,专注于模型逻辑 +- **`utils.py`**: 工具和转换函数,如多模态消息处理 +- **`manager.py`**: 模型管理和缓存机制 +- **`adapters/`**: 各大提供商的适配器模块,负责与不同API的交互 + - `base.py`: 定义适配器的基础接口 + - `factory.py`: 适配器工厂,用于动态加载和实例化适配器 + - `openai.py`: OpenAI API适配器 + - `gemini.py`: Google Gemini API适配器 + - `zhipu.py`: 智谱AI API适配器 +- **`config/`**: 配置管理模块 + - `generation.py`: 生成配置和预设 + - `presets.py`: 预设配置 + - `providers.py`: 提供商配置 +- **`types/`**: 类型定义模块 + - `content.py`: 内容类型定义 + - `enums.py`: 枚举定义 + - `exceptions.py`: 异常定义 + - `models.py`: 数据模型定义 + +## 🔌 
支持的提供商 + +### OpenAI 兼容 + +- **OpenAI**: GPT-4o, GPT-3.5-turbo等 +- **DeepSeek**: deepseek-chat, deepseek-reasoner等 +- **其他OpenAI兼容API**: 支持自定义端点 + +```python +# OpenAI +await chat("Hello", model="OpenAI/gpt-4o") + +# DeepSeek +await chat("写代码", model="DeepSeek/deepseek-reasoner") +``` + +### Google Gemini + +- **Gemini Pro**: gemini-2.5-flash-preview-05-20 gemini-2.0-flash等 +- **特殊功能**: 代码执行、搜索增强、思考模式 + +```python +# 基础使用 +await chat("你好", model="Gemini/gemini-2.0-flash") + +# 代码执行 +await code("计算质数", model="Gemini/gemini-2.0-flash") + +# 搜索增强 +await search("最新AI发展", model="Gemini/gemini-2.5-flash-preview-05-20") +``` + +### 智谱AI + +- **GLM系列**: glm-4, glm-4v等 +- **支持功能**: 文本生成、多模态理解 + +```python +await chat("介绍北京", model="Zhipu/glm-4") +``` + +## 🎯 使用场景 + +### 1. 聊天机器人 + +```python +from zhenxun.services.llm import AI, CommonOverrides + +class ChatBot: + def __init__(self): + self.ai = AI() + self.history = [] + + async def chat(self, user_input: str) -> str: + # 添加历史记录 + self.history.append({"role": "user", "content": user_input}) + + # 生成回复 + response = await self.ai.chat( + user_input, + history=self.history[-10:], # 保留最近10轮对话 + override_config=CommonOverrides.balanced() + ) + + self.history.append({"role": "assistant", "content": response}) + return response +``` + +### 2. 代码助手 + +```python +async def code_assistant(task: str) -> dict: + """代码生成和执行助手""" + result = await code( + f"请帮我{task},并执行代码验证结果", + model="Gemini/gemini-2.0-flash", + timeout=60 + ) + + return { + "explanation": result["text"], + "code_blocks": result["code_executions"], + "success": result["success"] + } + +# 使用示例 +result = await code_assistant("实现快速排序算法") +``` + +### 3. 文档分析 + +```python +from zhenxun.services.llm import analyze_with_images + +async def analyze_document(image_path: str, question: str) -> str: + """分析文档图片并回答问题""" + result = await analyze_with_images( + f"请分析这个文档并回答:{question}", + images=image_path, + model="Gemini/gemini-2.0-flash" + ) + return result +``` + +### 4. 
智能搜索 + +```python +async def smart_search(query: str) -> dict: + """智能搜索和总结""" + result = await search( + query, + model="Gemini/gemini-2.0-flash", + instruction="请提供准确、最新的信息,并注明信息来源" + ) + + return { + "summary": result["text"], + "sources": result.get("grounding_metadata", {}), + "confidence": result.get("confidence_score", 0.0) + } +``` + +## 🔧 配置管理 + + +### 动态配置 + +```python +from zhenxun.services.llm import set_global_default_model_name + +# 运行时更改默认模型 +set_global_default_model_name("OpenAI/gpt-4") + +# 检查可用模型 +models = list_available_models() +for model in models: + print(f"{model.provider}/{model.name} - {model.description}") +``` + diff --git a/zhenxun/services/llm/__init__.py b/zhenxun/services/llm/__init__.py new file mode 100644 index 00000000..ff09ef7a --- /dev/null +++ b/zhenxun/services/llm/__init__.py @@ -0,0 +1,96 @@ +""" +LLM 服务模块 - 公共 API 入口 + +提供统一的 AI 服务调用接口、核心类型定义和模型管理功能。 +""" + +from .api import ( + AI, + AIConfig, + TaskType, + analyze, + analyze_multimodal, + analyze_with_images, + chat, + code, + embed, + search, + search_multimodal, +) +from .config import ( + CommonOverrides, + LLMGenerationConfig, + register_llm_configs, +) + +register_llm_configs() +from .api import ModelName +from .manager import ( + clear_model_cache, + get_cache_stats, + get_global_default_model_name, + get_model_instance, + list_available_models, + list_embedding_models, + list_model_identifiers, + set_global_default_model_name, +) +from .types import ( + EmbeddingTaskType, + LLMContentPart, + LLMErrorCode, + LLMException, + LLMMessage, + LLMResponse, + LLMTool, + ModelDetail, + ModelInfo, + ModelProvider, + ResponseFormat, + ToolCategory, + ToolMetadata, + UsageInfo, +) +from .utils import create_multimodal_message, unimsg_to_llm_parts + +__all__ = [ + "AI", + "AIConfig", + "CommonOverrides", + "EmbeddingTaskType", + "LLMContentPart", + "LLMErrorCode", + "LLMException", + "LLMGenerationConfig", + "LLMMessage", + "LLMResponse", + "LLMTool", + "ModelDetail", + "ModelInfo", + "ModelName", + "ModelProvider", + "ResponseFormat", + "TaskType", + "ToolCategory", + "ToolMetadata", + "UsageInfo", + "analyze", + "analyze_multimodal", + "analyze_with_images", + "chat", + "clear_model_cache", + "code", + "create_multimodal_message", + "embed", + "get_cache_stats", + "get_global_default_model_name", + "get_model_instance", + "list_available_models", + "list_embedding_models", + "list_model_identifiers", + "register_llm_configs", + "search", + "search_multimodal", + "set_global_default_model_name", + "unimsg_to_llm_parts", +] diff --git a/zhenxun/services/llm/adapters/__init__.py b/zhenxun/services/llm/adapters/__init__.py new file mode 100644 index 00000000..93ed9d31 --- /dev/null +++ b/zhenxun/services/llm/adapters/__init__.py @@ -0,0 +1,26 @@ +""" +LLM 适配器模块 + +提供不同LLM服务商的API适配器实现,统一接口调用方式。 +""" + +from .base import BaseAdapter, OpenAICompatAdapter, RequestData, ResponseData +from .factory import LLMAdapterFactory, get_adapter_for_api_type, register_adapter +from .gemini import GeminiAdapter +from .openai import OpenAIAdapter +from .zhipu import ZhipuAdapter + +LLMAdapterFactory.initialize() + +__all__ = [ + "BaseAdapter", + "GeminiAdapter", + "LLMAdapterFactory", + "OpenAIAdapter", + "OpenAICompatAdapter", + "RequestData", + "ResponseData", + "ZhipuAdapter", + "get_adapter_for_api_type", + "register_adapter", +] diff --git a/zhenxun/services/llm/adapters/base.py b/zhenxun/services/llm/adapters/base.py new file mode 100644 index 00000000..499f9248 --- /dev/null +++ 
b/zhenxun/services/llm/adapters/base.py @@ -0,0 +1,508 @@ +""" +LLM 适配器基类和通用数据结构 +""" + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any + +from pydantic import BaseModel + +from zhenxun.services.log import logger + +from ..types.exceptions import LLMErrorCode, LLMException +from ..types.models import LLMToolCall + +if TYPE_CHECKING: + from ..config.generation import LLMGenerationConfig + from ..service import LLMModel + from ..types.content import LLMMessage + from ..types.enums import EmbeddingTaskType + + +class RequestData(BaseModel): + """请求数据封装""" + + url: str + headers: dict[str, str] + body: dict[str, Any] + + +class ResponseData(BaseModel): + """响应数据封装 - 支持所有高级功能""" + + text: str + usage_info: dict[str, Any] | None = None + raw_response: dict[str, Any] | None = None + tool_calls: list[LLMToolCall] | None = None + code_executions: list[Any] | None = None + grounding_metadata: Any | None = None + cache_info: Any | None = None + + code_execution_results: list[dict[str, Any]] | None = None + search_results: list[dict[str, Any]] | None = None + function_calls: list[dict[str, Any]] | None = None + safety_ratings: list[dict[str, Any]] | None = None + citations: list[dict[str, Any]] | None = None + + +class BaseAdapter(ABC): + """LLM API适配器基类""" + + @property + @abstractmethod + def api_type(self) -> str: + """API类型标识""" + pass + + @property + @abstractmethod + def supported_api_types(self) -> list[str]: + """支持的API类型列表""" + pass + + def prepare_simple_request( + self, + model: "LLMModel", + api_key: str, + prompt: str, + history: list[dict[str, str]] | None = None, + ) -> RequestData: + """准备简单文本生成请求 + + 默认实现:将简单请求转换为高级请求格式 + 子类可以重写此方法以提供特定的优化实现 + """ + from ..types.content import LLMMessage + + messages: list[LLMMessage] = [] + + if history: + for msg in history: + role = msg.get("role", "user") + content = msg.get("content", "") + messages.append(LLMMessage(role=role, content=content)) + + messages.append(LLMMessage(role="user", content=prompt)) + + config = model._generation_config + + return self.prepare_advanced_request( + model=model, + api_key=api_key, + messages=messages, + config=config, + tools=None, + tool_choice=None, + ) + + @abstractmethod + def prepare_advanced_request( + self, + model: "LLMModel", + api_key: str, + messages: list["LLMMessage"], + config: "LLMGenerationConfig | None" = None, + tools: list[dict[str, Any]] | None = None, + tool_choice: str | dict[str, Any] | None = None, + ) -> RequestData: + """准备高级请求""" + pass + + @abstractmethod + def parse_response( + self, + model: "LLMModel", + response_json: dict[str, Any], + is_advanced: bool = False, + ) -> ResponseData: + """解析API响应""" + pass + + @abstractmethod + def prepare_embedding_request( + self, + model: "LLMModel", + api_key: str, + texts: list[str], + task_type: "EmbeddingTaskType | str", + **kwargs: Any, + ) -> RequestData: + """准备文本嵌入请求""" + pass + + @abstractmethod + def parse_embedding_response( + self, response_json: dict[str, Any] + ) -> list[list[float]]: + """解析文本嵌入响应""" + pass + + def validate_embedding_response(self, response_json: dict[str, Any]) -> None: + """验证嵌入API响应""" + if "error" in response_json: + error_info = response_json["error"] + msg = ( + error_info.get("message", str(error_info)) + if isinstance(error_info, dict) + else str(error_info) + ) + raise LLMException( + f"嵌入API错误: {msg}", + code=LLMErrorCode.EMBEDDING_FAILED, + details=response_json, + ) + + def get_api_url(self, model: "LLMModel", endpoint: str) -> str: + """构建API URL""" + if not 
model.api_base: + raise LLMException( + f"模型 {model.model_name} 的 api_base 未设置", + code=LLMErrorCode.CONFIGURATION_ERROR, + ) + return f"{model.api_base.rstrip('/')}{endpoint}" + + def get_base_headers(self, api_key: str) -> dict[str, str]: + """获取基础请求头""" + from zhenxun.utils.user_agent import get_user_agent + + headers = get_user_agent() + headers.update( + { + "Content-Type": "application/json", + "Authorization": f"Bearer {api_key}", + } + ) + return headers + + def convert_messages_to_openai_format( + self, messages: list["LLMMessage"] + ) -> list[dict[str, Any]]: + """将LLMMessage转换为OpenAI格式 - 通用方法""" + openai_messages: list[dict[str, Any]] = [] + for msg in messages: + openai_msg: dict[str, Any] = {"role": msg.role} + + if msg.role == "tool": + openai_msg["tool_call_id"] = msg.tool_call_id + openai_msg["name"] = msg.name + openai_msg["content"] = msg.content + else: + if isinstance(msg.content, str): + openai_msg["content"] = msg.content + else: + content_parts = [] + for part in msg.content: + if part.type == "text": + content_parts.append({"type": "text", "text": part.text}) + elif part.type == "image": + content_parts.append( + { + "type": "image_url", + "image_url": {"url": part.image_source}, + } + ) + openai_msg["content"] = content_parts + + if msg.role == "assistant" and msg.tool_calls: + assistant_tool_calls = [] + for call in msg.tool_calls: + assistant_tool_calls.append( + { + "id": call.id, + "type": "function", + "function": { + "name": call.function.name, + "arguments": call.function.arguments, + }, + } + ) + openai_msg["tool_calls"] = assistant_tool_calls + + if msg.name and msg.role != "tool": + openai_msg["name"] = msg.name + + openai_messages.append(openai_msg) + return openai_messages + + def parse_openai_response(self, response_json: dict[str, Any]) -> ResponseData: + """解析OpenAI格式的响应 - 通用方法""" + self.validate_response(response_json) + + try: + choices = response_json.get("choices", []) + if not choices: + logger.debug("OpenAI响应中没有choices,可能为空回复或流结束。") + return ResponseData(text="", raw_response=response_json) + + choice = choices[0] + message = choice.get("message", {}) + content = message.get("content", "") + + parsed_tool_calls: list[LLMToolCall] | None = None + if message_tool_calls := message.get("tool_calls"): + from ..types.models import LLMToolFunction + + parsed_tool_calls = [] + for tc_data in message_tool_calls: + try: + if tc_data.get("type") == "function": + parsed_tool_calls.append( + LLMToolCall( + id=tc_data["id"], + function=LLMToolFunction( + name=tc_data["function"]["name"], + arguments=tc_data["function"]["arguments"], + ), + ) + ) + except KeyError as e: + logger.warning( + f"解析OpenAI工具调用数据时缺少键: {tc_data}, 错误: {e}" + ) + except Exception as e: + logger.warning( + f"解析OpenAI工具调用数据时出错: {tc_data}, 错误: {e}" + ) + if not parsed_tool_calls: + parsed_tool_calls = None + + final_text = content if content is not None else "" + if not final_text and parsed_tool_calls: + final_text = f"请求调用 {len(parsed_tool_calls)} 个工具。" + + usage_info = response_json.get("usage") + + return ResponseData( + text=final_text, + tool_calls=parsed_tool_calls, + usage_info=usage_info, + raw_response=response_json, + ) + + except Exception as e: + logger.error(f"解析OpenAI格式响应失败: {e}", e=e) + raise LLMException( + f"解析API响应失败: {e}", + code=LLMErrorCode.RESPONSE_PARSE_ERROR, + cause=e, + ) + + def validate_response(self, response_json: dict[str, Any]) -> None: + """验证API响应,解析不同API的错误结构""" + if "error" in response_json: + error_info = response_json["error"] + + if 
isinstance(error_info, dict): + error_message = error_info.get("message", "未知错误") + error_code = error_info.get("code", "unknown") + error_type = error_info.get("type", "api_error") + + error_code_mapping = { + "invalid_api_key": LLMErrorCode.API_KEY_INVALID, + "authentication_failed": LLMErrorCode.API_KEY_INVALID, + "rate_limit_exceeded": LLMErrorCode.API_RATE_LIMITED, + "quota_exceeded": LLMErrorCode.API_RATE_LIMITED, + "model_not_found": LLMErrorCode.MODEL_NOT_FOUND, + "invalid_model": LLMErrorCode.MODEL_NOT_FOUND, + "context_length_exceeded": LLMErrorCode.CONTEXT_LENGTH_EXCEEDED, + "max_tokens_exceeded": LLMErrorCode.CONTEXT_LENGTH_EXCEEDED, + } + + llm_error_code = error_code_mapping.get( + error_code, LLMErrorCode.API_RESPONSE_INVALID + ) + + logger.error( + f"API返回错误: {error_message} " + f"(代码: {error_code}, 类型: {error_type})" + ) + else: + error_message = str(error_info) + error_code = "unknown" + llm_error_code = LLMErrorCode.API_RESPONSE_INVALID + + logger.error(f"API返回错误: {error_message}") + + raise LLMException( + f"API请求失败: {error_message}", + code=llm_error_code, + details={"api_error": error_info, "error_code": error_code}, + ) + + if "candidates" in response_json: + candidates = response_json.get("candidates", []) + if candidates: + candidate = candidates[0] + finish_reason = candidate.get("finishReason") + if finish_reason in ["SAFETY", "RECITATION"]: + safety_ratings = candidate.get("safetyRatings", []) + logger.warning( + f"Gemini内容被安全过滤: {finish_reason}, " + f"安全评级: {safety_ratings}" + ) + raise LLMException( + f"内容被安全过滤: {finish_reason}", + code=LLMErrorCode.CONTENT_FILTERED, + details={ + "finish_reason": finish_reason, + "safety_ratings": safety_ratings, + }, + ) + + if not response_json: + logger.error("API返回空响应") + raise LLMException( + "API返回空响应", + code=LLMErrorCode.API_RESPONSE_INVALID, + details={"response": response_json}, + ) + + def _apply_generation_config( + self, + model: "LLMModel", + config: "LLMGenerationConfig | None" = None, + ) -> dict[str, Any]: + """通用的配置应用逻辑""" + if config is not None: + return config.to_api_params(model.api_type, model.model_name) + + if model._generation_config is not None: + return model._generation_config.to_api_params( + model.api_type, model.model_name + ) + + base_config = {} + if model.temperature is not None: + base_config["temperature"] = model.temperature + if model.max_tokens is not None: + if model.api_type in ["gemini", "gemini_native"]: + base_config["maxOutputTokens"] = model.max_tokens + else: + base_config["max_tokens"] = model.max_tokens + + return base_config + + def apply_config_override( + self, + model: "LLMModel", + body: dict[str, Any], + config: "LLMGenerationConfig | None" = None, + ) -> dict[str, Any]: + """应用配置覆盖""" + config_params = self._apply_generation_config(model, config) + body.update(config_params) + return body + + +class OpenAICompatAdapter(BaseAdapter): + """ + 处理所有 OpenAI 兼容 API 的通用适配器。 + 消除 OpenAIAdapter 和 ZhipuAdapter 之间的代码重复。 + """ + + @abstractmethod + def get_chat_endpoint(self) -> str: + """子类必须实现,返回 chat completions 的端点""" + pass + + @abstractmethod + def get_embedding_endpoint(self) -> str: + """子类必须实现,返回 embeddings 的端点""" + pass + + def prepare_advanced_request( + self, + model: "LLMModel", + api_key: str, + messages: list["LLMMessage"], + config: "LLMGenerationConfig | None" = None, + tools: list[dict[str, Any]] | None = None, + tool_choice: str | dict[str, Any] | None = None, + ) -> RequestData: + """准备高级请求 - OpenAI兼容格式""" + url = self.get_api_url(model, 
self.get_chat_endpoint()) + headers = self.get_base_headers(api_key) + openai_messages = self.convert_messages_to_openai_format(messages) + + body = { + "model": model.model_name, + "messages": openai_messages, + } + + if tools: + body["tools"] = tools + if tool_choice: + body["tool_choice"] = tool_choice + + body = self.apply_config_override(model, body, config) + return RequestData(url=url, headers=headers, body=body) + + def parse_response( + self, + model: "LLMModel", + response_json: dict[str, Any], + is_advanced: bool = False, + ) -> ResponseData: + """解析响应 - 直接使用基类的 OpenAI 格式解析""" + _ = model, is_advanced # 未使用的参数 + return self.parse_openai_response(response_json) + + def prepare_embedding_request( + self, + model: "LLMModel", + api_key: str, + texts: list[str], + task_type: "EmbeddingTaskType | str", + **kwargs: Any, + ) -> RequestData: + """准备嵌入请求 - OpenAI兼容格式""" + _ = task_type # 未使用的参数 + url = self.get_api_url(model, self.get_embedding_endpoint()) + headers = self.get_base_headers(api_key) + + body = { + "model": model.model_name, + "input": texts, + } + + # 应用额外的配置参数 + if kwargs: + body.update(kwargs) + + return RequestData(url=url, headers=headers, body=body) + + def parse_embedding_response( + self, response_json: dict[str, Any] + ) -> list[list[float]]: + """解析嵌入响应 - OpenAI兼容格式""" + self.validate_embedding_response(response_json) + + try: + data = response_json.get("data", []) + if not data: + raise LLMException( + "嵌入响应中没有数据", + code=LLMErrorCode.EMBEDDING_FAILED, + details=response_json, + ) + + embeddings = [] + for item in data: + if "embedding" in item: + embeddings.append(item["embedding"]) + else: + raise LLMException( + "嵌入响应格式错误:缺少embedding字段", + code=LLMErrorCode.EMBEDDING_FAILED, + details=item, + ) + + return embeddings + + except Exception as e: + logger.error(f"解析嵌入响应失败: {e}", e=e) + raise LLMException( + f"解析嵌入响应失败: {e}", + code=LLMErrorCode.EMBEDDING_FAILED, + cause=e, + ) diff --git a/zhenxun/services/llm/adapters/factory.py b/zhenxun/services/llm/adapters/factory.py new file mode 100644 index 00000000..8652fc67 --- /dev/null +++ b/zhenxun/services/llm/adapters/factory.py @@ -0,0 +1,78 @@ +""" +LLM 适配器工厂类 +""" + +from typing import ClassVar + +from ..types.exceptions import LLMErrorCode, LLMException +from .base import BaseAdapter + + +class LLMAdapterFactory: + """LLM适配器工厂类""" + + _adapters: ClassVar[dict[str, BaseAdapter]] = {} + _api_type_mapping: ClassVar[dict[str, str]] = {} + + @classmethod + def initialize(cls) -> None: + """初始化默认适配器""" + if cls._adapters: + return + + from .gemini import GeminiAdapter + from .openai import OpenAIAdapter + from .zhipu import ZhipuAdapter + + cls.register_adapter(OpenAIAdapter()) + cls.register_adapter(ZhipuAdapter()) + cls.register_adapter(GeminiAdapter()) + + @classmethod + def register_adapter(cls, adapter: BaseAdapter) -> None: + """注册适配器""" + adapter_key = adapter.api_type + cls._adapters[adapter_key] = adapter + + for api_type in adapter.supported_api_types: + cls._api_type_mapping[api_type] = adapter_key + + @classmethod + def get_adapter(cls, api_type: str) -> BaseAdapter: + """获取适配器""" + cls.initialize() + + adapter_key = cls._api_type_mapping.get(api_type) + if not adapter_key: + raise LLMException( + f"不支持的API类型: {api_type}", + code=LLMErrorCode.UNKNOWN_API_TYPE, + details={ + "api_type": api_type, + "supported_types": list(cls._api_type_mapping.keys()), + }, + ) + + return cls._adapters[adapter_key] + + @classmethod + def list_supported_types(cls) -> list[str]: + """列出所有支持的API类型""" + cls.initialize() + 
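+        # initialize() is idempotent: default adapters are registered only on the first call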
return list(cls._api_type_mapping.keys()) + + @classmethod + def list_adapters(cls) -> dict[str, BaseAdapter]: + """列出所有注册的适配器""" + cls.initialize() + return cls._adapters.copy() + + +def get_adapter_for_api_type(api_type: str) -> BaseAdapter: + """获取指定API类型的适配器""" + return LLMAdapterFactory.get_adapter(api_type) + + +def register_adapter(adapter: BaseAdapter) -> None: + """注册新的适配器""" + LLMAdapterFactory.register_adapter(adapter) diff --git a/zhenxun/services/llm/adapters/gemini.py b/zhenxun/services/llm/adapters/gemini.py new file mode 100644 index 00000000..0ca22185 --- /dev/null +++ b/zhenxun/services/llm/adapters/gemini.py @@ -0,0 +1,596 @@ +""" +Gemini API 适配器 +""" + +from typing import TYPE_CHECKING, Any + +from zhenxun.services.log import logger + +from ..types.exceptions import LLMErrorCode, LLMException +from .base import BaseAdapter, RequestData, ResponseData + +if TYPE_CHECKING: + from ..config.generation import LLMGenerationConfig + from ..service import LLMModel + from ..types.content import LLMMessage + from ..types.enums import EmbeddingTaskType + from ..types.models import LLMToolCall + + +class GeminiAdapter(BaseAdapter): + """Gemini API 适配器""" + + @property + def api_type(self) -> str: + return "gemini" + + @property + def supported_api_types(self) -> list[str]: + return ["gemini"] + + def get_base_headers(self, api_key: str) -> dict[str, str]: + """获取基础请求头""" + from zhenxun.utils.user_agent import get_user_agent + + headers = get_user_agent() + headers.update({"Content-Type": "application/json"}) + headers["x-goog-api-key"] = api_key + + return headers + + def prepare_advanced_request( + self, + model: "LLMModel", + api_key: str, + messages: list["LLMMessage"], + config: "LLMGenerationConfig | None" = None, + tools: list[dict[str, Any]] | None = None, + tool_choice: str | dict[str, Any] | None = None, + ) -> RequestData: + """准备高级请求""" + return self._prepare_request( + model, api_key, messages, config, tools, tool_choice + ) + + def _prepare_request( + self, + model: "LLMModel", + api_key: str, + messages: list["LLMMessage"], + config: "LLMGenerationConfig | None" = None, + tools: list[dict[str, Any]] | None = None, + tool_choice: str | dict[str, Any] | None = None, + ) -> RequestData: + """准备 Gemini API 请求 - 支持所有高级功能""" + effective_config = config if config is not None else model._generation_config + + endpoint = self._get_gemini_endpoint(model, effective_config) + url = self.get_api_url(model, endpoint) + headers = self.get_base_headers(api_key) + + gemini_contents: list[dict[str, Any]] = [] + system_instruction_parts: list[dict[str, Any]] | None = None + + for msg in messages: + current_parts: list[dict[str, Any]] = [] + if msg.role == "system": + if isinstance(msg.content, str): + system_instruction_parts = [{"text": msg.content}] + elif isinstance(msg.content, list): + system_instruction_parts = [ + part.convert_for_api("gemini") for part in msg.content + ] + continue + + elif msg.role == "user": + if isinstance(msg.content, str): + current_parts.append({"text": msg.content}) + elif isinstance(msg.content, list): + for part_obj in msg.content: + current_parts.append(part_obj.convert_for_api("gemini")) + gemini_contents.append({"role": "user", "parts": current_parts}) + + elif msg.role == "assistant" or msg.role == "model": + if isinstance(msg.content, str) and msg.content: + current_parts.append({"text": msg.content}) + elif isinstance(msg.content, list): + for part_obj in msg.content: + current_parts.append(part_obj.convert_for_api("gemini")) + + if msg.tool_calls: 
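+                    # Assistant tool calls are replayed as Gemini "functionCall" parts;
+                    # each call's arguments field is a JSON string, so it is parsed back into an object below.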
+ import json + + for call in msg.tool_calls: + current_parts.append( + { + "functionCall": { + "name": call.function.name, + "args": json.loads(call.function.arguments), + } + } + ) + if current_parts: + gemini_contents.append({"role": "model", "parts": current_parts}) + + elif msg.role == "tool": + if not msg.name: + raise ValueError("Gemini 工具消息必须包含 'name' 字段(函数名)。") + + import json + + try: + content_str = ( + msg.content + if isinstance(msg.content, str) + else str(msg.content) + ) + tool_result_obj = json.loads(content_str) + except json.JSONDecodeError: + content_str = ( + msg.content + if isinstance(msg.content, str) + else str(msg.content) + ) + logger.warning( + f"工具 {msg.name} 的结果不是有效的 JSON: {content_str}. " + f"包装为原始字符串。" + ) + tool_result_obj = {"raw_output": content_str} + + current_parts.append( + { + "functionResponse": { + "name": msg.name, + "response": tool_result_obj, + } + } + ) + gemini_contents.append({"role": "function", "parts": current_parts}) + + body: dict[str, Any] = {"contents": gemini_contents} + + if system_instruction_parts: + body["systemInstruction"] = {"parts": system_instruction_parts} + + all_tools_for_request = [] + if tools: + for tool_item in tools: + if isinstance(tool_item, dict): + if "name" in tool_item and "description" in tool_item: + all_tools_for_request.append( + {"functionDeclarations": [tool_item]} + ) + else: + all_tools_for_request.append(tool_item) + else: + all_tools_for_request.append(tool_item) + + if effective_config: + if getattr(effective_config, "enable_grounding", False): + has_explicit_gs_tool = any( + "googleSearch" in tool_item for tool_item in all_tools_for_request + ) + if not has_explicit_gs_tool: + all_tools_for_request.append({"googleSearch": {}}) + logger.debug("隐式启用 Google Search 工具进行信息来源关联。") + + if getattr(effective_config, "enable_code_execution", False): + has_explicit_ce_tool = any( + "codeExecution" in tool_item for tool_item in all_tools_for_request + ) + if not has_explicit_ce_tool: + all_tools_for_request.append({"codeExecution": {}}) + logger.debug("隐式启用代码执行工具。") + + if all_tools_for_request: + gemini_api_tools = self._convert_tools_to_gemini_format( + all_tools_for_request + ) + if gemini_api_tools: + body["tools"] = gemini_api_tools + + final_tool_choice = tool_choice + if final_tool_choice is None and effective_config: + final_tool_choice = getattr(effective_config, "tool_choice", None) + + if final_tool_choice: + if isinstance(final_tool_choice, str): + mode_upper = final_tool_choice.upper() + if mode_upper in ["AUTO", "NONE", "ANY"]: + body["toolConfig"] = {"functionCallingConfig": {"mode": mode_upper}} + else: + body["toolConfig"] = self._convert_tool_choice_to_gemini( + final_tool_choice + ) + else: + body["toolConfig"] = self._convert_tool_choice_to_gemini( + final_tool_choice + ) + + final_generation_config = self._build_gemini_generation_config( + model, effective_config + ) + if final_generation_config: + body["generationConfig"] = final_generation_config + + safety_settings = self._build_safety_settings(effective_config) + if safety_settings: + body["safetySettings"] = safety_settings + + return RequestData(url=url, headers=headers, body=body) + + def apply_config_override( + self, + model: "LLMModel", + body: dict[str, Any], + config: "LLMGenerationConfig | None" = None, + ) -> dict[str, Any]: + """应用配置覆盖 - Gemini 不需要额外的配置覆盖""" + return body + + def _get_gemini_endpoint( + self, model: "LLMModel", config: "LLMGenerationConfig | None" = None + ) -> str: + """根据配置选择Gemini API端点""" + if config: + 
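+            # Note: every branch below currently resolves to the same ":generateContent" endpoint.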
if getattr(config, "enable_code_execution", False): + return f"/v1beta/models/{model.model_name}:generateContent" + + if getattr(config, "enable_grounding", False): + return f"/v1beta/models/{model.model_name}:generateContent" + + return f"/v1beta/models/{model.model_name}:generateContent" + + def _convert_tools_to_gemini_format( + self, tools: list[dict[str, Any]] + ) -> list[dict[str, Any]]: + """转换工具格式为Gemini格式""" + gemini_tools = [] + + for tool in tools: + if tool.get("type") == "function": + func = tool["function"] + gemini_tool = { + "functionDeclarations": [ + { + "name": func["name"], + "description": func.get("description", ""), + "parameters": func.get("parameters", {}), + } + ] + } + gemini_tools.append(gemini_tool) + elif tool.get("type") == "code_execution": + gemini_tools.append( + {"codeExecution": {"language": tool.get("language", "python")}} + ) + elif tool.get("type") == "google_search": + gemini_tools.append({"googleSearch": {}}) + elif "googleSearch" in tool: + gemini_tools.append({"googleSearch": tool["googleSearch"]}) + elif "codeExecution" in tool: + gemini_tools.append({"codeExecution": tool["codeExecution"]}) + + return gemini_tools + + def _convert_tool_choice_to_gemini( + self, tool_choice_value: str | dict[str, Any] + ) -> dict[str, Any]: + """转换工具选择策略为Gemini格式""" + if isinstance(tool_choice_value, str): + mode_upper = tool_choice_value.upper() + if mode_upper in ["AUTO", "NONE", "ANY"]: + return {"functionCallingConfig": {"mode": mode_upper}} + else: + logger.warning( + f"不支持的 tool_choice 字符串值: '{tool_choice_value}'。" + f"回退到 AUTO。" + ) + return {"functionCallingConfig": {"mode": "AUTO"}} + + elif isinstance(tool_choice_value, dict): + if ( + tool_choice_value.get("type") == "function" + and "function" in tool_choice_value + ): + func_name = tool_choice_value["function"].get("name") + if func_name: + return { + "functionCallingConfig": { + "mode": "ANY", + "allowedFunctionNames": [func_name], + } + } + else: + logger.warning( + f"tool_choice dict 中的函数名无效: {tool_choice_value}。" + f"回退到 AUTO。" + ) + return {"functionCallingConfig": {"mode": "AUTO"}} + + elif "functionCallingConfig" in tool_choice_value: + return { + "functionCallingConfig": tool_choice_value["functionCallingConfig"] + } + + else: + logger.warning( + f"不支持的 tool_choice dict 值: {tool_choice_value}。回退到 AUTO。" + ) + return {"functionCallingConfig": {"mode": "AUTO"}} + + logger.warning( + f"tool_choice 的类型无效: {type(tool_choice_value)}。回退到 AUTO。" + ) + return {"functionCallingConfig": {"mode": "AUTO"}} + + def _build_gemini_generation_config( + self, model: "LLMModel", config: "LLMGenerationConfig | None" = None + ) -> dict[str, Any]: + """构建Gemini生成配置""" + generation_config: dict[str, Any] = {} + + effective_config = config if config is not None else model._generation_config + + if effective_config: + base_api_params = effective_config.to_api_params( + api_type="gemini", model_name=model.model_name + ) + generation_config.update(base_api_params) + + if getattr(effective_config, "response_mime_type", None): + generation_config["responseMimeType"] = ( + effective_config.response_mime_type + ) + + if getattr(effective_config, "response_schema", None): + generation_config["responseSchema"] = effective_config.response_schema + + thinking_budget = getattr(effective_config, "thinking_budget", None) + if thinking_budget is not None: + if "thinkingConfig" not in generation_config: + generation_config["thinkingConfig"] = {} + generation_config["thinkingConfig"]["thinkingBudget"] = thinking_budget + + if 
getattr(effective_config, "response_modalities", None): + modalities = effective_config.response_modalities + if isinstance(modalities, list): + generation_config["responseModalities"] = [ + m.upper() for m in modalities + ] + elif isinstance(modalities, str): + generation_config["responseModalities"] = [modalities.upper()] + + generation_config = { + k: v for k, v in generation_config.items() if v is not None + } + + if generation_config: + param_keys = list(generation_config.keys()) + logger.debug( + f"构建Gemini生成配置完成,包含 {len(generation_config)} 个参数: " + f"{param_keys}" + ) + + return generation_config + + def _build_safety_settings( + self, config: "LLMGenerationConfig | None" = None + ) -> list[dict[str, Any]] | None: + """构建安全设置""" + if not config: + return None + + safety_settings = [] + + safety_categories = [ + "HARM_CATEGORY_HARASSMENT", + "HARM_CATEGORY_HATE_SPEECH", + "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "HARM_CATEGORY_DANGEROUS_CONTENT", + ] + + custom_safety_settings = getattr(config, "safety_settings", None) + if custom_safety_settings: + for category, threshold in custom_safety_settings.items(): + safety_settings.append({"category": category, "threshold": threshold}) + else: + for category in safety_categories: + safety_settings.append( + {"category": category, "threshold": "BLOCK_MEDIUM_AND_ABOVE"} + ) + + return safety_settings if safety_settings else None + + def parse_response( + self, + model: "LLMModel", + response_json: dict[str, Any], + is_advanced: bool = False, + ) -> ResponseData: + """解析API响应""" + return self._parse_response(model, response_json, is_advanced) + + def _parse_response( + self, + model: "LLMModel", + response_json: dict[str, Any], + is_advanced: bool = False, + ) -> ResponseData: + """解析 Gemini API 响应""" + _ = is_advanced + self.validate_response(response_json) + + try: + candidates = response_json.get("candidates", []) + if not candidates: + logger.debug("Gemini响应中没有candidates。") + return ResponseData(text="", raw_response=response_json) + + candidate = candidates[0] + + if candidate.get("finishReason") in [ + "RECITATION", + "OTHER", + ] and not candidate.get("content"): + logger.warning( + f"Gemini candidate finished with reason " + f"'{candidate.get('finishReason')}' and no content." 
+ ) + return ResponseData( + text="", + raw_response=response_json, + usage_info=response_json.get("usageMetadata"), + ) + + content_data = candidate.get("content", {}) + parts = content_data.get("parts", []) + + text_content = "" + parsed_tool_calls: list["LLMToolCall"] | None = None + + for part in parts: + if "text" in part: + text_content += part["text"] + elif "functionCall" in part: + if parsed_tool_calls is None: + parsed_tool_calls = [] + fc_data = part["functionCall"] + try: + import json + + from ..types.models import LLMToolCall, LLMToolFunction + + call_id = f"call_{model.provider_name}_{len(parsed_tool_calls)}" + parsed_tool_calls.append( + LLMToolCall( + id=call_id, + function=LLMToolFunction( + name=fc_data["name"], + arguments=json.dumps(fc_data["args"]), + ), + ) + ) + except KeyError as e: + logger.warning( + f"解析Gemini functionCall时缺少键: {fc_data}, 错误: {e}" + ) + except Exception as e: + logger.warning( + f"解析Gemini functionCall时出错: {fc_data}, 错误: {e}" + ) + elif "codeExecutionResult" in part: + result = part["codeExecutionResult"] + if result.get("outcome") == "OK": + output = result.get("output", "") + text_content += f"\n[代码执行结果]:\n{output}\n" + else: + text_content += ( + f"\n[代码执行失败]: {result.get('outcome', 'UNKNOWN')}\n" + ) + + usage_info = response_json.get("usageMetadata") + + grounding_metadata_obj = None + if grounding_data := candidate.get("groundingMetadata"): + try: + from ..types.models import LLMGroundingMetadata + + grounding_metadata_obj = LLMGroundingMetadata(**grounding_data) + except Exception as e: + logger.warning(f"无法解析Grounding元数据: {grounding_data}, {e}") + + return ResponseData( + text=text_content, + tool_calls=parsed_tool_calls, + usage_info=usage_info, + raw_response=response_json, + grounding_metadata=grounding_metadata_obj, + ) + + except Exception as e: + logger.error(f"解析 Gemini 响应失败: {e}", e=e) + raise LLMException( + f"解析API响应失败: {e}", + code=LLMErrorCode.RESPONSE_PARSE_ERROR, + cause=e, + ) + + def prepare_embedding_request( + self, + model: "LLMModel", + api_key: str, + texts: list[str], + task_type: "EmbeddingTaskType | str", + **kwargs: Any, + ) -> RequestData: + """准备文本嵌入请求""" + api_model_name = model.model_name + if not api_model_name.startswith("models/"): + api_model_name = f"models/{api_model_name}" + + url = self.get_api_url(model, f"/{api_model_name}:batchEmbedContents") + headers = self.get_base_headers(api_key) + + requests_payload = [] + for text_content in texts: + request_item: dict[str, Any] = { + "content": {"parts": [{"text": text_content}]}, + } + + from ..types.enums import EmbeddingTaskType + + if task_type and task_type != EmbeddingTaskType.RETRIEVAL_DOCUMENT: + request_item["task_type"] = str(task_type).upper() + if title := kwargs.get("title"): + request_item["title"] = title + if output_dimensionality := kwargs.get("output_dimensionality"): + request_item["output_dimensionality"] = output_dimensionality + + requests_payload.append(request_item) + + body = {"requests": requests_payload} + return RequestData(url=url, headers=headers, body=body) + + def parse_embedding_response( + self, response_json: dict[str, Any] + ) -> list[list[float]]: + """解析文本嵌入响应""" + try: + embeddings_data = response_json["embeddings"] + return [item["values"] for item in embeddings_data] + except KeyError as e: + logger.error(f"解析Gemini嵌入响应时缺少键: {e}. 
响应: {response_json}") + raise LLMException( + "Gemini嵌入响应格式错误", + code=LLMErrorCode.RESPONSE_PARSE_ERROR, + details={"error": str(e)}, + ) + except Exception as e: + logger.error( + f"解析Gemini嵌入响应时发生未知错误: {e}. 响应: {response_json}" + ) + raise LLMException( + f"解析Gemini嵌入响应失败: {e}", + code=LLMErrorCode.RESPONSE_PARSE_ERROR, + cause=e, + ) + + def validate_embedding_response(self, response_json: dict[str, Any]) -> None: + """验证嵌入响应""" + super().validate_embedding_response(response_json) + if "embeddings" not in response_json or not isinstance( + response_json["embeddings"], list + ): + raise LLMException( + "Gemini嵌入响应缺少'embeddings'字段或格式不正确", + code=LLMErrorCode.RESPONSE_PARSE_ERROR, + details=response_json, + ) + for item in response_json["embeddings"]: + if "values" not in item: + raise LLMException( + "Gemini嵌入响应的条目中缺少'values'字段", + code=LLMErrorCode.RESPONSE_PARSE_ERROR, + details=response_json, + ) diff --git a/zhenxun/services/llm/adapters/openai.py b/zhenxun/services/llm/adapters/openai.py new file mode 100644 index 00000000..046f0277 --- /dev/null +++ b/zhenxun/services/llm/adapters/openai.py @@ -0,0 +1,57 @@ +""" +OpenAI API 适配器 + +支持 OpenAI、DeepSeek 和其他 OpenAI 兼容的 API 服务。 +""" + +from typing import TYPE_CHECKING + +from .base import OpenAICompatAdapter, RequestData + +if TYPE_CHECKING: + from ..service import LLMModel + + +class OpenAIAdapter(OpenAICompatAdapter): + """OpenAI兼容API适配器""" + + @property + def api_type(self) -> str: + return "openai" + + @property + def supported_api_types(self) -> list[str]: + return ["openai", "deepseek", "general_openai_compat"] + + def get_chat_endpoint(self) -> str: + """返回聊天完成端点""" + return "/v1/chat/completions" + + def get_embedding_endpoint(self) -> str: + """返回嵌入端点""" + return "/v1/embeddings" + + def prepare_simple_request( + self, + model: "LLMModel", + api_key: str, + prompt: str, + history: list[dict[str, str]] | None = None, + ) -> RequestData: + """准备简单文本生成请求 - OpenAI优化实现""" + url = self.get_api_url(model, self.get_chat_endpoint()) + headers = self.get_base_headers(api_key) + + messages = [] + if history: + messages.extend(history) + messages.append({"role": "user", "content": prompt}) + + body = { + "model": model.model_name, + "messages": messages, + } + + body = self.apply_config_override(model, body) + + return RequestData(url=url, headers=headers, body=body) diff --git a/zhenxun/services/llm/adapters/zhipu.py b/zhenxun/services/llm/adapters/zhipu.py new file mode 100644 index 00000000..e5eb032f --- /dev/null +++ b/zhenxun/services/llm/adapters/zhipu.py @@ -0,0 +1,57 @@ +""" +智谱 AI API 适配器 + +支持智谱 AI 的 GLM 系列模型,使用 OpenAI 兼容的接口格式。 +""" + +from typing import TYPE_CHECKING + +from .base import OpenAICompatAdapter, RequestData + +if TYPE_CHECKING: + from ..service import LLMModel + + +class ZhipuAdapter(OpenAICompatAdapter): + """智谱AI适配器 - 使用智谱AI专用的OpenAI兼容接口""" + + @property + def api_type(self) -> str: + return "zhipu" + + @property + def supported_api_types(self) -> list[str]: + return ["zhipu"] + + def get_chat_endpoint(self) -> str: + """返回智谱AI聊天完成端点""" + return "/api/paas/v4/chat/completions" + + def get_embedding_endpoint(self) -> str: + """返回智谱AI嵌入端点""" + return "/v4/embeddings" + + def prepare_simple_request( + self, + model: "LLMModel", + api_key: str, + prompt: str, + history: list[dict[str, str]] | None = None, + ) -> RequestData: + """准备简单文本生成请求 - 智谱AI优化实现""" + url = self.get_api_url(model, self.get_chat_endpoint()) + headers = self.get_base_headers(api_key) + + messages = [] + if history: + messages.extend(history) + 
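+        # History entries use OpenAI-style dicts (role/content); the current prompt is appended as the final user message.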
messages.append({"role": "user", "content": prompt}) + + body = { + "model": model.model_name, + "messages": messages, + } + + body = self.apply_config_override(model, body) + + return RequestData(url=url, headers=headers, body=body) diff --git a/zhenxun/services/llm/api.py b/zhenxun/services/llm/api.py new file mode 100644 index 00000000..7aaed437 --- /dev/null +++ b/zhenxun/services/llm/api.py @@ -0,0 +1,530 @@ +""" +LLM 服务的高级 API 接口 +""" + +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Any + +from nonebot_plugin_alconna.uniseg import UniMessage + +from zhenxun.services.log import logger + +from .config import CommonOverrides, LLMGenerationConfig +from .config.providers import get_ai_config +from .manager import get_global_default_model_name, get_model_instance +from .types import ( + EmbeddingTaskType, + LLMContentPart, + LLMErrorCode, + LLMException, + LLMMessage, + LLMResponse, + LLMTool, + ModelName, +) +from .utils import create_multimodal_message, unimsg_to_llm_parts + + +class TaskType(Enum): + """任务类型枚举""" + + CHAT = "chat" + CODE = "code" + SEARCH = "search" + ANALYSIS = "analysis" + GENERATION = "generation" + MULTIMODAL = "multimodal" + + +@dataclass +class AIConfig: + """AI配置类 - 简化版本""" + + model: ModelName = None + default_embedding_model: ModelName = None + temperature: float | None = None + max_tokens: int | None = None + enable_cache: bool = False + enable_code: bool = False + enable_search: bool = False + timeout: int | None = None + + enable_gemini_json_mode: bool = False + enable_gemini_thinking: bool = False + enable_gemini_safe_mode: bool = False + enable_gemini_multimodal: bool = False + enable_gemini_grounding: bool = False + + def __post_init__(self): + """初始化后从配置中读取默认值""" + ai_config = get_ai_config() + if self.model is None: + self.model = ai_config.get("default_model_name") + if self.timeout is None: + self.timeout = ai_config.get("timeout", 180) + + +class AI: + """统一的AI服务类 - 平衡设计版本 + + 提供三层API: + 1. 简单方法:ai.chat(), ai.code(), ai.search() + 2. 标准方法:ai.analyze() 支持复杂参数 + 3. 高级方法:通过get_model_instance()直接访问 + """ + + def __init__( + self, config: AIConfig | None = None, history: list[LLMMessage] | None = None + ): + """ + 初始化AI服务 + + Args: + config: AI 配置. + history: 可选的初始对话历史. + """ + self.config = config or AIConfig() + self.history = history or [] + + def clear_history(self): + """清空当前会话的历史记录""" + self.history = [] + logger.info("AI session history cleared.") + + async def chat( + self, + message: str | LLMMessage | list[LLMContentPart], + *, + model: ModelName = None, + **kwargs: Any, + ) -> str: + """ + 进行一次聊天对话。 + 此方法会自动使用和更新会话内的历史记录。 + """ + current_message: LLMMessage + if isinstance(message, str): + current_message = LLMMessage.user(message) + elif isinstance(message, list) and all( + isinstance(part, LLMContentPart) for part in message + ): + current_message = LLMMessage.user(message) + elif isinstance(message, LLMMessage): + current_message = message + else: + raise LLMException( + f"AI.chat 不支持的消息类型: {type(message)}. " + "请使用 str, LLMMessage, 或 list[LLMContentPart]. 
" + "对于更复杂的多模态输入或文件路径,请使用 AI.analyze().", + code=LLMErrorCode.API_REQUEST_FAILED, + ) + + final_messages = [*self.history, current_message] + + response = await self._execute_generation( + final_messages, model, "聊天失败", kwargs + ) + + self.history.append(current_message) + self.history.append(LLMMessage.assistant_text_response(response.text)) + + return response.text + + async def code( + self, + prompt: str, + *, + model: ModelName = None, + timeout: int | None = None, + **kwargs: Any, + ) -> dict[str, Any]: + """代码执行""" + resolved_model = model or self.config.model or "Gemini/gemini-2.0-flash" + + config = CommonOverrides.gemini_code_execution() + if timeout: + config.custom_params = config.custom_params or {} + config.custom_params["code_execution_timeout"] = timeout + + messages = [LLMMessage.user(prompt)] + + response = await self._execute_generation( + messages, resolved_model, "代码执行失败", kwargs, base_config=config + ) + + return { + "text": response.text, + "code_executions": response.code_executions or [], + "success": True, + } + + async def search( + self, + query: str | UniMessage, + *, + model: ModelName = None, + instruction: str = "", + **kwargs: Any, + ) -> dict[str, Any]: + """信息搜索 - 支持多模态输入""" + resolved_model = model or self.config.model or "Gemini/gemini-2.0-flash" + config = CommonOverrides.gemini_grounding() + + if isinstance(query, str): + messages = [LLMMessage.user(query)] + elif isinstance(query, UniMessage): + content_parts = await unimsg_to_llm_parts(query) + + final_messages: list[LLMMessage] = [] + if instruction: + final_messages.append(LLMMessage.system(instruction)) + + if not content_parts: + if instruction: + final_messages.append(LLMMessage.user(instruction)) + else: + raise LLMException( + "搜索内容为空或无法处理。", code=LLMErrorCode.API_REQUEST_FAILED + ) + else: + final_messages.append(LLMMessage.user(content_parts)) + + messages = final_messages + else: + raise LLMException( + f"不支持的搜索输入类型: {type(query)}. 
请使用 str 或 UniMessage.", + code=LLMErrorCode.API_REQUEST_FAILED, + ) + + response = await self._execute_generation( + messages, resolved_model, "信息搜索失败", kwargs, base_config=config + ) + + result = { + "text": response.text, + "sources": [], + "queries": [], + "success": True, + } + + if response.grounding_metadata: + result["sources"] = response.grounding_metadata.grounding_attributions or [] + result["queries"] = response.grounding_metadata.web_search_queries or [] + + return result + + async def analyze( + self, + message: UniMessage, + *, + instruction: str = "", + model: ModelName = None, + tools: list[dict[str, Any]] | None = None, + tool_config: dict[str, Any] | None = None, + **kwargs: Any, + ) -> str | LLMResponse: + """ + 内容分析 - 接收 UniMessage 物件进行多模态分析和工具呼叫。 + 这是处理复杂互动的主要方法。 + """ + content_parts = await unimsg_to_llm_parts(message) + + final_messages: list[LLMMessage] = [] + if instruction: + final_messages.append(LLMMessage.system(instruction)) + + if not content_parts: + if instruction: + final_messages.append(LLMMessage.user(instruction)) + else: + raise LLMException( + "分析内容为空或无法处理。", code=LLMErrorCode.API_REQUEST_FAILED + ) + else: + final_messages.append(LLMMessage.user(content_parts)) + + llm_tools = None + if tools: + llm_tools = [] + for tool_dict in tools: + if isinstance(tool_dict, dict): + if "name" in tool_dict and "description" in tool_dict: + llm_tool = LLMTool( + type="function", + function={ + "name": tool_dict["name"], + "description": tool_dict["description"], + "parameters": tool_dict.get("parameters", {}), + }, + ) + llm_tools.append(llm_tool) + else: + llm_tools.append(LLMTool(**tool_dict)) + else: + llm_tools.append(tool_dict) + + tool_choice = None + if tool_config: + mode = tool_config.get("mode", "auto") + if mode == "auto": + tool_choice = "auto" + elif mode == "any": + tool_choice = "any" + elif mode == "none": + tool_choice = "none" + + response = await self._execute_generation( + final_messages, + model, + "内容分析失败", + kwargs, + llm_tools=llm_tools, + tool_choice=tool_choice, + ) + + if response.tool_calls: + return response + return response.text + + async def _execute_generation( + self, + messages: list[LLMMessage], + model_name: ModelName, + error_message: str, + config_overrides: dict[str, Any], + llm_tools: list[LLMTool] | None = None, + tool_choice: str | dict[str, Any] | None = None, + base_config: LLMGenerationConfig | None = None, + ) -> LLMResponse: + """通用的生成执行方法,封装重复的模型获取、配置合并和异常处理逻辑""" + try: + resolved_model_name = self._resolve_model_name( + model_name or self.config.model + ) + final_config_dict = self._merge_config( + config_overrides, base_config=base_config + ) + + async with await get_model_instance( + resolved_model_name, override_config=final_config_dict + ) as model_instance: + return await model_instance.generate_response( + messages, tools=llm_tools, tool_choice=tool_choice + ) + except LLMException: + raise + except Exception as e: + logger.error(f"{error_message}: {e}", e=e) + raise LLMException(f"{error_message}: {e}", cause=e) + + def _resolve_model_name(self, model_name: ModelName) -> str: + """解析模型名称""" + if model_name: + return model_name + + default_model = get_global_default_model_name() + if default_model: + return default_model + + raise LLMException( + "未指定模型名称且未设置全局默认模型", + code=LLMErrorCode.MODEL_NOT_FOUND, + ) + + def _merge_config( + self, + user_config: dict[str, Any], + base_config: LLMGenerationConfig | None = None, + ) -> dict[str, Any]: + """合并配置""" + final_config = {} + if base_config: + 
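+            # Merge order: preset base_config first, then AIConfig fields, then explicit user overrides (applied last, so they win).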
final_config.update(base_config.to_dict()) + + if self.config.temperature is not None: + final_config["temperature"] = self.config.temperature + if self.config.max_tokens is not None: + final_config["max_tokens"] = self.config.max_tokens + + if self.config.enable_cache: + final_config["enable_caching"] = True + if self.config.enable_code: + final_config["enable_code_execution"] = True + if self.config.enable_search: + final_config["enable_grounding"] = True + + if self.config.enable_gemini_json_mode: + final_config["response_mime_type"] = "application/json" + if self.config.enable_gemini_thinking: + final_config["thinking_budget"] = 0.8 + if self.config.enable_gemini_safe_mode: + final_config["safety_settings"] = ( + CommonOverrides.gemini_safe().safety_settings + ) + if self.config.enable_gemini_multimodal: + final_config.update(CommonOverrides.gemini_multimodal().to_dict()) + if self.config.enable_gemini_grounding: + final_config["enable_grounding"] = True + + final_config.update(user_config) + + return final_config + + async def embed( + self, + texts: list[str] | str, + *, + model: ModelName = None, + task_type: EmbeddingTaskType | str = EmbeddingTaskType.RETRIEVAL_DOCUMENT, + **kwargs: Any, + ) -> list[list[float]]: + """生成文本嵌入向量""" + if isinstance(texts, str): + texts = [texts] + if not texts: + return [] + + try: + resolved_model_str = ( + model or self.config.default_embedding_model or self.config.model + ) + if not resolved_model_str: + raise LLMException( + "使用 embed 功能时必须指定嵌入模型名称," + "或在 AIConfig 中配置 default_embedding_model。", + code=LLMErrorCode.MODEL_NOT_FOUND, + ) + resolved_model_str = self._resolve_model_name(resolved_model_str) + + async with await get_model_instance( + resolved_model_str, + override_config=None, + ) as embedding_model_instance: + return await embedding_model_instance.generate_embeddings( + texts, task_type=task_type, **kwargs + ) + except LLMException: + raise + except Exception as e: + logger.error(f"文本嵌入失败: {e}", e=e) + raise LLMException( + f"文本嵌入失败: {e}", code=LLMErrorCode.EMBEDDING_FAILED, cause=e + ) + + +async def chat( + message: str | LLMMessage | list[LLMContentPart], + *, + model: ModelName = None, + **kwargs: Any, +) -> str: + """聊天对话便捷函数""" + ai = AI() + return await ai.chat(message, model=model, **kwargs) + + +async def code( + prompt: str, + *, + model: ModelName = None, + timeout: int | None = None, + **kwargs: Any, +) -> dict[str, Any]: + """代码执行便捷函数""" + ai = AI() + return await ai.code(prompt, model=model, timeout=timeout, **kwargs) + + +async def search( + query: str | UniMessage, + *, + model: ModelName = None, + instruction: str = "", + **kwargs: Any, +) -> dict[str, Any]: + """信息搜索便捷函数""" + ai = AI() + return await ai.search(query, model=model, instruction=instruction, **kwargs) + + +async def analyze( + message: UniMessage, + *, + instruction: str = "", + model: ModelName = None, + tools: list[dict[str, Any]] | None = None, + tool_config: dict[str, Any] | None = None, + **kwargs: Any, +) -> str | LLMResponse: + """内容分析便捷函数""" + ai = AI() + return await ai.analyze( + message, + instruction=instruction, + model=model, + tools=tools, + tool_config=tool_config, + **kwargs, + ) + + +async def analyze_with_images( + text: str, + images: list[str | Path | bytes] | str | Path | bytes, + *, + instruction: str = "", + model: ModelName = None, + **kwargs: Any, +) -> str | LLMResponse: + """图片分析便捷函数""" + message = create_multimodal_message(text=text, images=images) + return await analyze(message, instruction=instruction, model=model, **kwargs) 
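+
+# Usage sketch (the file name below is an illustrative assumption):
+#   result = await analyze_with_images("What is in this picture?", images="demo.jpg")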
+ + +async def analyze_multimodal( + text: str | None = None, + images: list[str | Path | bytes] | str | Path | bytes | None = None, + videos: list[str | Path | bytes] | str | Path | bytes | None = None, + audios: list[str | Path | bytes] | str | Path | bytes | None = None, + *, + instruction: str = "", + model: ModelName = None, + **kwargs: Any, +) -> str | LLMResponse: + """多模态分析便捷函数""" + message = create_multimodal_message( + text=text, images=images, videos=videos, audios=audios + ) + return await analyze(message, instruction=instruction, model=model, **kwargs) + + +async def search_multimodal( + text: str | None = None, + images: list[str | Path | bytes] | str | Path | bytes | None = None, + videos: list[str | Path | bytes] | str | Path | bytes | None = None, + audios: list[str | Path | bytes] | str | Path | bytes | None = None, + *, + instruction: str = "", + model: ModelName = None, + **kwargs: Any, +) -> dict[str, Any]: + """多模态搜索便捷函数""" + message = create_multimodal_message( + text=text, images=images, videos=videos, audios=audios + ) + ai = AI() + return await ai.search(message, model=model, instruction=instruction, **kwargs) + + +async def embed( + texts: list[str] | str, + *, + model: ModelName = None, + task_type: EmbeddingTaskType | str = EmbeddingTaskType.RETRIEVAL_DOCUMENT, + **kwargs: Any, +) -> list[list[float]]: + """文本嵌入便捷函数""" + ai = AI() + return await ai.embed(texts, model=model, task_type=task_type, **kwargs) diff --git a/zhenxun/services/llm/config/__init__.py b/zhenxun/services/llm/config/__init__.py new file mode 100644 index 00000000..09fd9599 --- /dev/null +++ b/zhenxun/services/llm/config/__init__.py @@ -0,0 +1,35 @@ +""" +LLM 配置模块 + +提供生成配置、预设配置和配置验证功能。 +""" + +from .generation import ( + LLMGenerationConfig, + ModelConfigOverride, + apply_api_specific_mappings, + create_generation_config_from_kwargs, + validate_override_params, +) +from .presets import CommonOverrides +from .providers import ( + LLMConfig, + get_llm_config, + register_llm_configs, + set_default_model, + validate_llm_config, +) + +__all__ = [ + "CommonOverrides", + "LLMConfig", + "LLMGenerationConfig", + "ModelConfigOverride", + "apply_api_specific_mappings", + "create_generation_config_from_kwargs", + "get_llm_config", + "register_llm_configs", + "set_default_model", + "validate_llm_config", + "validate_override_params", +] diff --git a/zhenxun/services/llm/config/generation.py b/zhenxun/services/llm/config/generation.py new file mode 100644 index 00000000..a143dedd --- /dev/null +++ b/zhenxun/services/llm/config/generation.py @@ -0,0 +1,260 @@ +""" +LLM 生成配置相关类和函数 +""" + +from typing import Any + +from pydantic import BaseModel, Field + +from zhenxun.services.log import logger + +from ..types.enums import ResponseFormat +from ..types.exceptions import LLMErrorCode, LLMException + + +class ModelConfigOverride(BaseModel): + """模型配置覆盖参数""" + + temperature: float | None = Field( + default=None, ge=0.0, le=2.0, description="生成温度" + ) + max_tokens: int | None = Field(default=None, gt=0, description="最大输出token数") + top_p: float | None = Field(default=None, ge=0.0, le=1.0, description="核采样参数") + top_k: int | None = Field(default=None, gt=0, description="Top-K采样参数") + frequency_penalty: float | None = Field( + default=None, ge=-2.0, le=2.0, description="频率惩罚" + ) + presence_penalty: float | None = Field( + default=None, ge=-2.0, le=2.0, description="存在惩罚" + ) + repetition_penalty: float | None = Field( + default=None, ge=0.0, le=2.0, description="重复惩罚" + ) + + stop: list[str] | str | None = 
Field(default=None, description="停止序列") + + response_format: ResponseFormat | dict[str, Any] | None = Field( + default=None, description="期望的响应格式" + ) + response_mime_type: str | None = Field( + default=None, description="响应MIME类型(Gemini专用)" + ) + response_schema: dict[str, Any] | None = Field( + default=None, description="JSON响应模式" + ) + thinking_budget: float | None = Field( + default=None, ge=0.0, le=1.0, description="思考预算" + ) + safety_settings: dict[str, str] | None = Field(default=None, description="安全设置") + response_modalities: list[str] | None = Field( + default=None, description="响应模态类型" + ) + + enable_code_execution: bool | None = Field( + default=None, description="是否启用代码执行" + ) + enable_grounding: bool | None = Field( + default=None, description="是否启用信息来源关联" + ) + enable_caching: bool | None = Field(default=None, description="是否启用响应缓存") + + custom_params: dict[str, Any] | None = Field(default=None, description="自定义参数") + + def to_dict(self) -> dict[str, Any]: + """转换为字典,排除None值""" + result = {} + model_data = getattr(self, "model_dump", lambda: {})() + if not model_data: + model_data = {} + for field_name, _ in self.__class__.__dict__.get( + "model_fields", {} + ).items(): + value = getattr(self, field_name, None) + if value is not None: + model_data[field_name] = value + for key, value in model_data.items(): + if value is not None: + if key == "custom_params" and isinstance(value, dict): + result.update(value) + else: + result[key] = value + return result + + def merge_with_base_config( + self, + base_temperature: float | None = None, + base_max_tokens: int | None = None, + ) -> dict[str, Any]: + """与基础配置合并,覆盖参数优先""" + merged = {} + + if base_temperature is not None: + merged["temperature"] = base_temperature + if base_max_tokens is not None: + merged["max_tokens"] = base_max_tokens + + override_dict = self.to_dict() + merged.update(override_dict) + + return merged + + +class LLMGenerationConfig(ModelConfigOverride): + """LLM 生成配置,继承模型配置覆盖参数""" + + def to_api_params(self, api_type: str, model_name: str) -> dict[str, Any]: + """转换为API参数,支持不同API类型的参数名映射""" + _ = model_name + params = {} + + if self.temperature is not None: + params["temperature"] = self.temperature + + if self.max_tokens is not None: + if api_type in ["gemini", "gemini_native"]: + params["maxOutputTokens"] = self.max_tokens + else: + params["max_tokens"] = self.max_tokens + + if api_type in ["gemini", "gemini_native"]: + if self.top_k is not None: + params["topK"] = self.top_k + if self.top_p is not None: + params["topP"] = self.top_p + else: + if self.top_k is not None: + params["top_k"] = self.top_k + if self.top_p is not None: + params["top_p"] = self.top_p + + if api_type in ["openai", "deepseek", "zhipu", "general_openai_compat"]: + if self.frequency_penalty is not None: + params["frequency_penalty"] = self.frequency_penalty + if self.presence_penalty is not None: + params["presence_penalty"] = self.presence_penalty + + if self.repetition_penalty is not None: + if api_type == "openai": + logger.warning("OpenAI官方API不支持repetition_penalty参数,已忽略") + else: + params["repetition_penalty"] = self.repetition_penalty + + if self.response_format is not None: + if isinstance(self.response_format, dict): + if api_type in ["openai", "zhipu", "deepseek", "general_openai_compat"]: + params["response_format"] = self.response_format + logger.debug( + f"为 {api_type} 使用自定义 response_format: " + f"{self.response_format}" + ) + elif self.response_format == ResponseFormat.JSON: + if api_type in ["openai", "zhipu", "deepseek", 
"general_openai_compat"]: + params["response_format"] = {"type": "json_object"} + logger.debug(f"为 {api_type} 启用 JSON 对象输出模式") + elif api_type in ["gemini", "gemini_native"]: + params["responseMimeType"] = "application/json" + if self.response_schema: + params["responseSchema"] = self.response_schema + logger.debug(f"为 {api_type} 启用 JSON MIME 类型输出模式") + + if api_type in ["gemini", "gemini_native"]: + if ( + self.response_format != ResponseFormat.JSON + and self.response_mime_type is not None + ): + params["responseMimeType"] = self.response_mime_type + logger.debug( + f"使用显式设置的 responseMimeType: {self.response_mime_type}" + ) + + if self.response_schema is not None and "responseSchema" not in params: + params["responseSchema"] = self.response_schema + if self.thinking_budget is not None: + params["thinkingBudget"] = self.thinking_budget + if self.safety_settings is not None: + params["safetySettings"] = self.safety_settings + if self.response_modalities is not None: + params["responseModalities"] = self.response_modalities + + if self.custom_params: + custom_mapped = apply_api_specific_mappings(self.custom_params, api_type) + params.update(custom_mapped) + + logger.debug(f"为{api_type}转换配置参数: {len(params)}个参数") + return params + + +def validate_override_params( + override_config: dict[str, Any] | LLMGenerationConfig | None, +) -> LLMGenerationConfig: + """验证和标准化覆盖参数""" + if override_config is None: + return LLMGenerationConfig() + + if isinstance(override_config, dict): + try: + filtered_config = { + k: v for k, v in override_config.items() if v is not None + } + return LLMGenerationConfig(**filtered_config) + except Exception as e: + logger.warning(f"覆盖配置参数验证失败: {e}") + raise LLMException( + f"无效的覆盖配置参数: {e}", + code=LLMErrorCode.CONFIGURATION_ERROR, + cause=e, + ) + + return override_config + + +def apply_api_specific_mappings( + params: dict[str, Any], api_type: str +) -> dict[str, Any]: + """应用API特定的参数映射""" + mapped_params = params.copy() + + if api_type in ["gemini", "gemini_native"]: + if "max_tokens" in mapped_params: + mapped_params["maxOutputTokens"] = mapped_params.pop("max_tokens") + if "top_k" in mapped_params: + mapped_params["topK"] = mapped_params.pop("top_k") + if "top_p" in mapped_params: + mapped_params["topP"] = mapped_params.pop("top_p") + + unsupported = ["frequency_penalty", "presence_penalty", "repetition_penalty"] + for param in unsupported: + if param in mapped_params: + logger.warning(f"Gemini 原生API不支持参数 '{param}',已忽略") + mapped_params.pop(param) + + elif api_type in ["openai", "deepseek", "zhipu", "general_openai_compat"]: + if "repetition_penalty" in mapped_params and api_type == "openai": + logger.warning("OpenAI官方API不支持repetition_penalty参数,已忽略") + mapped_params.pop("repetition_penalty") + + if "stop" in mapped_params: + stop_value = mapped_params["stop"] + if isinstance(stop_value, str): + mapped_params["stop"] = [stop_value] + + return mapped_params + + +def create_generation_config_from_kwargs(**kwargs) -> LLMGenerationConfig: + """从关键字参数创建生成配置""" + model_fields = getattr(LLMGenerationConfig, "model_fields", {}) + known_fields = set(model_fields.keys()) + known_params = {} + custom_params = {} + + for key, value in kwargs.items(): + if key in known_fields: + known_params[key] = value + else: + custom_params[key] = value + + if custom_params: + known_params["custom_params"] = custom_params + + return LLMGenerationConfig(**known_params) diff --git a/zhenxun/services/llm/config/presets.py b/zhenxun/services/llm/config/presets.py new file mode 100644 index 
00000000..7a6023d5 --- /dev/null +++ b/zhenxun/services/llm/config/presets.py @@ -0,0 +1,169 @@ +""" +LLM 预设配置 + +提供常用的配置预设,特别是针对 Gemini 的高级功能。 +""" + +from typing import Any + +from .generation import LLMGenerationConfig + + +class CommonOverrides: + """常用的配置覆盖预设""" + + @staticmethod + def creative() -> LLMGenerationConfig: + """创意模式:高温度,鼓励创新""" + return LLMGenerationConfig(temperature=0.9, top_p=0.95, frequency_penalty=0.1) + + @staticmethod + def precise() -> LLMGenerationConfig: + """精确模式:低温度,确定性输出""" + return LLMGenerationConfig(temperature=0.1, top_p=0.9, frequency_penalty=0.0) + + @staticmethod + def balanced() -> LLMGenerationConfig: + """平衡模式:中等温度""" + return LLMGenerationConfig(temperature=0.5, top_p=0.9, frequency_penalty=0.0) + + @staticmethod + def concise(max_tokens: int = 100) -> LLMGenerationConfig: + """简洁模式:限制输出长度""" + return LLMGenerationConfig( + temperature=0.3, + max_tokens=max_tokens, + stop=["\n\n", "。", "!", "?"], + ) + + @staticmethod + def detailed(max_tokens: int = 2000) -> LLMGenerationConfig: + """详细模式:鼓励详细输出""" + return LLMGenerationConfig( + temperature=0.7, max_tokens=max_tokens, frequency_penalty=-0.1 + ) + + @staticmethod + def gemini_json() -> LLMGenerationConfig: + """Gemini JSON模式:强制JSON输出""" + return LLMGenerationConfig( + temperature=0.3, response_mime_type="application/json" + ) + + @staticmethod + def gemini_thinking(budget: float = 0.8) -> LLMGenerationConfig: + """Gemini 思考模式:使用思考预算""" + return LLMGenerationConfig(temperature=0.7, thinking_budget=budget) + + @staticmethod + def gemini_creative() -> LLMGenerationConfig: + """Gemini 创意模式:高温度创意输出""" + return LLMGenerationConfig(temperature=0.9, top_p=0.95) + + @staticmethod + def gemini_structured(schema: dict[str, Any]) -> LLMGenerationConfig: + """Gemini 结构化输出:自定义JSON模式""" + return LLMGenerationConfig( + temperature=0.3, + response_mime_type="application/json", + response_schema=schema, + ) + + @staticmethod + def gemini_safe() -> LLMGenerationConfig: + """Gemini 安全模式:严格安全设置""" + return LLMGenerationConfig( + temperature=0.5, + safety_settings={ + "HARM_CATEGORY_HARASSMENT": "BLOCK_MEDIUM_AND_ABOVE", + "HARM_CATEGORY_HATE_SPEECH": "BLOCK_MEDIUM_AND_ABOVE", + "HARM_CATEGORY_SEXUALLY_EXPLICIT": "BLOCK_MEDIUM_AND_ABOVE", + "HARM_CATEGORY_DANGEROUS_CONTENT": "BLOCK_MEDIUM_AND_ABOVE", + }, + ) + + @staticmethod + def gemini_multimodal() -> LLMGenerationConfig: + """Gemini 多模态模式:优化多模态处理""" + return LLMGenerationConfig(temperature=0.6, max_tokens=2048, top_p=0.8) + + @staticmethod + def gemini_code_execution() -> LLMGenerationConfig: + """Gemini 代码执行模式:启用代码执行功能""" + return LLMGenerationConfig( + temperature=0.3, + max_tokens=4096, + enable_code_execution=True, + custom_params={"code_execution_timeout": 30}, + ) + + @staticmethod + def gemini_grounding() -> LLMGenerationConfig: + """Gemini 信息来源关联模式:启用Google搜索""" + return LLMGenerationConfig( + temperature=0.5, + max_tokens=4096, + enable_grounding=True, + custom_params={ + "grounding_config": {"dynamicRetrievalConfig": {"mode": "MODE_DYNAMIC"}} + }, + ) + + @staticmethod + def gemini_cached() -> LLMGenerationConfig: + """Gemini 缓存模式:启用响应缓存""" + return LLMGenerationConfig( + temperature=0.3, + max_tokens=2048, + enable_caching=True, + ) + + @staticmethod + def gemini_advanced() -> LLMGenerationConfig: + """Gemini 高级模式:启用所有高级功能""" + return LLMGenerationConfig( + temperature=0.5, + max_tokens=4096, + enable_code_execution=True, + enable_grounding=True, + enable_caching=True, + custom_params={ + "code_execution_timeout": 30, + "grounding_config": { + 
"dynamicRetrievalConfig": {"mode": "MODE_DYNAMIC"} + }, + }, + ) + + @staticmethod + def gemini_research() -> LLMGenerationConfig: + """Gemini 研究模式:思考+搜索+结构化输出""" + return LLMGenerationConfig( + temperature=0.6, + max_tokens=4096, + thinking_budget=0.8, + enable_grounding=True, + response_mime_type="application/json", + custom_params={ + "grounding_config": {"dynamicRetrievalConfig": {"mode": "MODE_DYNAMIC"}} + }, + ) + + @staticmethod + def gemini_analysis() -> LLMGenerationConfig: + """Gemini 分析模式:深度思考+详细输出""" + return LLMGenerationConfig( + temperature=0.4, + max_tokens=6000, + thinking_budget=0.9, + top_p=0.8, + ) + + @staticmethod + def gemini_fast_response() -> LLMGenerationConfig: + """Gemini 快速响应模式:低延迟+简洁输出""" + return LLMGenerationConfig( + temperature=0.3, + max_tokens=512, + top_p=0.8, + ) diff --git a/zhenxun/services/llm/config/providers.py b/zhenxun/services/llm/config/providers.py new file mode 100644 index 00000000..8f4dea80 --- /dev/null +++ b/zhenxun/services/llm/config/providers.py @@ -0,0 +1,328 @@ +""" +LLM 提供商配置管理 + +负责注册和管理 AI 服务提供商的配置项。 +""" + +from typing import Any + +from pydantic import BaseModel, Field + +from zhenxun.configs.config import Config +from zhenxun.services.log import logger + +from ..types.models import ModelDetail, ProviderConfig + +AI_CONFIG_GROUP = "AI" +PROVIDERS_CONFIG_KEY = "PROVIDERS" + + +class LLMConfig(BaseModel): + """LLM 服务配置类""" + + default_model_name: str | None = Field( + default=None, + description="LLM服务全局默认使用的模型名称 (格式: ProviderName/ModelName)", + ) + proxy: str | None = Field( + default=None, + description="LLM服务请求使用的网络代理,例如 http://127.0.0.1:7890", + ) + timeout: int = Field(default=180, description="LLM服务API请求超时时间(秒)") + max_retries_llm: int = Field( + default=3, description="LLM服务请求失败时的最大重试次数" + ) + retry_delay_llm: int = Field( + default=2, description="LLM服务请求重试的基础延迟时间(秒)" + ) + providers: list[ProviderConfig] = Field( + default_factory=list, description="配置多个 AI 服务提供商及其模型信息" + ) + + def get_provider_by_name(self, name: str) -> ProviderConfig | None: + """根据名称获取提供商配置 + + 参数: + name: 提供商名称 + + 返回: + ProviderConfig | None: 提供商配置,如果未找到则返回 None + """ + for provider in self.providers: + if provider.name == name: + return provider + return None + + def get_model_by_provider_and_name( + self, provider_name: str, model_name: str + ) -> tuple[ProviderConfig, ModelDetail] | None: + """根据提供商名称和模型名称获取配置 + + 参数: + provider_name: 提供商名称 + model_name: 模型名称 + + 返回: + tuple[ProviderConfig, ModelDetail] | None: 提供商配置和模型详情的元组, + 如果未找到则返回 None + """ + provider = self.get_provider_by_name(provider_name) + if not provider: + return None + + for model in provider.models: + if model.model_name == model_name: + return provider, model + return None + + def list_available_models(self) -> list[dict[str, Any]]: + """列出所有可用的模型 + + 返回: + list[dict[str, Any]]: 模型信息列表 + """ + models = [] + for provider in self.providers: + for model in provider.models: + models.append( + { + "provider_name": provider.name, + "model_name": model.model_name, + "full_name": f"{provider.name}/{model.model_name}", + "is_available": model.is_available, + "is_embedding_model": model.is_embedding_model, + "api_type": provider.api_type, + } + ) + return models + + def validate_model_name(self, provider_model_name: str) -> bool: + """验证模型名称格式是否正确 + + 参数: + provider_model_name: 格式为 "ProviderName/ModelName" 的字符串 + + 返回: + bool: 是否有效 + """ + if not provider_model_name or "/" not in provider_model_name: + return False + + parts = provider_model_name.split("/", 1) + if len(parts) != 2: + 
return False + + provider_name, model_name = parts + return ( + self.get_model_by_provider_and_name(provider_name, model_name) is not None + ) + + +def get_ai_config(): + """获取 AI 配置组""" + return Config.get(AI_CONFIG_GROUP) + + +def get_default_providers() -> list[dict[str, Any]]: + """获取默认的提供商配置 + + 返回: + list[dict[str, Any]]: 默认提供商配置列表 + """ + return [ + { + "name": "DeepSeek", + "api_key": "sk-******", + "api_base": "https://api.deepseek.com", + "api_type": "openai", + "models": [ + { + "model_name": "deepseek-chat", + "max_tokens": 4096, + "temperature": 0.7, + }, + { + "model_name": "deepseek-reasoner", + }, + ], + }, + { + "name": "GLM", + "api_key": "", + "api_base": "https://open.bigmodel.cn", + "api_type": "zhipu", + "models": [ + {"model_name": "glm-4-flash"}, + {"model_name": "glm-4-plus"}, + ], + }, + { + "name": "Gemini", + "api_key": [ + "AIzaSy*****************************", + "AIzaSy*****************************", + "AIzaSy*****************************", + ], + "api_base": "https://generativelanguage.googleapis.com", + "api_type": "gemini", + "models": [ + {"model_name": "gemini-2.0-flash"}, + {"model_name": "gemini-2.5-flash-preview-05-20"}, + ], + }, + ] + + +def register_llm_configs(): + """注册 LLM 服务的配置项""" + logger.info("注册 LLM 服务的配置项") + + llm_config = LLMConfig() + + Config.add_plugin_config( + AI_CONFIG_GROUP, + "default_model_name", + llm_config.default_model_name, + help="LLM服务全局默认使用的模型名称 (格式: ProviderName/ModelName)", + type=str, + ) + Config.add_plugin_config( + AI_CONFIG_GROUP, + "proxy", + llm_config.proxy, + help="LLM服务请求使用的网络代理,例如 http://127.0.0.1:7890", + type=str, + ) + Config.add_plugin_config( + AI_CONFIG_GROUP, + "timeout", + llm_config.timeout, + help="LLM服务API请求超时时间(秒)", + type=int, + ) + Config.add_plugin_config( + AI_CONFIG_GROUP, + "max_retries_llm", + llm_config.max_retries_llm, + help="LLM服务请求失败时的最大重试次数", + type=int, + ) + Config.add_plugin_config( + AI_CONFIG_GROUP, + "retry_delay_llm", + llm_config.retry_delay_llm, + help="LLM服务请求重试的基础延迟时间(秒)", + type=int, + ) + + Config.add_plugin_config( + AI_CONFIG_GROUP, + PROVIDERS_CONFIG_KEY, + get_default_providers(), + help="配置多个 AI 服务提供商及其模型信息", + default_value=[], + type=list[ProviderConfig], + ) + + +def get_llm_config() -> LLMConfig: + """获取 LLM 配置实例 + + 返回: + LLMConfig: LLM 配置实例 + """ + ai_config = get_ai_config() + + config_data = { + "default_model_name": ai_config.get("default_model_name"), + "proxy": ai_config.get("proxy"), + "timeout": ai_config.get("timeout", 180), + "max_retries_llm": ai_config.get("max_retries_llm", 3), + "retry_delay_llm": ai_config.get("retry_delay_llm", 2), + "providers": ai_config.get(PROVIDERS_CONFIG_KEY, []), + } + + return LLMConfig(**config_data) + + +def validate_llm_config() -> tuple[bool, list[str]]: + """验证 LLM 配置的有效性 + + 返回: + tuple[bool, list[str]]: (是否有效, 错误信息列表) + """ + errors = [] + + try: + llm_config = get_llm_config() + + if llm_config.timeout <= 0: + errors.append("timeout 必须大于 0") + + if llm_config.max_retries_llm < 0: + errors.append("max_retries_llm 不能小于 0") + + if llm_config.retry_delay_llm <= 0: + errors.append("retry_delay_llm 必须大于 0") + + if not llm_config.providers: + errors.append("至少需要配置一个 AI 服务提供商") + else: + provider_names = set() + for provider in llm_config.providers: + if provider.name in provider_names: + errors.append(f"提供商名称重复: {provider.name}") + provider_names.add(provider.name) + + if not provider.api_key: + errors.append(f"提供商 {provider.name} 缺少 API Key") + + if not provider.models: + errors.append(f"提供商 {provider.name} 没有配置任何模型") + 
else: + model_names = set() + for model in provider.models: + if model.model_name in model_names: + errors.append( + f"提供商 {provider.name} 中模型名称重复: " + f"{model.model_name}" + ) + model_names.add(model.model_name) + + if llm_config.default_model_name: + if not llm_config.validate_model_name(llm_config.default_model_name): + errors.append( + f"默认模型 {llm_config.default_model_name} 在配置中不存在" + ) + + except Exception as e: + errors.append(f"配置解析失败: {e!s}") + + return len(errors) == 0, errors + + +def set_default_model(provider_model_name: str | None) -> bool: + """设置默认模型 + + 参数: + provider_model_name: 模型名称,格式为 "ProviderName/ModelName",None 表示清除 + + 返回: + bool: 是否设置成功 + """ + if provider_model_name: + llm_config = get_llm_config() + if not llm_config.validate_model_name(provider_model_name): + logger.error(f"模型 {provider_model_name} 在配置中不存在") + return False + + Config.set_config( + AI_CONFIG_GROUP, "default_model_name", provider_model_name, auto_save=True + ) + + if provider_model_name: + logger.info(f"默认模型已设置为: {provider_model_name}") + else: + logger.info("默认模型已清除") + + return True diff --git a/zhenxun/services/llm/core.py b/zhenxun/services/llm/core.py new file mode 100644 index 00000000..ffd900cf --- /dev/null +++ b/zhenxun/services/llm/core.py @@ -0,0 +1,378 @@ +""" +LLM 核心基础设施模块 + +包含执行 LLM 请求所需的底层组件,如 HTTP 客户端、API Key 存储和智能重试逻辑。 +""" + +import asyncio +from typing import Any + +import httpx +from pydantic import BaseModel + +from zhenxun.services.log import logger +from zhenxun.utils.user_agent import get_user_agent + +from .types import ProviderConfig +from .types.exceptions import LLMErrorCode, LLMException + + +class HttpClientConfig(BaseModel): + """HTTP客户端配置""" + + timeout: int = 180 + max_connections: int = 100 + max_keepalive_connections: int = 20 + proxy: str | None = None + + +class LLMHttpClient: + """LLM服务专用HTTP客户端""" + + def __init__(self, config: HttpClientConfig | None = None): + self.config = config or HttpClientConfig() + self._client: httpx.AsyncClient | None = None + self._active_requests = 0 + self._lock = asyncio.Lock() + + async def _ensure_client_initialized(self) -> httpx.AsyncClient: + if self._client is None or self._client.is_closed: + async with self._lock: + if self._client is None or self._client.is_closed: + logger.debug( + f"LLMHttpClient: Initializing new httpx.AsyncClient " + f"with config: {self.config}" + ) + headers = get_user_agent() + limits = httpx.Limits( + max_connections=self.config.max_connections, + max_keepalive_connections=self.config.max_keepalive_connections, + ) + timeout = httpx.Timeout(self.config.timeout) + self._client = httpx.AsyncClient( + headers=headers, + limits=limits, + timeout=timeout, + proxies=self.config.proxy, + follow_redirects=True, + ) + if self._client is None: + raise LLMException( + "HTTP client failed to initialize.", LLMErrorCode.CONFIGURATION_ERROR + ) + return self._client + + async def post(self, url: str, **kwargs: Any) -> httpx.Response: + client = await self._ensure_client_initialized() + async with self._lock: + self._active_requests += 1 + try: + return await client.post(url, **kwargs) + finally: + async with self._lock: + self._active_requests -= 1 + + async def close(self): + async with self._lock: + if self._client and not self._client.is_closed: + logger.debug( + f"LLMHttpClient: Closing with config: {self.config}. " + f"Active requests: {self._active_requests}" + ) + if self._active_requests > 0: + logger.warning( + f"LLMHttpClient: Closing while {self._active_requests} " + f"requests are still active." 
+ ) + await self._client.aclose() + self._client = None + logger.debug(f"LLMHttpClient for config {self.config} definitively closed.") + + @property + def is_closed(self) -> bool: + return self._client is None or self._client.is_closed + + +class LLMHttpClientManager: + """管理 LLMHttpClient 实例的工厂和池""" + + def __init__(self): + self._clients: dict[tuple[int, str | None], LLMHttpClient] = {} + self._lock = asyncio.Lock() + + def _get_client_key( + self, provider_config: ProviderConfig + ) -> tuple[int, str | None]: + return (provider_config.timeout, provider_config.proxy) + + async def get_client(self, provider_config: ProviderConfig) -> LLMHttpClient: + key = self._get_client_key(provider_config) + async with self._lock: + client = self._clients.get(key) + if client and not client.is_closed: + logger.debug( + f"LLMHttpClientManager: Reusing existing LLMHttpClient " + f"for key: {key}" + ) + return client + + if client and client.is_closed: + logger.debug( + f"LLMHttpClientManager: Found a closed client for key {key}. " + f"Creating a new one." + ) + + logger.debug( + f"LLMHttpClientManager: Creating new LLMHttpClient for key: {key}" + ) + http_client_config = HttpClientConfig( + timeout=provider_config.timeout, proxy=provider_config.proxy + ) + new_client = LLMHttpClient(config=http_client_config) + self._clients[key] = new_client + return new_client + + async def shutdown(self): + async with self._lock: + logger.info( + f"LLMHttpClientManager: Shutting down. " + f"Closing {len(self._clients)} client(s)." + ) + close_tasks = [ + client.close() + for client in self._clients.values() + if client and not client.is_closed + ] + if close_tasks: + await asyncio.gather(*close_tasks, return_exceptions=True) + self._clients.clear() + logger.info("LLMHttpClientManager: Shutdown complete.") + + +http_client_manager = LLMHttpClientManager() + + +async def create_llm_http_client( + timeout: int = 180, + proxy: str | None = None, +) -> LLMHttpClient: + """创建LLM HTTP客户端""" + config = HttpClientConfig(timeout=timeout, proxy=proxy) + return LLMHttpClient(config) + + +class RetryConfig: + """重试配置""" + + def __init__( + self, + max_retries: int = 3, + retry_delay: float = 1.0, + exponential_backoff: bool = True, + key_rotation: bool = True, + ): + self.max_retries = max_retries + self.retry_delay = retry_delay + self.exponential_backoff = exponential_backoff + self.key_rotation = key_rotation + + +async def with_smart_retry( + func, + *args, + retry_config: RetryConfig | None = None, + key_store: "KeyStatusStore | None" = None, + provider_name: str | None = None, + **kwargs: Any, +) -> Any: + """智能重试装饰器 - 支持Key轮询和错误分类""" + config = retry_config or RetryConfig() + last_exception: Exception | None = None + failed_keys: set[str] = set() + + for attempt in range(config.max_retries + 1): + try: + if config.key_rotation and "failed_keys" in func.__code__.co_varnames: + kwargs["failed_keys"] = failed_keys + + return await func(*args, **kwargs) + + except LLMException as e: + last_exception = e + + if e.code in [ + LLMErrorCode.API_KEY_INVALID, + LLMErrorCode.API_QUOTA_EXCEEDED, + ]: + if hasattr(e, "details") and e.details and "api_key" in e.details: + failed_keys.add(e.details["api_key"]) + if key_store and provider_name: + await key_store.record_failure( + e.details["api_key"], e.details.get("status_code") + ) + + should_retry = _should_retry_llm_error(e, attempt, config.max_retries) + if not should_retry: + logger.error(f"不可重试的错误,停止重试: {e}") + raise + + if attempt < config.max_retries: + wait_time = 
config.retry_delay + if config.exponential_backoff: + wait_time *= 2**attempt + logger.warning( + f"请求失败,{wait_time}秒后重试 (第{attempt + 1}次): {e}" + ) + await asyncio.sleep(wait_time) + else: + logger.error(f"重试{config.max_retries}次后仍然失败: {e}") + + except Exception as e: + last_exception = e + logger.error(f"非LLM异常,停止重试: {e}") + raise LLMException( + f"操作失败: {e}", + code=LLMErrorCode.GENERATION_FAILED, + cause=e, + ) + + if last_exception: + raise last_exception + else: + raise RuntimeError("重试函数未能正常执行且未捕获到异常") + + +def _should_retry_llm_error( + error: LLMException, attempt: int, max_retries: int +) -> bool: + """判断LLM错误是否应该重试""" + non_retryable_errors = { + LLMErrorCode.MODEL_NOT_FOUND, + LLMErrorCode.CONTEXT_LENGTH_EXCEEDED, + LLMErrorCode.USER_LOCATION_NOT_SUPPORTED, + LLMErrorCode.CONFIGURATION_ERROR, + } + + if error.code in non_retryable_errors: + return False + + retryable_errors = { + LLMErrorCode.API_REQUEST_FAILED, + LLMErrorCode.API_TIMEOUT, + LLMErrorCode.API_RATE_LIMITED, + LLMErrorCode.API_RESPONSE_INVALID, + LLMErrorCode.RESPONSE_PARSE_ERROR, + LLMErrorCode.GENERATION_FAILED, + LLMErrorCode.CONTENT_FILTERED, + LLMErrorCode.API_KEY_INVALID, + LLMErrorCode.API_QUOTA_EXCEEDED, + } + + if error.code in retryable_errors: + if error.code == LLMErrorCode.API_QUOTA_EXCEEDED: + return attempt < min(2, max_retries) + elif error.code == LLMErrorCode.CONTENT_FILTERED: + return attempt < min(1, max_retries) + return True + + return False + + +class KeyStatusStore: + """API Key 状态管理存储 - 优化版本,支持轮询和负载均衡""" + + def __init__(self): + self._key_status: dict[str, bool] = {} + self._key_usage_count: dict[str, int] = {} + self._key_last_used: dict[str, float] = {} + self._provider_key_index: dict[str, int] = {} + self._lock = asyncio.Lock() + + async def get_next_available_key( + self, + provider_name: str, + api_keys: list[str], + exclude_keys: set[str] | None = None, + ) -> str | None: + """获取下一个可用的API密钥(轮询策略)""" + if not api_keys: + return None + + exclude_keys = exclude_keys or set() + available_keys = [ + key + for key in api_keys + if key not in exclude_keys and self._key_status.get(key, True) + ] + + if not available_keys: + return api_keys[0] if api_keys else None + + async with self._lock: + current_index = self._provider_key_index.get(provider_name, 0) + + selected_key = available_keys[current_index % len(available_keys)] + + self._provider_key_index[provider_name] = (current_index + 1) % len( + available_keys + ) + + import time + + self._key_usage_count[selected_key] = ( + self._key_usage_count.get(selected_key, 0) + 1 + ) + self._key_last_used[selected_key] = time.time() + + logger.debug( + f"轮询选择API密钥: {self._get_key_id(selected_key)} " + f"(使用次数: {self._key_usage_count[selected_key]})" + ) + + return selected_key + + async def record_success(self, api_key: str): + """记录成功使用""" + async with self._lock: + self._key_status[api_key] = True + logger.debug(f"记录API密钥成功使用: {self._get_key_id(api_key)}") + + async def record_failure(self, api_key: str, status_code: int | None): + """记录失败使用""" + key_id = self._get_key_id(api_key) + async with self._lock: + if status_code in [401, 403]: + self._key_status[api_key] = False + logger.warning( + f"API密钥认证失败,标记为不可用: {key_id} (状态码: {status_code})" + ) + else: + logger.debug(f"记录API密钥失败使用: {key_id} (状态码: {status_code})") + + async def reset_key_status(self, api_key: str): + """重置密钥状态(用于恢复机制)""" + async with self._lock: + self._key_status[api_key] = True + logger.info(f"重置API密钥状态: {self._get_key_id(api_key)}") + + async def get_key_stats(self, 
api_keys: list[str]) -> dict[str, dict]: + """获取密钥使用统计""" + stats = {} + async with self._lock: + for key in api_keys: + key_id = self._get_key_id(key) + stats[key_id] = { + "available": self._key_status.get(key, True), + "usage_count": self._key_usage_count.get(key, 0), + "last_used": self._key_last_used.get(key, 0), + } + return stats + + def _get_key_id(self, api_key: str) -> str: + """获取API密钥的标识符(用于日志)""" + if len(api_key) <= 8: + return api_key + return f"{api_key[:4]}...{api_key[-4:]}" + + +key_store = KeyStatusStore() diff --git a/zhenxun/services/llm/manager.py b/zhenxun/services/llm/manager.py new file mode 100644 index 00000000..f23dfa50 --- /dev/null +++ b/zhenxun/services/llm/manager.py @@ -0,0 +1,434 @@ +""" +LLM 模型管理器 + +负责模型实例的创建、缓存、配置管理和生命周期管理。 +""" + +import hashlib +import json +import time +from typing import Any + +from zhenxun.configs.config import Config +from zhenxun.services.log import logger + +from .config import validate_override_params +from .config.providers import AI_CONFIG_GROUP, PROVIDERS_CONFIG_KEY, get_ai_config +from .core import http_client_manager, key_store +from .service import LLMModel +from .types import LLMErrorCode, LLMException, ModelDetail, ProviderConfig + +DEFAULT_MODEL_NAME_KEY = "default_model_name" +PROXY_KEY = "proxy" +TIMEOUT_KEY = "timeout" + +_model_cache: dict[str, tuple[LLMModel, float]] = {} +_cache_ttl = 3600 +_max_cache_size = 10 + + +def parse_provider_model_string(name_str: str | None) -> tuple[str | None, str | None]: + """解析 'ProviderName/ModelName' 格式的字符串""" + if not name_str or "/" not in name_str: + return None, None + parts = name_str.split("/", 1) + if len(parts) == 2 and parts[0].strip() and parts[1].strip(): + return parts[0].strip(), parts[1].strip() + return None, None + + +def _make_cache_key( + provider_model_name: str | None, override_config: dict | None +) -> str: + """生成缓存键""" + config_str = ( + json.dumps(override_config, sort_keys=True) if override_config else "None" + ) + key_data = f"{provider_model_name}:{config_str}" + return hashlib.md5(key_data.encode()).hexdigest() + + +def _get_cached_model(cache_key: str) -> LLMModel | None: + """从缓存获取模型""" + if cache_key in _model_cache: + model, created_time = _model_cache[cache_key] + current_time = time.time() + + if current_time - created_time > _cache_ttl: + del _model_cache[cache_key] + logger.debug(f"模型缓存已过期: {cache_key}") + return None + + if model._is_closed: + logger.debug( + f"缓存的模型 {cache_key} ({model.provider_name}/{model.model_name}) " + f"处于_is_closed=True状态,重置为False以供复用。" + ) + model._is_closed = False + + logger.debug( + f"使用缓存的模型: {cache_key} -> {model.provider_name}/{model.model_name}" + ) + return model + return None + + +def _cache_model(cache_key: str, model: LLMModel): + """缓存模型实例""" + current_time = time.time() + + if len(_model_cache) >= _max_cache_size: + oldest_key = min(_model_cache.keys(), key=lambda k: _model_cache[k][1]) + del _model_cache[oldest_key] + + _model_cache[cache_key] = (model, current_time) + + +def clear_model_cache(): + """清空模型缓存""" + global _model_cache + _model_cache.clear() + logger.info("已清空模型缓存") + + +def get_cache_stats() -> dict[str, Any]: + """获取缓存统计信息""" + return { + "cache_size": len(_model_cache), + "max_cache_size": _max_cache_size, + "cache_ttl": _cache_ttl, + "cached_models": list(_model_cache.keys()), + } + + +def get_default_api_base_for_type(api_type: str) -> str | None: + """根据API类型获取默认的API基础地址""" + default_api_bases = { + "openai": "https://api.openai.com", + "deepseek": "https://api.deepseek.com", + 
"zhipu": "https://open.bigmodel.cn", + "gemini": "https://generativelanguage.googleapis.com", + "general_openai_compat": None, + } + + return default_api_bases.get(api_type) + + +def get_configured_providers() -> list[ProviderConfig]: + """从配置中获取Provider列表 - 简化版本""" + ai_config = get_ai_config() + providers_raw = ai_config.get(PROVIDERS_CONFIG_KEY, []) + if not isinstance(providers_raw, list): + logger.error( + f"配置项 {AI_CONFIG_GROUP}.{PROVIDERS_CONFIG_KEY} 不是一个列表," + f"将使用空列表。" + ) + return [] + + valid_providers = [] + for i, item in enumerate(providers_raw): + if not isinstance(item, dict): + logger.warning(f"配置文件中第 {i + 1} 项不是字典格式,已跳过。") + continue + + try: + if not item.get("name"): + logger.warning(f"Provider {i + 1} 缺少 'name' 字段,已跳过。") + continue + + if not item.get("api_key"): + logger.warning( + f"Provider '{item['name']}' 缺少 'api_key' 字段,已跳过。" + ) + continue + + if "api_type" not in item or not item["api_type"]: + provider_name = item.get("name", "").lower() + if "glm" in provider_name or "zhipu" in provider_name: + item["api_type"] = "zhipu" + elif "gemini" in provider_name or "google" in provider_name: + item["api_type"] = "gemini" + else: + item["api_type"] = "openai" + + if "api_base" not in item or not item["api_base"]: + api_type = item.get("api_type") + if api_type: + default_api_base = get_default_api_base_for_type(api_type) + if default_api_base: + item["api_base"] = default_api_base + + if "models" not in item: + item["models"] = [{"model_name": item.get("name", "default")}] + + provider_conf = ProviderConfig(**item) + valid_providers.append(provider_conf) + + except Exception as e: + logger.warning(f"解析配置文件中 Provider {i + 1} 时出错: {e},已跳过。") + + return valid_providers + + +def find_model_config( + provider_name: str, model_name: str +) -> tuple[ProviderConfig, ModelDetail] | None: + """在配置中查找指定的 Provider 和 ModelDetail + + Args: + provider_name: 提供商名称 + model_name: 模型名称 + + Returns: + 找到的 (ProviderConfig, ModelDetail) 元组,未找到则返回 None + """ + providers = get_configured_providers() + + for provider in providers: + if provider.name.lower() == provider_name.lower(): + for model_detail in provider.models: + if model_detail.model_name.lower() == model_name.lower(): + return provider, model_detail + + return None + + +def list_available_models() -> list[dict[str, Any]]: + """列出所有配置的可用模型""" + providers = get_configured_providers() + model_list = [] + for provider in providers: + for model_detail in provider.models: + model_info = { + "provider_name": provider.name, + "model_name": model_detail.model_name, + "full_name": f"{provider.name}/{model_detail.model_name}", + "api_type": provider.api_type or "auto-detect", + "api_base": provider.api_base, + "is_available": model_detail.is_available, + "is_embedding_model": model_detail.is_embedding_model, + "available_identifiers": _get_model_identifiers( + provider.name, model_detail + ), + } + model_list.append(model_info) + return model_list + + +def _get_model_identifiers(provider_name: str, model_detail: ModelDetail) -> list[str]: + """获取模型的所有可用标识符""" + return [f"{provider_name}/{model_detail.model_name}"] + + +def list_model_identifiers() -> dict[str, list[str]]: + """列出所有模型的可用标识符 + + Returns: + 字典,键为模型的完整名称,值为该模型的所有可用标识符列表 + """ + providers = get_configured_providers() + result = {} + + for provider in providers: + for model_detail in provider.models: + full_name = f"{provider.name}/{model_detail.model_name}" + identifiers = _get_model_identifiers(provider.name, model_detail) + result[full_name] = identifiers + + return result + + 
+def list_embedding_models() -> list[dict[str, Any]]: + """列出所有配置的嵌入模型""" + all_models = list_available_models() + return [model for model in all_models if model.get("is_embedding_model", False)] + + +async def get_model_instance( + provider_model_name: str | None = None, + override_config: dict[str, Any] | None = None, +) -> LLMModel: + """根据 'ProviderName/ModelName' 字符串获取并实例化 LLMModel (异步版本)""" + cache_key = _make_cache_key(provider_model_name, override_config) + cached_model = _get_cached_model(cache_key) + if cached_model: + if override_config: + validated_override = validate_override_params(override_config) + if cached_model._generation_config != validated_override: + cached_model._generation_config = validated_override + logger.debug( + f"对缓存模型 {provider_model_name} 应用新的覆盖配置: " + f"{validated_override.to_dict()}" + ) + return cached_model + + resolved_model_name_str = provider_model_name + if resolved_model_name_str is None: + resolved_model_name_str = get_global_default_model_name() + if resolved_model_name_str is None: + available_models_list = list_available_models() + if not available_models_list: + raise LLMException( + "未配置任何AI模型", code=LLMErrorCode.CONFIGURATION_ERROR + ) + resolved_model_name_str = available_models_list[0]["full_name"] + logger.warning(f"未指定模型,使用第一个可用模型: {resolved_model_name_str}") + + prov_name_str, mod_name_str = parse_provider_model_string(resolved_model_name_str) + if not prov_name_str or not mod_name_str: + raise LLMException( + f"无效的模型名称格式: '{resolved_model_name_str}'", + code=LLMErrorCode.MODEL_NOT_FOUND, + ) + + config_tuple_found = find_model_config(prov_name_str, mod_name_str) + if not config_tuple_found: + all_models = list_available_models() + raise LLMException( + f"未找到模型: '{resolved_model_name_str}'. " + f"可用: {[m['full_name'] for m in all_models]}", + code=LLMErrorCode.MODEL_NOT_FOUND, + ) + + provider_config_found, model_detail_found = config_tuple_found + + ai_config = get_ai_config() + global_proxy_setting = ai_config.get(PROXY_KEY) + default_timeout = ( + provider_config_found.timeout + if provider_config_found.timeout is not None + else 180 + ) + global_timeout_setting = ai_config.get(TIMEOUT_KEY, default_timeout) + + config_for_http_client = ProviderConfig( + name=provider_config_found.name, + api_key=provider_config_found.api_key, + models=provider_config_found.models, + timeout=global_timeout_setting, + proxy=global_proxy_setting, + api_base=provider_config_found.api_base, + api_type=provider_config_found.api_type, + openai_compat=provider_config_found.openai_compat, + temperature=provider_config_found.temperature, + max_tokens=provider_config_found.max_tokens, + ) + + shared_http_client = await http_client_manager.get_client(config_for_http_client) + + try: + model_instance = LLMModel( + provider_config=config_for_http_client, + model_detail=model_detail_found, + key_store=key_store, + http_client=shared_http_client, + ) + + if override_config: + validated_override_params = validate_override_params(override_config) + model_instance._generation_config = validated_override_params + logger.debug( + f"为新模型 {resolved_model_name_str} 应用配置覆盖: " + f"{validated_override_params.to_dict()}" + ) + + _cache_model(cache_key, model_instance) + logger.debug( + f"创建并缓存了新模型: {cache_key} -> {prov_name_str}/{mod_name_str}" + ) + return model_instance + except LLMException: + raise + except Exception as e: + logger.error( + f"实例化 LLMModel ({resolved_model_name_str}) 时发生内部错误: {e!s}", e=e + ) + raise LLMException( + f"初始化模型 '{resolved_model_name_str}' 失败: 
{e!s}", + code=LLMErrorCode.MODEL_INIT_FAILED, + cause=e, + ) + + +def get_global_default_model_name() -> str | None: + """获取全局默认模型名称""" + ai_config = get_ai_config() + return ai_config.get(DEFAULT_MODEL_NAME_KEY) + + +def set_global_default_model_name(provider_model_name: str | None) -> bool: + """设置全局默认模型名称""" + if provider_model_name: + prov_name, mod_name = parse_provider_model_string(provider_model_name) + if not prov_name or not mod_name or not find_model_config(prov_name, mod_name): + logger.error( + f"尝试设置的全局默认模型 '{provider_model_name}' 无效或未配置。" + ) + return False + + Config.set_config( + AI_CONFIG_GROUP, DEFAULT_MODEL_NAME_KEY, provider_model_name, auto_save=True + ) + if provider_model_name: + logger.info(f"LLM 服务全局默认模型已更新为: {provider_model_name}") + else: + logger.info("LLM 服务全局默认模型已清除。") + return True + + +async def get_key_usage_stats() -> dict[str, Any]: + """获取所有Provider的Key使用统计""" + providers = get_configured_providers() + stats = {} + + for provider in providers: + provider_stats = await key_store.get_key_stats( + [provider.api_key] + if isinstance(provider.api_key, str) + else provider.api_key + ) + stats[provider.name] = { + "total_keys": len( + [provider.api_key] + if isinstance(provider.api_key, str) + else provider.api_key + ), + "key_stats": provider_stats, + } + + return stats + + +async def reset_key_status(provider_name: str, api_key: str | None = None) -> bool: + """重置指定Provider的Key状态""" + providers = get_configured_providers() + target_provider = None + + for provider in providers: + if provider.name.lower() == provider_name.lower(): + target_provider = provider + break + + if not target_provider: + logger.error(f"未找到Provider: {provider_name}") + return False + + provider_keys = ( + [target_provider.api_key] + if isinstance(target_provider.api_key, str) + else target_provider.api_key + ) + + if api_key: + if api_key in provider_keys: + await key_store.reset_key_status(api_key) + logger.info(f"已重置Provider '{provider_name}' 的指定Key状态") + return True + else: + logger.error(f"指定的Key不属于Provider '{provider_name}'") + return False + else: + for key in provider_keys: + await key_store.reset_key_status(key) + logger.info(f"已重置Provider '{provider_name}' 的所有Key状态") + return True diff --git a/zhenxun/services/llm/service.py b/zhenxun/services/llm/service.py new file mode 100644 index 00000000..d054ca9b --- /dev/null +++ b/zhenxun/services/llm/service.py @@ -0,0 +1,632 @@ +""" +LLM 模型实现类 + +包含 LLM 模型的抽象基类和具体实现,负责与各种 AI 提供商的 API 交互。 +""" + +from abc import ABC, abstractmethod +from collections.abc import Awaitable, Callable +import json +from typing import Any + +from zhenxun.services.log import logger + +from .config import LLMGenerationConfig +from .config.providers import get_ai_config +from .core import ( + KeyStatusStore, + LLMHttpClient, + RetryConfig, + http_client_manager, + with_smart_retry, +) +from .types import ( + EmbeddingTaskType, + LLMErrorCode, + LLMException, + LLMMessage, + LLMResponse, + LLMTool, + ModelDetail, + ProviderConfig, +) + + +class LLMModelBase(ABC): + """LLM模型抽象基类""" + + @abstractmethod + async def generate_text( + self, + prompt: str, + history: list[dict[str, str]] | None = None, + **kwargs: Any, + ) -> str: + """生成文本""" + pass + + @abstractmethod + async def generate_response( + self, + messages: list[LLMMessage], + config: LLMGenerationConfig | None = None, + tools: list[LLMTool] | None = None, + tool_choice: str | dict[str, Any] | None = None, + **kwargs: Any, + ) -> LLMResponse: + """生成高级响应""" + pass + + @abstractmethod + async def 
generate_embeddings( + self, + texts: list[str], + task_type: EmbeddingTaskType | str = EmbeddingTaskType.RETRIEVAL_DOCUMENT, + **kwargs: Any, + ) -> list[list[float]]: + """生成文本嵌入向量""" + pass + + +class LLMModel(LLMModelBase): + """LLM 模型实现类""" + + def __init__( + self, + provider_config: ProviderConfig, + model_detail: ModelDetail, + key_store: KeyStatusStore, + http_client: LLMHttpClient, + config_override: LLMGenerationConfig | None = None, + ): + self.provider_config = provider_config + self.model_detail = model_detail + self.key_store = key_store + self.http_client: LLMHttpClient = http_client + self._generation_config = config_override + + self.provider_name = provider_config.name + self.api_type = provider_config.api_type + self.api_base = provider_config.api_base + self.api_keys = ( + [provider_config.api_key] + if isinstance(provider_config.api_key, str) + else provider_config.api_key + ) + self.model_name = model_detail.model_name + self.temperature = model_detail.temperature + self.max_tokens = model_detail.max_tokens + + self._is_closed = False + + async def _get_http_client(self) -> LLMHttpClient: + """获取HTTP客户端""" + if self.http_client.is_closed: + logger.debug( + f"LLMModel {self.provider_name}/{self.model_name} 的 HTTP 客户端已关闭," + "正在获取新的客户端" + ) + self.http_client = await http_client_manager.get_client( + self.provider_config + ) + return self.http_client + + async def _select_api_key(self, failed_keys: set[str] | None = None) -> str: + """选择可用的API密钥(使用轮询策略)""" + if not self.api_keys: + raise LLMException( + f"提供商 {self.provider_name} 没有配置API密钥", + code=LLMErrorCode.NO_AVAILABLE_KEYS, + ) + + selected_key = await self.key_store.get_next_available_key( + self.provider_name, self.api_keys, failed_keys + ) + + if not selected_key: + raise LLMException( + f"提供商 {self.provider_name} 的所有API密钥当前都不可用", + code=LLMErrorCode.NO_AVAILABLE_KEYS, + details={ + "total_keys": len(self.api_keys), + "failed_keys": len(failed_keys or set()), + }, + ) + + return selected_key + + async def _execute_embedding_request( + self, + adapter, + texts: list[str], + task_type: EmbeddingTaskType | str, + http_client: LLMHttpClient, + failed_keys: set[str] | None = None, + ) -> list[list[float]]: + """执行单次嵌入请求 - 供重试机制调用""" + api_key = await self._select_api_key(failed_keys) + + try: + request_data = adapter.prepare_embedding_request( + model=self, + api_key=api_key, + texts=texts, + task_type=task_type, + ) + + http_response = await http_client.post( + request_data.url, + headers=request_data.headers, + json=request_data.body, + ) + + if http_response.status_code != 200: + error_text = http_response.text + logger.error( + f"HTTP嵌入请求失败: {http_response.status_code} - {error_text}" + ) + await self.key_store.record_failure(api_key, http_response.status_code) + + error_code = LLMErrorCode.API_REQUEST_FAILED + if http_response.status_code in [401, 403]: + error_code = LLMErrorCode.API_KEY_INVALID + elif http_response.status_code == 429: + error_code = LLMErrorCode.API_RATE_LIMITED + + raise LLMException( + f"HTTP嵌入请求失败: {http_response.status_code}", + code=error_code, + details={ + "status_code": http_response.status_code, + "response": error_text, + "api_key": api_key, + }, + ) + + try: + response_json = http_response.json() + adapter.validate_embedding_response(response_json) + embeddings = adapter.parse_embedding_response(response_json) + except Exception as e: + logger.error(f"解析嵌入响应失败: {e}", e=e) + await self.key_store.record_failure(api_key, None) + if isinstance(e, LLMException): + raise + else: + raise 
LLMException( + f"解析API嵌入响应失败: {e}", + code=LLMErrorCode.RESPONSE_PARSE_ERROR, + cause=e, + ) + + await self.key_store.record_success(api_key) + return embeddings + + except LLMException: + raise + except Exception as e: + logger.error(f"生成嵌入时发生未预期错误: {e}", e=e) + await self.key_store.record_failure(api_key, None) + raise LLMException( + f"生成嵌入失败: {e}", + code=LLMErrorCode.EMBEDDING_FAILED, + cause=e, + ) + + async def _execute_with_smart_retry( + self, + adapter, + messages: list[LLMMessage], + config: LLMGenerationConfig | None, + tools_dict: list[dict[str, Any]] | None, + tool_choice: str | dict[str, Any] | None, + http_client: LLMHttpClient, + ): + """智能重试机制 - 使用统一的重试装饰器""" + ai_config = get_ai_config() + max_retries = ai_config.get("max_retries_llm", 3) + retry_delay = ai_config.get("retry_delay_llm", 2) + retry_config = RetryConfig(max_retries=max_retries, retry_delay=retry_delay) + + return await with_smart_retry( + self._execute_single_request, + adapter, + messages, + config, + tools_dict, + tool_choice, + http_client, + retry_config=retry_config, + key_store=self.key_store, + provider_name=self.provider_name, + ) + + async def _execute_single_request( + self, + adapter, + messages: list[LLMMessage], + config: LLMGenerationConfig | None, + tools_dict: list[dict[str, Any]] | None, + tool_choice: str | dict[str, Any] | None, + http_client: LLMHttpClient, + failed_keys: set[str] | None = None, + ) -> LLMResponse: + """执行单次请求 - 供重试机制调用,直接返回 LLMResponse""" + api_key = await self._select_api_key(failed_keys) + + try: + request_data = adapter.prepare_advanced_request( + model=self, + api_key=api_key, + messages=messages, + config=config, + tools=tools_dict, + tool_choice=tool_choice, + ) + + http_response = await http_client.post( + request_data.url, + headers=request_data.headers, + json=request_data.body, + ) + + if http_response.status_code != 200: + error_text = http_response.text + logger.error( + f"HTTP请求失败: {http_response.status_code} - {error_text}" + ) + + await self.key_store.record_failure(api_key, http_response.status_code) + + if http_response.status_code in [401, 403]: + error_code = LLMErrorCode.API_KEY_INVALID + elif http_response.status_code == 429: + error_code = LLMErrorCode.API_RATE_LIMITED + elif http_response.status_code in [402, 413]: + error_code = LLMErrorCode.API_QUOTA_EXCEEDED + else: + error_code = LLMErrorCode.API_REQUEST_FAILED + + raise LLMException( + f"HTTP请求失败: {http_response.status_code}", + code=error_code, + details={ + "status_code": http_response.status_code, + "response": error_text, + "api_key": api_key, + }, + ) + + try: + response_json = http_response.json() + response_data = adapter.parse_response( + model=self, + response_json=response_json, + is_advanced=True, + ) + + from .types.models import LLMToolCall + + response_tool_calls = [] + if response_data.tool_calls: + for tc_data in response_data.tool_calls: + if isinstance(tc_data, LLMToolCall): + response_tool_calls.append(tc_data) + elif isinstance(tc_data, dict): + try: + response_tool_calls.append(LLMToolCall(**tc_data)) + except Exception as e: + logger.warning( + f"无法将工具调用数据转换为LLMToolCall: {tc_data}, " + f"error: {e}" + ) + else: + logger.warning(f"工具调用数据格式未知: {tc_data}") + + llm_response = LLMResponse( + text=response_data.text, + usage_info=response_data.usage_info, + raw_response=response_data.raw_response, + tool_calls=response_tool_calls if response_tool_calls else None, + code_executions=response_data.code_executions, + grounding_metadata=response_data.grounding_metadata, + 
cache_info=response_data.cache_info, + ) + + except Exception as e: + logger.error(f"解析响应失败: {e}", e=e) + await self.key_store.record_failure(api_key, None) + + if isinstance(e, LLMException): + raise + else: + raise LLMException( + f"解析API响应失败: {e}", + code=LLMErrorCode.RESPONSE_PARSE_ERROR, + cause=e, + ) + + await self.key_store.record_success(api_key) + + return llm_response + + except LLMException: + raise + except Exception as e: + logger.error(f"生成响应时发生未预期错误: {e}", e=e) + await self.key_store.record_failure(api_key, None) + + raise LLMException( + f"生成响应失败: {e}", + code=LLMErrorCode.GENERATION_FAILED, + cause=e, + ) + + async def close(self): + """ + 标记模型实例的当前使用周期结束。 + 共享的 HTTP 客户端由 LLMHttpClientManager 管理,不由 LLMModel 关闭。 + """ + if self._is_closed: + return + self._is_closed = True + logger.debug( + f"LLMModel实例的使用周期已结束: {self} (共享HTTP客户端状态不受影响)" + ) + + async def __aenter__(self): + if self._is_closed: + logger.debug( + f"Re-entering context for closed LLMModel {self}. " + f"Resetting _is_closed to False." + ) + self._is_closed = False + self._check_not_closed() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """异步上下文管理器出口""" + _ = exc_type, exc_val, exc_tb + await self.close() + + def _check_not_closed(self): + """检查实例是否已关闭""" + if self._is_closed: + raise RuntimeError(f"LLMModel实例已关闭: {self}") + + async def generate_text( + self, + prompt: str, + history: list[dict[str, str]] | None = None, + **kwargs: Any, + ) -> str: + """生成文本 - 通过 generate_response 实现""" + self._check_not_closed() + + messages: list[LLMMessage] = [] + + if history: + for msg in history: + role = msg.get("role", "user") + content_text = msg.get("content", "") + messages.append(LLMMessage(role=role, content=content_text)) + + messages.append(LLMMessage.user(prompt)) + + model_fields = getattr(LLMGenerationConfig, "model_fields", {}) + request_specific_config_dict = { + k: v for k, v in kwargs.items() if k in model_fields + } + request_specific_config = None + if request_specific_config_dict: + request_specific_config = LLMGenerationConfig( + **request_specific_config_dict + ) + + for key in request_specific_config_dict: + kwargs.pop(key, None) + + response = await self.generate_response( + messages, + config=request_specific_config, + **kwargs, + ) + return response.text + + async def generate_response( + self, + messages: list[LLMMessage], + config: LLMGenerationConfig | None = None, + tools: list[LLMTool] | None = None, + tool_choice: str | dict[str, Any] | None = None, + tool_executor: Callable[[str, dict[str, Any]], Awaitable[Any]] | None = None, + max_tool_iterations: int = 5, + **kwargs: Any, + ) -> LLMResponse: + """生成高级响应 - 实现完整的工具调用循环""" + self._check_not_closed() + + from .adapters import get_adapter_for_api_type + from .config.generation import create_generation_config_from_kwargs + + adapter = get_adapter_for_api_type(self.api_type) + if not adapter: + raise LLMException( + f"未找到适用于 API 类型 '{self.api_type}' 的适配器", + code=LLMErrorCode.CONFIGURATION_ERROR, + ) + + final_request_config = self._generation_config or LLMGenerationConfig() + if kwargs: + kwargs_config = create_generation_config_from_kwargs(**kwargs) + merged_dict = final_request_config.to_dict() + merged_dict.update(kwargs_config.to_dict()) + final_request_config = LLMGenerationConfig(**merged_dict) + + if config is not None: + merged_dict = final_request_config.to_dict() + merged_dict.update(config.to_dict()) + final_request_config = LLMGenerationConfig(**merged_dict) + + tools_dict: list[dict[str, Any]] | None = 
None + if tools: + tools_dict = [] + for tool in tools: + if hasattr(tool, "model_dump"): + model_dump_func = getattr(tool, "model_dump") + tools_dict.append(model_dump_func(exclude_none=True)) + elif isinstance(tool, dict): + tools_dict.append(tool) + else: + try: + tools_dict.append(dict(tool)) + except (TypeError, ValueError): + logger.warning(f"工具 '{tool}' 无法转换为字典,已忽略。") + + http_client = await self._get_http_client() + current_messages = list(messages) + + for iteration in range(max_tool_iterations): + logger.debug(f"工具调用循环迭代: {iteration + 1}/{max_tool_iterations}") + + llm_response = await self._execute_with_smart_retry( + adapter, + current_messages, + final_request_config, + tools_dict if iteration == 0 else None, + tool_choice if iteration == 0 else None, + http_client, + ) + + response_tool_calls = llm_response.tool_calls or [] + + if not response_tool_calls or not tool_executor: + logger.debug("模型未请求工具调用,或未提供工具执行器。返回当前响应。") + return llm_response + + logger.info(f"模型请求执行 {len(response_tool_calls)} 个工具。") + + assistant_message_content = llm_response.text if llm_response.text else "" + current_messages.append( + LLMMessage.assistant_tool_calls( + content=assistant_message_content, tool_calls=response_tool_calls + ) + ) + + tool_response_messages: list[LLMMessage] = [] + for tool_call in response_tool_calls: + tool_name = tool_call.function.name + try: + tool_args_dict = json.loads(tool_call.function.arguments) + logger.debug(f"执行工具: {tool_name},参数: {tool_args_dict}") + + tool_result = await tool_executor(tool_name, tool_args_dict) + logger.debug( + f"工具 '{tool_name}' 执行结果: {str(tool_result)[:200]}..." + ) + + tool_response_messages.append( + LLMMessage.tool_response( + tool_call_id=tool_call.id, + function_name=tool_name, + result=tool_result, + ) + ) + except json.JSONDecodeError as e: + logger.error( + f"工具 '{tool_name}' 参数JSON解析失败: " + f"{tool_call.function.arguments}, 错误: {e}" + ) + tool_response_messages.append( + LLMMessage.tool_response( + tool_call_id=tool_call.id, + function_name=tool_name, + result={ + "error": "Argument JSON parsing failed", + "details": str(e), + }, + ) + ) + except Exception as e: + logger.error(f"执行工具 '{tool_name}' 失败: {e}", e=e) + tool_response_messages.append( + LLMMessage.tool_response( + tool_call_id=tool_call.id, + function_name=tool_name, + result={ + "error": "Tool execution failed", + "details": str(e), + }, + ) + ) + + current_messages.extend(tool_response_messages) + + logger.warning(f"已达到最大工具调用迭代次数 ({max_tool_iterations})。") + raise LLMException( + "已达到最大工具调用迭代次数,但模型仍在请求工具调用或未提供最终文本回复。", + code=LLMErrorCode.GENERATION_FAILED, + details={ + "iterations": max_tool_iterations, + "last_messages": current_messages[-2:], + }, + ) + + async def generate_embeddings( + self, + texts: list[str], + task_type: EmbeddingTaskType | str = EmbeddingTaskType.RETRIEVAL_DOCUMENT, + **kwargs: Any, + ) -> list[list[float]]: + """生成文本嵌入向量""" + self._check_not_closed() + if not texts: + return [] + + from .adapters import get_adapter_for_api_type + + adapter = get_adapter_for_api_type(self.api_type) + if not adapter: + raise LLMException( + f"未找到适用于 API 类型 '{self.api_type}' 的嵌入适配器", + code=LLMErrorCode.CONFIGURATION_ERROR, + ) + + http_client = await self._get_http_client() + + ai_config = get_ai_config() + default_max_retries = ai_config.get("max_retries_llm", 3) + default_retry_delay = ai_config.get("retry_delay_llm", 2) + max_retries_embed = kwargs.get( + "max_retries_embed", max(1, default_max_retries // 2) + ) + retry_delay_embed = 
kwargs.get("retry_delay_embed", default_retry_delay / 2) + + retry_config = RetryConfig( + max_retries=max_retries_embed, + retry_delay=retry_delay_embed, + exponential_backoff=True, + key_rotation=True, + ) + + return await with_smart_retry( + self._execute_embedding_request, + adapter, + texts, + task_type, + http_client, + retry_config=retry_config, + key_store=self.key_store, + provider_name=self.provider_name, + ) + + def __str__(self) -> str: + status = "closed" if self._is_closed else "active" + return f"LLMModel({self.provider_name}/{self.model_name}, {status})" + + def __repr__(self) -> str: + status = "closed" if self._is_closed else "active" + return ( + f"LLMModel(provider={self.provider_name}, model={self.model_name}, " + f"api_type={self.api_type}, status={status})" + ) diff --git a/zhenxun/services/llm/types/__init__.py b/zhenxun/services/llm/types/__init__.py new file mode 100644 index 00000000..ebae4185 --- /dev/null +++ b/zhenxun/services/llm/types/__init__.py @@ -0,0 +1,54 @@ +""" +LLM 类型定义模块 + +统一导出所有核心类型、协议和异常定义。 +""" + +from .content import ( + LLMContentPart, + LLMMessage, + LLMResponse, +) +from .enums import EmbeddingTaskType, ModelProvider, ResponseFormat, ToolCategory +from .exceptions import LLMErrorCode, LLMException, get_user_friendly_error_message +from .models import ( + LLMCacheInfo, + LLMCodeExecution, + LLMGroundingAttribution, + LLMGroundingMetadata, + LLMTool, + LLMToolCall, + LLMToolFunction, + ModelDetail, + ModelInfo, + ModelName, + ProviderConfig, + ToolMetadata, + UsageInfo, +) + +__all__ = [ + "EmbeddingTaskType", + "LLMCacheInfo", + "LLMCodeExecution", + "LLMContentPart", + "LLMErrorCode", + "LLMException", + "LLMGroundingAttribution", + "LLMGroundingMetadata", + "LLMMessage", + "LLMResponse", + "LLMTool", + "LLMToolCall", + "LLMToolFunction", + "ModelDetail", + "ModelInfo", + "ModelName", + "ModelProvider", + "ProviderConfig", + "ResponseFormat", + "ToolCategory", + "ToolMetadata", + "UsageInfo", + "get_user_friendly_error_message", +] diff --git a/zhenxun/services/llm/types/content.py b/zhenxun/services/llm/types/content.py new file mode 100644 index 00000000..54887bc3 --- /dev/null +++ b/zhenxun/services/llm/types/content.py @@ -0,0 +1,428 @@ +""" +LLM 内容类型定义 + +包含多模态内容部分、消息和响应的数据模型。 +""" + +import base64 +import mimetypes +from pathlib import Path +from typing import Any + +import aiofiles +from pydantic import BaseModel + +from zhenxun.services.log import logger + + +class LLMContentPart(BaseModel): + """LLM 消息内容部分 - 支持多模态内容""" + + type: str + text: str | None = None + image_source: str | None = None + audio_source: str | None = None + video_source: str | None = None + document_source: str | None = None + file_uri: str | None = None + file_source: str | None = None + url: str | None = None + mime_type: str | None = None + metadata: dict[str, Any] | None = None + + def model_post_init(self, /, __context: Any) -> None: + """验证内容部分的有效性""" + _ = __context + validation_rules = { + "text": lambda: self.text, + "image": lambda: self.image_source, + "audio": lambda: self.audio_source, + "video": lambda: self.video_source, + "document": lambda: self.document_source, + "file": lambda: self.file_uri or self.file_source, + "url": lambda: self.url, + } + + if self.type in validation_rules: + if not validation_rules[self.type](): + raise ValueError(f"{self.type}类型的内容部分必须包含相应字段") + + @classmethod + def text_part(cls, text: str) -> "LLMContentPart": + """创建文本内容部分""" + return cls(type="text", text=text) + + @classmethod + def image_url_part(cls, url: str) -> 
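# --- Editor's illustrative sketch (annotation, not part of the patch) ---
# Minimal use of generate_embeddings() defined above. `model` is assumed to be an
# LLMModel whose provider exposes an embedding model; max_retries_embed is one of
# the kwargs read by the method a few lines earlier.
from zhenxun.services.llm.types import EmbeddingTaskType

async def _embedding_demo(model) -> list[list[float]]:
    return await model.generate_embeddings(
        ["zhenxun bot", "scheduled task manager"],
        task_type=EmbeddingTaskType.SEMANTIC_SIMILARITY,
        max_retries_embed=2,
    )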
"LLMContentPart": + """创建图片URL内容部分""" + return cls(type="image", image_source=url) + + @classmethod + def image_base64_part( + cls, data: str, mime_type: str = "image/png" + ) -> "LLMContentPart": + """创建Base64图片内容部分""" + data_url = f"data:{mime_type};base64,{data}" + return cls(type="image", image_source=data_url) + + @classmethod + def audio_url_part(cls, url: str, mime_type: str = "audio/wav") -> "LLMContentPart": + """创建音频URL内容部分""" + return cls(type="audio", audio_source=url, mime_type=mime_type) + + @classmethod + def video_url_part(cls, url: str, mime_type: str = "video/mp4") -> "LLMContentPart": + """创建视频URL内容部分""" + return cls(type="video", video_source=url, mime_type=mime_type) + + @classmethod + def video_base64_part( + cls, data: str, mime_type: str = "video/mp4" + ) -> "LLMContentPart": + """创建Base64视频内容部分""" + data_url = f"data:{mime_type};base64,{data}" + return cls(type="video", video_source=data_url, mime_type=mime_type) + + @classmethod + def audio_base64_part( + cls, data: str, mime_type: str = "audio/wav" + ) -> "LLMContentPart": + """创建Base64音频内容部分""" + data_url = f"data:{mime_type};base64,{data}" + return cls(type="audio", audio_source=data_url, mime_type=mime_type) + + @classmethod + def file_uri_part( + cls, + file_uri: str, + mime_type: str | None = None, + metadata: dict[str, Any] | None = None, + ) -> "LLMContentPart": + """创建Gemini File API URI内容部分""" + return cls( + type="file", + file_uri=file_uri, + mime_type=mime_type, + metadata=metadata or {}, + ) + + @classmethod + async def from_path( + cls, path_like: str | Path, target_api: str | None = None + ) -> "LLMContentPart | None": + """ + 从本地文件路径创建 LLMContentPart。 + 自动检测MIME类型,并根据类型(如图片)可能加载为Base64。 + target_api 可以用于提示如何最好地准备数据(例如 'gemini' 可能偏好 base64) + """ + try: + path = Path(path_like) + if not path.exists() or not path.is_file(): + logger.warning(f"文件不存在或不是一个文件: {path}") + return None + + mime_type, _ = mimetypes.guess_type(path.resolve().as_uri()) + + if not mime_type: + logger.warning( + f"无法猜测文件 {path.name} 的MIME类型,将尝试作为文本文件处理。" + ) + try: + async with aiofiles.open(path, encoding="utf-8") as f: + text_content = await f.read() + return cls.text_part(text_content) + except Exception as e: + logger.error(f"读取文本文件 {path.name} 失败: {e}") + return None + + if mime_type.startswith("image/"): + if target_api == "gemini" or not path.is_absolute(): + try: + async with aiofiles.open(path, "rb") as f: + img_bytes = await f.read() + base64_data = base64.b64encode(img_bytes).decode("utf-8") + return cls.image_base64_part( + data=base64_data, mime_type=mime_type + ) + except Exception as e: + logger.error(f"读取或编码图片文件 {path.name} 失败: {e}") + return None + else: + logger.warning( + f"为本地图片路径 {path.name} 生成 image_url_part。" + "实际API可能不支持 file:// URI。考虑使用Base64或公网URL。" + ) + return cls.image_url_part(url=path.resolve().as_uri()) + elif mime_type.startswith("audio/"): + return cls.audio_url_part( + url=path.resolve().as_uri(), mime_type=mime_type + ) + elif mime_type.startswith("video/"): + if target_api == "gemini": + # 对于 Gemini API,将视频转换为 base64 + try: + async with aiofiles.open(path, "rb") as f: + video_bytes = await f.read() + base64_data = base64.b64encode(video_bytes).decode("utf-8") + return cls.video_base64_part( + data=base64_data, mime_type=mime_type + ) + except Exception as e: + logger.error(f"读取或编码视频文件 {path.name} 失败: {e}") + return None + else: + return cls.video_url_part( + url=path.resolve().as_uri(), mime_type=mime_type + ) + elif ( + mime_type.startswith("text/") + or mime_type == "application/json" + 
or mime_type == "application/xml" + ): + try: + async with aiofiles.open(path, encoding="utf-8") as f: + text_content = await f.read() + return cls.text_part(text_content) + except Exception as e: + logger.error(f"读取文本类文件 {path.name} 失败: {e}") + return None + else: + logger.info( + f"文件 {path.name} (MIME: {mime_type}) 将作为通用文件URI处理。" + ) + return cls.file_uri_part( + file_uri=path.resolve().as_uri(), + mime_type=mime_type, + metadata={"name": path.name, "source": "local_path"}, + ) + + except Exception as e: + logger.error(f"从路径 {path_like} 创建LLMContentPart时出错: {e}") + return None + + def is_image_url(self) -> bool: + """检查图像源是否为URL""" + if not self.image_source: + return False + return self.image_source.startswith(("http://", "https://")) + + def is_image_base64(self) -> bool: + """检查图像源是否为Base64 Data URL""" + if not self.image_source: + return False + return self.image_source.startswith("data:") + + def get_base64_data(self) -> tuple[str, str] | None: + """从Data URL中提取Base64数据和MIME类型""" + if not self.is_image_base64() or not self.image_source: + return None + + try: + header, data = self.image_source.split(",", 1) + mime_part = header.split(";")[0].replace("data:", "") + return mime_part, data + except (ValueError, IndexError): + logger.warning(f"无法解析Base64图像数据: {self.image_source[:50]}...") + return None + + def convert_for_api(self, api_type: str) -> dict[str, Any]: + """根据API类型转换多模态内容格式""" + if self.type == "text": + if api_type == "openai": + return {"type": "text", "text": self.text} + elif api_type == "gemini": + return {"text": self.text} + else: + return {"type": "text", "text": self.text} + + elif self.type == "image": + if not self.image_source: + raise ValueError("图像类型的内容必须包含image_source") + + if api_type == "openai": + return {"type": "image_url", "image_url": {"url": self.image_source}} + elif api_type == "gemini": + if self.is_image_base64(): + base64_info = self.get_base64_data() + if base64_info: + mime_type, data = base64_info + return {"inlineData": {"mimeType": mime_type, "data": data}} + else: + # 如果无法解析 Base64 数据,抛出异常 + raise ValueError( + f"无法解析Base64图像数据: {self.image_source[:50]}..." + ) + else: + logger.warning( + f"Gemini API需要Base64格式,但提供的是URL: {self.image_source}" + ) + return { + "inlineData": { + "mimeType": "image/jpeg", + "data": self.image_source, + } + } + else: + return {"type": "image_url", "image_url": {"url": self.image_source}} + + elif self.type == "video": + if not self.video_source: + raise ValueError("视频类型的内容必须包含video_source") + + if api_type == "gemini": + # Gemini 支持视频,但需要通过 File API 上传 + if self.video_source.startswith("data:"): + # 处理 base64 视频数据 + try: + header, data = self.video_source.split(",", 1) + mime_type = header.split(";")[0].replace("data:", "") + return {"inlineData": {"mimeType": mime_type, "data": data}} + except (ValueError, IndexError): + raise ValueError( + f"无法解析Base64视频数据: {self.video_source[:50]}..." 
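# --- Editor's illustrative sketch (annotation, not part of the patch) ---
# The same content part rendered for the two API dialects handled by
# convert_for_api() above; the base64 payload is a placeholder, not real image data.
from zhenxun.services.llm.types import LLMContentPart

part = LLMContentPart.image_base64_part(data="iVBORw0KGgo=", mime_type="image/png")
part.convert_for_api("openai")
# -> {"type": "image_url",
#     "image_url": {"url": "data:image/png;base64,iVBORw0KGgo="}}
part.convert_for_api("gemini")
# -> {"inlineData": {"mimeType": "image/png", "data": "iVBORw0KGgo="}}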
+ ) + else: + # 对于 URL 或其他格式,暂时不支持直接内联 + raise ValueError( + "Gemini API 的视频处理需要通过 File API 上传,不支持直接 URL" + ) + else: + # 其他 API 可能不支持视频 + raise ValueError(f"API类型 '{api_type}' 不支持视频内容") + + elif self.type == "audio": + if not self.audio_source: + raise ValueError("音频类型的内容必须包含audio_source") + + if api_type == "gemini": + # Gemini 支持音频,处理方式类似视频 + if self.audio_source.startswith("data:"): + try: + header, data = self.audio_source.split(",", 1) + mime_type = header.split(";")[0].replace("data:", "") + return {"inlineData": {"mimeType": mime_type, "data": data}} + except (ValueError, IndexError): + raise ValueError( + f"无法解析Base64音频数据: {self.audio_source[:50]}..." + ) + else: + raise ValueError( + "Gemini API 的音频处理需要通过 File API 上传,不支持直接 URL" + ) + else: + raise ValueError(f"API类型 '{api_type}' 不支持音频内容") + + elif self.type == "file": + if api_type == "gemini" and self.file_uri: + return { + "fileData": {"mimeType": self.mime_type, "fileUri": self.file_uri} + } + elif self.file_source: + file_name = ( + self.metadata.get("name", "file") if self.metadata else "file" + ) + if api_type == "gemini": + return {"text": f"[文件: {file_name}]\n{self.file_source}"} + else: + return { + "type": "text", + "text": f"[文件: {file_name}]\n{self.file_source}", + } + else: + raise ValueError("文件类型的内容必须包含file_uri或file_source") + + else: + raise ValueError(f"不支持的内容类型: {self.type}") + + +class LLMMessage(BaseModel): + """LLM 消息""" + + role: str + content: str | list[LLMContentPart] + name: str | None = None + tool_calls: list[Any] | None = None + tool_call_id: str | None = None + + def model_post_init(self, /, __context: Any) -> None: + """验证消息的有效性""" + _ = __context + if self.role == "tool": + if not self.tool_call_id: + raise ValueError("工具角色的消息必须包含 tool_call_id") + if not self.name: + raise ValueError("工具角色的消息必须包含函数名 (在 name 字段中)") + if self.role == "tool" and not isinstance(self.content, str): + logger.warning( + f"工具角色消息的内容期望是字符串,但得到的是: {type(self.content)}. " + "将尝试转换为字符串。" + ) + try: + self.content = str(self.content) + except Exception as e: + raise ValueError(f"无法将工具角色的内容转换为字符串: {e}") + + @classmethod + def user(cls, content: str | list[LLMContentPart]) -> "LLMMessage": + """创建用户消息""" + return cls(role="user", content=content) + + @classmethod + def assistant_tool_calls( + cls, + tool_calls: list[Any], + content: str | list[LLMContentPart] = "", + ) -> "LLMMessage": + """创建助手请求工具调用的消息""" + return cls(role="assistant", content=content, tool_calls=tool_calls) + + @classmethod + def assistant_text_response( + cls, content: str | list[LLMContentPart] + ) -> "LLMMessage": + """创建助手纯文本回复的消息""" + return cls(role="assistant", content=content, tool_calls=None) + + @classmethod + def tool_response( + cls, + tool_call_id: str, + function_name: str, + result: Any, + ) -> "LLMMessage": + """创建工具执行结果的消息""" + import json + + try: + content_str = json.dumps(result) + except TypeError as e: + logger.error( + f"工具 '{function_name}' 的结果无法JSON序列化: {result}. 
错误: {e}" + ) + content_str = json.dumps( + {"error": "Tool result not JSON serializable", "details": str(e)} + ) + + return cls( + role="tool", + content=content_str, + tool_call_id=tool_call_id, + name=function_name, + ) + + @classmethod + def system(cls, content: str) -> "LLMMessage": + """创建系统消息""" + return cls(role="system", content=content) + + +class LLMResponse(BaseModel): + """LLM 响应""" + + text: str + usage_info: dict[str, Any] | None = None + raw_response: dict[str, Any] | None = None + tool_calls: list[Any] | None = None + code_executions: list[Any] | None = None + grounding_metadata: Any | None = None + cache_info: Any | None = None diff --git a/zhenxun/services/llm/types/enums.py b/zhenxun/services/llm/types/enums.py new file mode 100644 index 00000000..718a52ef --- /dev/null +++ b/zhenxun/services/llm/types/enums.py @@ -0,0 +1,67 @@ +""" +LLM 枚举类型定义 +""" + +from enum import Enum, auto + + +class ModelProvider(Enum): + """模型提供商枚举""" + + OPENAI = "openai" + GEMINI = "gemini" + ZHIXPU = "zhipu" + CUSTOM = "custom" + + +class ResponseFormat(Enum): + """响应格式枚举""" + + TEXT = "text" + JSON = "json" + MULTIMODAL = "multimodal" + + +class EmbeddingTaskType(str, Enum): + """文本嵌入任务类型 (主要用于Gemini)""" + + RETRIEVAL_QUERY = "RETRIEVAL_QUERY" + RETRIEVAL_DOCUMENT = "RETRIEVAL_DOCUMENT" + SEMANTIC_SIMILARITY = "SEMANTIC_SIMILARITY" + CLASSIFICATION = "CLASSIFICATION" + CLUSTERING = "CLUSTERING" + QUESTION_ANSWERING = "QUESTION_ANSWERING" + FACT_VERIFICATION = "FACT_VERIFICATION" + + +class ToolCategory(Enum): + """工具分类枚举""" + + FILE_SYSTEM = auto() + NETWORK = auto() + SYSTEM_INFO = auto() + CALCULATION = auto() + DATA_PROCESSING = auto() + CUSTOM = auto() + + +class LLMErrorCode(Enum): + """LLM 服务相关的错误代码枚举""" + + MODEL_INIT_FAILED = 2000 + MODEL_NOT_FOUND = 2001 + API_REQUEST_FAILED = 2002 + API_RESPONSE_INVALID = 2003 + API_KEY_INVALID = 2004 + API_QUOTA_EXCEEDED = 2005 + API_TIMEOUT = 2006 + API_RATE_LIMITED = 2007 + NO_AVAILABLE_KEYS = 2008 + UNKNOWN_API_TYPE = 2009 + CONFIGURATION_ERROR = 2010 + RESPONSE_PARSE_ERROR = 2011 + CONTEXT_LENGTH_EXCEEDED = 2012 + CONTENT_FILTERED = 2013 + USER_LOCATION_NOT_SUPPORTED = 2014 + GENERATION_FAILED = 2015 + EMBEDDING_FAILED = 2016 diff --git a/zhenxun/services/llm/types/exceptions.py b/zhenxun/services/llm/types/exceptions.py new file mode 100644 index 00000000..623d4c26 --- /dev/null +++ b/zhenxun/services/llm/types/exceptions.py @@ -0,0 +1,80 @@ +""" +LLM 异常类型定义 +""" + +from typing import Any + +from .enums import LLMErrorCode + + +class LLMException(Exception): + """LLM 服务相关的基础异常类""" + + def __init__( + self, + message: str, + code: LLMErrorCode = LLMErrorCode.API_REQUEST_FAILED, + details: dict[str, Any] | None = None, + recoverable: bool = True, + cause: Exception | None = None, + ): + self.message = message + self.code = code + self.details = details or {} + self.recoverable = recoverable + self.cause = cause + super().__init__(message) + + def __str__(self) -> str: + if self.details: + return f"{self.message} (错误码: {self.code.name}, 详情: {self.details})" + return f"{self.message} (错误码: {self.code.name})" + + @property + def user_friendly_message(self) -> str: + """返回适合向用户展示的错误消息""" + error_messages = { + LLMErrorCode.MODEL_NOT_FOUND: "AI模型未找到,请检查配置或联系管理员。", + LLMErrorCode.API_KEY_INVALID: "API密钥无效,请联系管理员更新配置。", + LLMErrorCode.API_QUOTA_EXCEEDED: ( + "API使用配额已用尽,请稍后再试或联系管理员。" + ), + LLMErrorCode.API_TIMEOUT: "AI服务响应超时,请稍后再试。", + LLMErrorCode.API_RATE_LIMITED: "请求过于频繁,已被AI服务限流,请稍后再试。", + LLMErrorCode.MODEL_INIT_FAILED: "AI模型初始化失败,请联系管理员检查配置。", 
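# --- Editor's illustrative sketch (annotation, not part of the patch) ---
# How callers are expected to turn these errors into user-facing text. `model` is an
# assumed LLMModel instance; generate_text() and both helpers are defined in this patch.
from zhenxun.services.llm.types import (
    LLMException,
    get_user_friendly_error_message,
)

async def _call_llm_safely(model, prompt: str) -> str:
    try:
        return await model.generate_text(prompt)
    except LLMException as e:
        # e.g. code=API_QUOTA_EXCEEDED maps to "API使用配额已用尽,请稍后再试或联系管理员。"
        return e.user_friendly_message
    except Exception as e:
        return get_user_friendly_error_message(e)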
+ LLMErrorCode.NO_AVAILABLE_KEYS: ( + "当前所有API密钥均不可用,请稍后再试或联系管理员。" + ), + LLMErrorCode.USER_LOCATION_NOT_SUPPORTED: ( + "当前地区暂不支持此AI服务,请联系管理员或尝试其他模型。" + ), + LLMErrorCode.API_REQUEST_FAILED: "AI服务请求失败,请稍后再试。", + LLMErrorCode.API_RESPONSE_INVALID: "AI服务响应异常,请稍后再试。", + LLMErrorCode.CONFIGURATION_ERROR: "AI服务配置错误,请联系管理员。", + LLMErrorCode.CONTEXT_LENGTH_EXCEEDED: "输入内容过长,请缩短后重试。", + LLMErrorCode.CONTENT_FILTERED: "内容被安全过滤,请修改后重试。", + LLMErrorCode.RESPONSE_PARSE_ERROR: "AI服务响应解析失败,请稍后再试。", + LLMErrorCode.UNKNOWN_API_TYPE: "不支持的AI服务类型,请联系管理员。", + } + return error_messages.get(self.code, "AI服务暂时不可用,请稍后再试。") + + +def get_user_friendly_error_message(error: Exception) -> str: + """将任何异常转换为用户友好的错误消息""" + if isinstance(error, LLMException): + return error.user_friendly_message + + error_str = str(error).lower() + + if "timeout" in error_str or "超时" in error_str: + return "请求超时,请稍后再试。" + elif "connection" in error_str or "连接" in error_str: + return "网络连接失败,请检查网络后重试。" + elif "permission" in error_str or "权限" in error_str: + return "权限不足,请联系管理员。" + elif "not found" in error_str or "未找到" in error_str: + return "请求的资源未找到,请检查配置。" + elif "invalid" in error_str or "无效" in error_str: + return "请求参数无效,请检查输入。" + else: + return "服务暂时不可用,请稍后再试。" diff --git a/zhenxun/services/llm/types/models.py b/zhenxun/services/llm/types/models.py new file mode 100644 index 00000000..c5f541bc --- /dev/null +++ b/zhenxun/services/llm/types/models.py @@ -0,0 +1,160 @@ +""" +LLM 数据模型定义 + +包含模型信息、配置、工具定义和响应数据的模型类。 +""" + +from dataclasses import dataclass, field +from typing import Any + +from pydantic import BaseModel, Field + +from .enums import ModelProvider, ToolCategory + +ModelName = str | None + + +@dataclass(frozen=True) +class ModelInfo: + """模型信息(不可变数据类)""" + + name: str + provider: ModelProvider + max_tokens: int = 4096 + supports_tools: bool = False + supports_vision: bool = False + supports_audio: bool = False + cost_per_1k_tokens: float = 0.0 + + +@dataclass +class UsageInfo: + """使用信息数据类""" + + prompt_tokens: int = 0 + completion_tokens: int = 0 + total_tokens: int = 0 + cost: float = 0.0 + + @property + def efficiency_ratio(self) -> float: + """计算效率比(输出/输入)""" + return self.completion_tokens / max(self.prompt_tokens, 1) + + +@dataclass +class ToolMetadata: + """工具元数据""" + + name: str + description: str + category: ToolCategory + read_only: bool = True + destructive: bool = False + open_world: bool = False + parameters: dict[str, Any] = field(default_factory=dict) + required_params: list[str] = field(default_factory=list) + + +class ModelDetail(BaseModel): + """模型详细信息""" + + model_name: str + is_available: bool = True + is_embedding_model: bool = False + temperature: float | None = None + max_tokens: int | None = None + + +class ProviderConfig(BaseModel): + """LLM 提供商配置""" + + name: str = Field(..., description="Provider 的唯一名称标识") + api_key: str | list[str] = Field(..., description="API Key 或 Key 列表") + api_base: str | None = Field(None, description="API Base URL,如果为空则使用默认值") + api_type: str = Field(default="openai", description="API 类型") + openai_compat: bool = Field(default=False, description="是否使用 OpenAI 兼容模式") + temperature: float | None = Field(default=0.7, description="默认温度参数") + max_tokens: int | None = Field(default=None, description="默认最大输出 token 限制") + models: list[ModelDetail] = Field(..., description="支持的模型列表") + timeout: int = Field(default=180, description="请求超时时间") + proxy: str | None = Field(default=None, description="代理设置") + + +class LLMToolFunction(BaseModel): + """LLM 工具函数定义""" + + name: str + 
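# --- Editor's illustrative sketch (annotation, not part of the patch) ---
# What one provider entry looks like when validated through the models above;
# the provider name, keys and model names are placeholder values.
from zhenxun.services.llm.types import ModelDetail, ProviderConfig

provider = ProviderConfig(
    name="my_gemini",
    api_key=["AIza-key-1", "AIza-key-2"],  # a single key or a key list is accepted
    api_type="gemini",
    models=[
        ModelDetail(model_name="gemini-2.0-flash"),
        ModelDetail(model_name="text-embedding-004", is_embedding_model=True),
    ],
)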
arguments: str + + +class LLMToolCall(BaseModel): + """LLM 工具调用""" + + id: str + function: LLMToolFunction + + +class LLMTool(BaseModel): + """LLM 工具定义(支持 MCP 风格)""" + + type: str = "function" + function: dict[str, Any] + annotations: dict[str, Any] | None = Field(default=None, description="工具注解") + + @classmethod + def create( + cls, + name: str, + description: str, + parameters: dict[str, Any], + required: list[str] | None = None, + annotations: dict[str, Any] | None = None, + ) -> "LLMTool": + """创建工具""" + function_def = { + "name": name, + "description": description, + "parameters": { + "type": "object", + "properties": parameters, + "required": required or [], + }, + } + return cls(type="function", function=function_def, annotations=annotations) + + +class LLMCodeExecution(BaseModel): + """代码执行结果""" + + code: str + output: str | None = None + error: str | None = None + execution_time: float | None = None + files_generated: list[str] | None = None + + +class LLMGroundingAttribution(BaseModel): + """信息来源关联""" + + title: str | None = None + uri: str | None = None + snippet: str | None = None + confidence_score: float | None = None + + +class LLMGroundingMetadata(BaseModel): + """信息来源关联元数据""" + + web_search_queries: list[str] | None = None + grounding_attributions: list[LLMGroundingAttribution] | None = None + search_suggestions: list[dict[str, Any]] | None = None + + +class LLMCacheInfo(BaseModel): + """缓存信息""" + + cache_hit: bool = False + cache_key: str | None = None + cache_ttl: int | None = None + created_at: str | None = None diff --git a/zhenxun/services/llm/utils.py b/zhenxun/services/llm/utils.py new file mode 100644 index 00000000..3610df27 --- /dev/null +++ b/zhenxun/services/llm/utils.py @@ -0,0 +1,218 @@ +""" +LLM 模块的工具和转换函数 +""" + +import base64 +from pathlib import Path + +from nonebot_plugin_alconna.uniseg import ( + At, + File, + Image, + Reply, + Text, + UniMessage, + Video, + Voice, +) + +from zhenxun.services.log import logger + +from .types import LLMContentPart + + +async def unimsg_to_llm_parts(message: UniMessage) -> list[LLMContentPart]: + """ + 将 UniMessage 实例转换为一个 LLMContentPart 列表。 + 这是处理多模态输入的核心转换逻辑。 + """ + parts: list[LLMContentPart] = [] + for seg in message: + part = None + if isinstance(seg, Text): + if seg.text.strip(): + part = LLMContentPart.text_part(seg.text) + elif isinstance(seg, Image): + if seg.path: + part = await LLMContentPart.from_path(seg.path, target_api="gemini") + elif seg.url: + part = LLMContentPart.image_url_part(seg.url) + elif hasattr(seg, "raw") and seg.raw: + mime_type = ( + getattr(seg, "mimetype", "image/png") + if hasattr(seg, "mimetype") + else "image/png" + ) + if isinstance(seg.raw, bytes): + b64_data = base64.b64encode(seg.raw).decode("utf-8") + part = LLMContentPart.image_base64_part(b64_data, mime_type) + + elif isinstance(seg, File | Voice | Video): + if seg.path: + part = await LLMContentPart.from_path(seg.path) + elif seg.url: + logger.warning( + f"直接使用 URL 的 {type(seg).__name__} 段," + f"API 可能不支持: {seg.url}" + ) + part = LLMContentPart.text_part( + f"[{type(seg).__name__.upper()} FILE: {seg.name or seg.url}]" + ) + elif hasattr(seg, "raw") and seg.raw: + mime_type = getattr(seg, "mimetype", None) + if isinstance(seg.raw, bytes): + b64_data = base64.b64encode(seg.raw).decode("utf-8") + + if isinstance(seg, Video): + if not mime_type: + mime_type = "video/mp4" + part = LLMContentPart.video_base64_part( + data=b64_data, mime_type=mime_type + ) + logger.debug( + f"处理视频字节数据: {mime_type}, 大小: {len(seg.raw)} bytes" + ) + elif 
isinstance(seg, Voice): + if not mime_type: + mime_type = "audio/wav" + part = LLMContentPart.audio_base64_part( + data=b64_data, mime_type=mime_type + ) + logger.debug( + f"处理音频字节数据: {mime_type}, 大小: {len(seg.raw)} bytes" + ) + else: + part = LLMContentPart.text_part( + f"[FILE: {mime_type or 'unknown'}, {len(seg.raw)} bytes]" + ) + logger.debug( + f"处理其他文件字节数据: {mime_type}, " + f"大小: {len(seg.raw)} bytes" + ) + + elif isinstance(seg, At): + if seg.flag == "all": + part = LLMContentPart.text_part("[Mentioned Everyone]") + else: + part = LLMContentPart.text_part(f"[Mentioned user: {seg.target}]") + + elif isinstance(seg, Reply): + if seg.msg: + try: + extract_method = getattr(seg.msg, "extract_plain_text", None) + if extract_method and callable(extract_method): + reply_text = str(extract_method()).strip() + else: + reply_text = str(seg.msg).strip() + if reply_text: + part = LLMContentPart.text_part( + f'[Replied to: "{reply_text[:50]}..."]' + ) + except Exception: + part = LLMContentPart.text_part("[Replied to a message]") + + if part: + parts.append(part) + + return parts + + +def create_multimodal_message( + text: str | None = None, + images: list[str | Path | bytes] | str | Path | bytes | None = None, + videos: list[str | Path | bytes] | str | Path | bytes | None = None, + audios: list[str | Path | bytes] | str | Path | bytes | None = None, + image_mimetypes: list[str] | str | None = None, + video_mimetypes: list[str] | str | None = None, + audio_mimetypes: list[str] | str | None = None, +) -> UniMessage: + """ + 创建多模态消息的便捷函数,方便第三方调用。 + + Args: + text: 文本内容 + images: 图片数据,支持路径、字节数据或URL + videos: 视频数据,支持路径、字节数据或URL + audios: 音频数据,支持路径、字节数据或URL + image_mimetypes: 图片MIME类型,当images为bytes时需要指定 + video_mimetypes: 视频MIME类型,当videos为bytes时需要指定 + audio_mimetypes: 音频MIME类型,当audios为bytes时需要指定 + + Returns: + UniMessage: 构建好的多模态消息 + + Examples: + # 纯文本 + msg = create_multimodal_message("请分析这段文字") + + # 文本 + 单张图片(路径) + msg = create_multimodal_message("分析图片", images="/path/to/image.jpg") + + # 文本 + 多张图片 + msg = create_multimodal_message( + "比较图片", images=["/path/1.jpg", "/path/2.jpg"] + ) + + # 文本 + 图片字节数据 + msg = create_multimodal_message( + "分析", images=image_data, image_mimetypes="image/jpeg" + ) + + # 文本 + 视频 + msg = create_multimodal_message("分析视频", videos="/path/to/video.mp4") + + # 文本 + 音频 + msg = create_multimodal_message("转录音频", audios="/path/to/audio.wav") + + # 混合多模态 + msg = create_multimodal_message( + "分析这些媒体文件", + images="/path/to/image.jpg", + videos="/path/to/video.mp4", + audios="/path/to/audio.wav" + ) + """ + message = UniMessage() + + if text: + message.append(Text(text)) + + if images is not None: + _add_media_to_message(message, images, image_mimetypes, Image, "image/png") + + if videos is not None: + _add_media_to_message(message, videos, video_mimetypes, Video, "video/mp4") + + if audios is not None: + _add_media_to_message(message, audios, audio_mimetypes, Voice, "audio/wav") + + return message + + +def _add_media_to_message( + message: UniMessage, + media_items: list[str | Path | bytes] | str | Path | bytes, + mimetypes: list[str] | str | None, + media_class: type, + default_mimetype: str, +) -> None: + """添加媒体文件到 UniMessage 的辅助函数""" + if not isinstance(media_items, list): + media_items = [media_items] + + mime_list = [] + if mimetypes is not None: + if isinstance(mimetypes, str): + mime_list = [mimetypes] * len(media_items) + else: + mime_list = list(mimetypes) + + for i, item in enumerate(media_items): + if isinstance(item, str | Path): + if str(item).startswith(("http://", 
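# --- Editor's illustrative sketch (annotation, not part of the patch) ---
# Turning an incoming UniMessage into multimodal parts for generate_response();
# the image URL is a placeholder.
from nonebot_plugin_alconna.uniseg import Image, Text, UniMessage

from zhenxun.services.llm.types import LLMMessage
from zhenxun.services.llm.utils import unimsg_to_llm_parts

async def _build_llm_message() -> LLMMessage:
    uni = UniMessage(
        [Text("describe this picture"), Image(url="https://example.com/cat.png")]
    )
    parts = await unimsg_to_llm_parts(uni)
    return LLMMessage.user(parts)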
"https://")): + message.append(media_class(url=str(item))) + else: + message.append(media_class(path=Path(item))) + elif isinstance(item, bytes): + mimetype = mime_list[i] if i < len(mime_list) else default_mimetype + message.append(media_class(raw=item, mimetype=mimetype)) diff --git a/zhenxun/services/log.py b/zhenxun/services/log.py index 96a45bce..beb2b9c0 100644 --- a/zhenxun/services/log.py +++ b/zhenxun/services/log.py @@ -1,4 +1,4 @@ -from datetime import datetime, timedelta +from datetime import timedelta from typing import Any, overload import nonebot @@ -17,7 +17,7 @@ driver = nonebot.get_driver() log_level = driver.config.log_level or "INFO" logger_.add( - LOG_PATH / f"{datetime.now().date()}.log", + LOG_PATH / "{time:YYYY-MM-DD}.log", level=log_level, rotation="00:00", format=default_format, @@ -26,7 +26,7 @@ logger_.add( ) logger_.add( - LOG_PATH / f"error_{datetime.now().date()}.log", + LOG_PATH / "error_{time:YYYY-MM-DD}.log", level="ERROR", rotation="00:00", format=default_format, @@ -36,26 +36,92 @@ logger_.add( class logger: - TEMPLATE_A = "Adapter[{}] {}" - TEMPLATE_B = "Adapter[{}] [{}]: {}" - TEMPLATE_C = "Adapter[{}] 用户[{}] 触发 [{}]: {}" - TEMPLATE_D = "Adapter[{}] 群聊[{}] 用户[{}] 触发" - " [{}]: {}" - TEMPLATE_E = "Adapter[{}] 群聊[{}] 用户[{}] 触发" - " [{}] [Target]({}): {}" - - TEMPLATE_ADAPTER = "Adapter[{}] " - TEMPLATE_USER = "用户[{}] " - TEMPLATE_GROUP = "群聊[{}] " - TEMPLATE_COMMAND = "CMD[{}] " - TEMPLATE_PLATFORM = "平台[{}] " - TEMPLATE_TARGET = "[Target]([{}]) " + """ + 一个经过优化的、支持多种上下文和格式的日志记录器。 + """ + TEMPLATE_ADAPTER = "Adapter[{}]" + TEMPLATE_USER = "用户[{}]" + TEMPLATE_GROUP = "群聊[{}]" + TEMPLATE_COMMAND = "CMD[{}]" + TEMPLATE_PLATFORM = "平台[{}]" + TEMPLATE_TARGET = "[Target]([{}])" SUCCESS_TEMPLATE = "[{}]: {} | 参数[{}] 返回: [{}]" - WARNING_TEMPLATE = "[{}]: {}" + @classmethod + def __parser_template( + cls, + info: str, + command: str | None = None, + user_id: int | str | None = None, + group_id: int | str | None = None, + adapter: str | None = None, + target: Any = None, + platform: str | None = None, + ) -> str: + """ + 优化后的模板解析器,构建并连接日志信息片段。 + """ + parts = [] + if adapter: + parts.append(cls.TEMPLATE_ADAPTER.format(adapter)) + if platform: + parts.append(cls.TEMPLATE_PLATFORM.format(platform)) + if group_id: + parts.append(cls.TEMPLATE_GROUP.format(group_id)) + if user_id: + parts.append(cls.TEMPLATE_USER.format(user_id)) + if command: + parts.append(cls.TEMPLATE_COMMAND.format(command)) + if target: + parts.append(cls.TEMPLATE_TARGET.format(target)) - ERROR_TEMPLATE = "[{}]: {}" + parts.append(info) + return " ".join(parts) + + @classmethod + def _log( + cls, + level: str, + info: str, + command: str | None = None, + session: int | str | Session | uninfoSession | None = None, + group_id: int | str | None = None, + adapter: str | None = None, + target: Any = None, + platform: str | None = None, + e: Exception | None = None, + ): + """ + 核心日志处理方法,处理所有日志级别的通用逻辑。 + """ + user_id: str | None = str(session) if isinstance(session, int | str) else None + + if isinstance(session, Session): + user_id = session.id1 + adapter = session.bot_type + group_id = f"{session.id3}:{session.id2}" if session.id3 else session.id2 + platform = platform or session.platform + elif isinstance(session, uninfoSession): + user_id = session.user.id + adapter = session.adapter + if session.group: + group_id = session.group.id + platform = session.basic.get("scope") + + template = cls.__parser_template( + info, command, user_id, group_id, adapter, target, platform + ) + + if e: + template += 
f" || 错误 {type(e).__name__}: {e}" + + try: + log_func = getattr(logger_.opt(colors=True), level) + log_func(template) + except Exception: + log_func_fallback = getattr(logger_, level) + log_func_fallback(template) @overload @classmethod @@ -70,7 +136,6 @@ class logger: target: Any = None, platform: str | None = None, ): ... - @overload @classmethod def info( @@ -82,7 +147,6 @@ class logger: target: Any = None, platform: str | None = None, ): ... - @overload @classmethod def info( @@ -107,28 +171,16 @@ class logger: target: Any = None, platform: str | None = None, ): - user_id: str | None = session # type: ignore - if isinstance(session, Session): - user_id = session.id1 - adapter = session.bot_type - if session.id3: - group_id = f"{session.id3}:{session.id2}" - elif session.id2: - group_id = f"{session.id2}" - platform = platform or session.platform - elif isinstance(session, uninfoSession): - user_id = session.user.id - adapter = session.adapter - if session.group: - group_id = session.group.id - platform = session.basic["scope"] - template = cls.__parser_template( - info, command, user_id, group_id, adapter, target, platform + cls._log( + "info", + info=info, + command=command, + session=session, + group_id=group_id, + adapter=adapter, + target=target, + platform=platform, ) - try: - logger_.opt(colors=True).info(template) - except Exception: - logger_.info(template) @classmethod def success( @@ -138,9 +190,11 @@ class logger: param: dict[str, Any] | None = None, result: str = "", ): - param_str = "" - if param: - param_str = ",".join([f"{k}:{v}" for k, v in param.items()]) + param_str = ( + ",".join([f"{k}:{v}" for k, v in param.items()]) + if param + else "" + ) logger_.opt(colors=True).success( cls.SUCCESS_TEMPLATE.format(command, info, param_str, result) ) @@ -159,7 +213,6 @@ class logger: platform: str | None = None, e: Exception | None = None, ): ... - @overload @classmethod def warning( @@ -168,12 +221,10 @@ class logger: command: str | None = None, *, session: Session | None = None, - adapter: str | None = None, target: Any = None, platform: str | None = None, e: Exception | None = None, ): ... - @overload @classmethod def warning( @@ -182,7 +233,6 @@ class logger: command: str | None = None, *, session: uninfoSession | None = None, - adapter: str | None = None, target: Any = None, platform: str | None = None, e: Exception | None = None, @@ -201,30 +251,17 @@ class logger: platform: str | None = None, e: Exception | None = None, ): - user_id: str | None = session # type: ignore - if isinstance(session, Session): - user_id = session.id1 - adapter = session.bot_type - if session.id3: - group_id = f"{session.id3}:{session.id2}" - elif session.id2: - group_id = f"{session.id2}" - platform = platform or session.platform - elif isinstance(session, uninfoSession): - user_id = session.user.id - adapter = session.adapter - if session.group: - group_id = session.group.id - platform = session.basic["scope"] - template = cls.__parser_template( - info, command, user_id, group_id, adapter, target, platform + cls._log( + "warning", + info=info, + command=command, + session=session, + group_id=group_id, + adapter=adapter, + target=target, + platform=platform, + e=e, ) - if e: - template += f" || 错误{type(e)}: {e}" - try: - logger_.opt(colors=True).warning(template) - except Exception as e: - logger_.warning(template) @overload @classmethod @@ -240,7 +277,6 @@ class logger: platform: str | None = None, e: Exception | None = None, ): ... 
- @overload @classmethod def error( @@ -253,7 +289,6 @@ class logger: platform: str | None = None, e: Exception | None = None, ): ... - @overload @classmethod def error( @@ -280,30 +315,17 @@ class logger: platform: str | None = None, e: Exception | None = None, ): - user_id: str | None = session # type: ignore - if isinstance(session, Session): - user_id = session.id1 - adapter = session.bot_type - if session.id3: - group_id = f"{session.id3}:{session.id2}" - elif session.id2: - group_id = f"{session.id2}" - platform = platform or session.platform - elif isinstance(session, uninfoSession): - user_id = session.user.id - adapter = session.adapter - if session.group: - group_id = session.group.id - platform = session.basic["scope"] - template = cls.__parser_template( - info, command, user_id, group_id, adapter, target, platform + cls._log( + "error", + info=info, + command=command, + session=session, + group_id=group_id, + adapter=adapter, + target=target, + platform=platform, + e=e, ) - if e: - template += f" || 错误 {type(e)}: {e}" - try: - logger_.opt(colors=True).error(template) - except Exception as e: - logger_.error(template) @overload @classmethod @@ -319,7 +341,6 @@ class logger: platform: str | None = None, e: Exception | None = None, ): ... - @overload @classmethod def debug( @@ -332,7 +353,6 @@ class logger: platform: str | None = None, e: Exception | None = None, ): ... - @overload @classmethod def debug( @@ -359,62 +379,78 @@ class logger: platform: str | None = None, e: Exception | None = None, ): - user_id: str | None = session # type: ignore - if isinstance(session, Session): - user_id = session.id1 - adapter = session.bot_type - if session.id3: - group_id = f"{session.id3}:{session.id2}" - elif session.id2: - group_id = f"{session.id2}" - platform = platform or session.platform - elif isinstance(session, uninfoSession): - user_id = session.user.id - adapter = session.adapter - if session.group: - group_id = session.group.id - platform = session.basic["scope"] - template = cls.__parser_template( - info, command, user_id, group_id, adapter, target, platform + cls._log( + "debug", + info=info, + command=command, + session=session, + group_id=group_id, + adapter=adapter, + target=target, + platform=platform, + e=e, ) - if e: - template += f" || 错误 {type(e)}: {e}" - try: - logger_.opt(colors=True).debug(template) - except Exception as e: - logger_.debug(template) + @overload @classmethod - def __parser_template( + def trace( cls, info: str, command: str | None = None, - user_id: int | str | None = None, + *, + session: int | str | None = None, group_id: int | str | None = None, adapter: str | None = None, target: Any = None, platform: str | None = None, - ) -> str: - arg_list = [] - template = "" - if adapter is not None: - template += cls.TEMPLATE_ADAPTER - arg_list.append(adapter) - if platform is not None: - template += cls.TEMPLATE_PLATFORM - arg_list.append(platform) - if group_id is not None: - template += cls.TEMPLATE_GROUP - arg_list.append(group_id) - if user_id is not None: - template += cls.TEMPLATE_USER - arg_list.append(user_id) - if command is not None: - template += cls.TEMPLATE_COMMAND - arg_list.append(command) - if target is not None: - template += cls.TEMPLATE_TARGET - arg_list.append(target) - arg_list.append(info) - template += "{}" - return template.format(*arg_list) + e: Exception | None = None, + ): ... 
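# --- Editor's illustrative sketch (annotation, not part of the patch) ---
# The refactored logger now builds the context prefix in one place (_log);
# the IDs below are made-up placeholders.
from zhenxun.services.log import logger

logger.info("user signed in", "sign_in", session=123456, group_id=654321, platform="qq")
# -> "平台[qq] 群聊[654321] 用户[123456] CMD[sign_in] user signed in"

try:
    1 / 0
except Exception as e:
    logger.error("sign-in failed", "sign_in", session=123456, e=e)
    # -> "用户[123456] CMD[sign_in] sign-in failed || 错误 ZeroDivisionError: division by zero"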
+ @overload + @classmethod + def trace( + cls, + info: str, + command: str | None = None, + *, + session: Session | None = None, + target: Any = None, + platform: str | None = None, + e: Exception | None = None, + ): ... + @overload + @classmethod + def trace( + cls, + info: str, + command: str | None = None, + *, + session: uninfoSession | None = None, + target: Any = None, + platform: str | None = None, + e: Exception | None = None, + ): ... + + @classmethod + def trace( + cls, + info: str, + command: str | None = None, + *, + session: int | str | Session | uninfoSession | None = None, + group_id: int | str | None = None, + adapter: str | None = None, + target: Any = None, + platform: str | None = None, + e: Exception | None = None, + ): + cls._log( + "trace", + info=info, + command=command, + session=session, + group_id=group_id, + adapter=adapter, + target=target, + platform=platform, + e=e, + ) diff --git a/zhenxun/utils/enum.py b/zhenxun/utils/enum.py index c0b79342..23b5a946 100644 --- a/zhenxun/utils/enum.py +++ b/zhenxun/utils/enum.py @@ -1,4 +1,84 @@ -from strenum import StrEnum +import sys + +if sys.version_info >= (3, 11): + from enum import StrEnum +else: + from strenum import StrEnum + + +class PriorityLifecycleType(StrEnum): + STARTUP = "STARTUP" + """启动""" + SHUTDOWN = "SHUTDOWN" + """关闭""" + + +class BotSentType(StrEnum): + GROUP = "GROUP" + PRIVATE = "PRIVATE" + + +class EventLogType(StrEnum): + GROUP_MEMBER_INCREASE = "GROUP_MEMBER_INCREASE" + """群成员增加""" + GROUP_MEMBER_DECREASE = "GROUP_MEMBER_DECREASE" + """群成员减少""" + KICK_MEMBER = "KICK_MEMBER" + """踢出群成员""" + KICK_BOT = "KICK_BOT" + """踢出Bot""" + LEAVE_MEMBER = "LEAVE_MEMBER" + """主动退群""" + + +class CacheType(StrEnum): + """ + 缓存类型 + """ + + PLUGINS = "GLOBAL_ALL_PLUGINS" + """全局全部插件""" + GROUPS = "GLOBAL_ALL_GROUPS" + """全局全部群组""" + USERS = "GLOBAL_ALL_USERS" + """全部用户""" + BAN = "GLOBAL_ALL_BAN" + """全局ban列表""" + BOT = "GLOBAL_BOT" + """全局bot信息""" + LEVEL = "GLOBAL_USER_LEVEL" + """用户权限""" + LIMIT = "GLOBAL_LIMIT" + """插件限制""" + +class BankHandleType(StrEnum): + DEPOSIT = "DEPOSIT" + """存款""" + WITHDRAW = "WITHDRAW" + """取款""" + LOAN = "LOAN" + """贷款""" + REPAYMENT = "REPAYMENT" + """还款""" + INTEREST = "INTEREST" + """利息""" + + + + +class DbLockType(StrEnum): + """ + 锁类型 + """ + + CREATE = "CREATE" + """创建""" + DELETE = "DELETE" + """删除""" + UPDATE = "UPDATE" + """更新""" + QUERY = "QUERY" + """查询""" class GoldHandle(StrEnum): @@ -92,7 +172,9 @@ class RequestType(StrEnum): """ FRIEND = "FRIEND" + """好友""" GROUP = "GROUP" + """群组""" class RequestHandleType(StrEnum): diff --git a/zhenxun/utils/exception.py b/zhenxun/utils/exception.py index db8c0656..889c6c5c 100644 --- a/zhenxun/utils/exception.py +++ b/zhenxun/utils/exception.py @@ -1,3 +1,17 @@ +class HookPriorityException(BaseException): + """ + 钩子优先级异常 + """ + + def __init__(self, info: str = "") -> None: + self.info = info + + def __str__(self) -> str: + return self.info + + + + class NotFoundError(Exception): """ 未发现 diff --git a/zhenxun/utils/github_utils/func.py b/zhenxun/utils/github_utils/func.py index 19daf10d..b568b5bd 100644 --- a/zhenxun/utils/github_utils/func.py +++ b/zhenxun/utils/github_utils/func.py @@ -26,6 +26,10 @@ async def get_fastest_raw_formats() -> list[str]: "https://mirror.ghproxy.com/": f"https://mirror.ghproxy.com/{RAW_CONTENT_FORMAT}", "https://gh-proxy.com/": f"https://gh-proxy.com/{RAW_CONTENT_FORMAT}", "https://cdn.jsdelivr.net/": "https://cdn.jsdelivr.net/gh/{owner}/{repo}@{branch}/{path}", + #"https://raw.gitcode.com/": 
"https://raw.gitcode.com/qq_41605780/{repo}/raw/{branch}/{path}", # ✅ 新增 GitCode raw 格式 + #"https://raw.gitcode.com/": "https://raw.gitcode.com/ATTomato/{repo}/raw/{branch}/{path}" + #"https://raw.gitcode.com/": "https://raw.gitcode.com/{owner}/{repo}/raw/{branch}/{path}" + } return await __get_fastest_formats(formats) @@ -61,4 +65,4 @@ async def get_fastest_release_source_formats() -> list[str]: "https://codeload.github.com/": RELEASE_SOURCE_FORMAT, "https://p.102333.xyz/": f"https://p.102333.xyz/{RELEASE_SOURCE_FORMAT}", } - return await __get_fastest_formats(formats) + return await __get_fastest_formats(formats) \ No newline at end of file diff --git a/zhenxun/utils/http_utils.py b/zhenxun/utils/http_utils.py index 962c9e01..0ccf777f 100644 --- a/zhenxun/utils/http_utils.py +++ b/zhenxun/utils/http_utils.py @@ -1,210 +1,223 @@ import asyncio -from asyncio.exceptions import TimeoutError -from collections.abc import AsyncGenerator +from collections.abc import AsyncGenerator, Sequence from contextlib import asynccontextmanager from pathlib import Path import time -from typing import Any, ClassVar, Literal +from typing import Any, ClassVar, Literal, cast import aiofiles -from anyio import EndOfStream import httpx -from httpx import ConnectTimeout, HTTPStatusError, Response +from httpx import AsyncHTTPTransport, HTTPStatusError, Proxy, Response from nonebot_plugin_alconna import UniMessage from nonebot_plugin_htmlrender import get_browser from playwright.async_api import Page -from retrying import retry -import rich +from rich.progress import ( + BarColumn, + DownloadColumn, + Progress, + TextColumn, + TransferSpeedColumn, +) from zhenxun.configs.config import BotConfig from zhenxun.services.log import logger from zhenxun.utils.message import MessageUtils from zhenxun.utils.user_agent import get_user_agent -# from .browser import get_browser +CLIENT_KEY = ["use_proxy", "proxies", "proxy", "verify", "headers"] + + +def get_async_client( + proxies: dict[str, str] | None = None, + proxy: str | None = None, + verify: bool = False, + **kwargs, +) -> httpx.AsyncClient: + transport = kwargs.pop("transport", None) or AsyncHTTPTransport(verify=verify) + if proxies: + http_proxy = proxies.get("http://") + https_proxy = proxies.get("https://") + return httpx.AsyncClient( + mounts={ + "http://": AsyncHTTPTransport( + proxy=Proxy(http_proxy) if http_proxy else None + ), + "https://": AsyncHTTPTransport( + proxy=Proxy(https_proxy) if https_proxy else None + ), + }, + transport=transport, + **kwargs, + ) + elif proxy: + return httpx.AsyncClient( + mounts={ + "http://": AsyncHTTPTransport(proxy=Proxy(proxy)), + "https://": AsyncHTTPTransport(proxy=Proxy(proxy)), + }, + transport=transport, + **kwargs, + ) + return httpx.AsyncClient(transport=transport, **kwargs) class AsyncHttpx: - proxy: ClassVar[dict[str, str | None]] = { - "http://": BotConfig.system_proxy, - "https://": BotConfig.system_proxy, - } + default_proxy: ClassVar[dict[str, str] | None] = ( + { + "http://": BotConfig.system_proxy, + "https://": BotConfig.system_proxy, + } + if BotConfig.system_proxy + else None + ) + + @classmethod + @asynccontextmanager + async def _create_client( + cls, + *, + use_proxy: bool = True, + proxies: dict[str, str] | None = None, + proxy: str | None = None, + headers: dict[str, str] | None = None, + verify: bool = False, + **kwargs, + ) -> AsyncGenerator[httpx.AsyncClient, None]: + """创建一个私有的、配置好的 httpx.AsyncClient 上下文管理器。 + + 说明: + 此方法用于内部统一创建客户端,处理代理和请求头逻辑,减少代码重复。 + + 参数: + use_proxy: 是否使用在类中定义的默认代理。 + proxies: 
手动指定的代理,会覆盖默认代理。 + proxy: 单个代理,用于兼容旧版本,不再使用 + headers: 需要合并到客户端的自定义请求头。 + verify: 是否验证 SSL 证书。 + **kwargs: 其他所有传递给 httpx.AsyncClient 的参数。 + + 返回: + AsyncGenerator[httpx.AsyncClient, None]: 生成器。 + """ + proxies_to_use = proxies or (cls.default_proxy if use_proxy else None) + + final_headers = get_user_agent() + if headers: + final_headers.update(headers) + + async with get_async_client( + proxies=proxies_to_use, + proxy=proxy, + verify=verify, + headers=final_headers, + **kwargs, + ) as client: + yield client @classmethod - @retry(stop_max_attempt_number=3) async def get( cls, url: str | list[str], *, - params: dict[str, Any] | None = None, - headers: dict[str, str] | None = None, - cookies: dict[str, str] | None = None, - verify: bool = True, - use_proxy: bool = True, - proxy: dict[str, str] | None = None, - timeout: int = 30, # noqa: ASYNC109 + check_status_code: int | None = None, **kwargs, - ) -> Response: - """Get + ) -> Response: # sourcery skip: use-assigned-variable + """发送 GET 请求,并返回第一个成功的响应。 + + 说明: + 本方法是 httpx.get 的高级包装,增加了多链接尝试、自动重试和统一的代理管理。 + 如果提供 URL 列表,它将依次尝试直到成功为止。 参数: - url: url - params: params - headers: 请求头 - cookies: cookies - verify: verify - use_proxy: 使用默认代理 - proxy: 指定代理 - timeout: 超时时间 + url: 单个请求 URL 或一个 URL 列表。 + check_status_code: (可选) 若提供,将检查响应状态码是否匹配,否则抛出异常。 + **kwargs: 其他所有传递给 httpx.get 的参数 + (如 `params`, `headers`, `timeout`等)。 + + 返回: + Response: Response """ urls = [url] if isinstance(url, str) else url - return await cls._get_first_successful( - urls, - params=params, - headers=headers, - cookies=cookies, - verify=verify, - use_proxy=use_proxy, - proxy=proxy, - timeout=timeout, - **kwargs, - ) - - @classmethod - async def _get_first_successful( - cls, - urls: list[str], - **kwargs, - ) -> Response: last_exception = None - for url in urls: + for current_url in urls: try: - return await cls._get_single(url, **kwargs) + logger.info(f"开始获取 {current_url}..") + client_kwargs = {k: v for k, v in kwargs.items() if k in CLIENT_KEY} + for key in CLIENT_KEY: + kwargs.pop(key, None) + async with cls._create_client(**client_kwargs) as client: + response = await client.get(current_url, **kwargs) + + if check_status_code and response.status_code != check_status_code: + raise HTTPStatusError( + f"状态码错误: {response.status_code}!={check_status_code}", + request=response.request, + response=response, + ) + return response except Exception as e: last_exception = e - if url != urls[-1]: - logger.warning(f"获取 {url} 失败, 尝试下一个") - raise last_exception or Exception("All URLs failed") + if current_url != urls[-1]: + logger.warning(f"获取 {current_url} 失败, 尝试下一个", e=e) + + raise last_exception or Exception("所有URL都获取失败") @classmethod - async def _get_single( - cls, - url: str, - *, - params: dict[str, Any] | None = None, - headers: dict[str, str] | None = None, - cookies: dict[str, str] | None = None, - verify: bool = True, - use_proxy: bool = True, - proxy: dict[str, str] | None = None, - timeout: int = 30, # noqa: ASYNC109 - **kwargs, - ) -> Response: - if not headers: - headers = get_user_agent() - _proxy = proxy or (cls.proxy if use_proxy else None) - async with httpx.AsyncClient(proxies=_proxy, verify=verify) as client: # type: ignore - return await client.get( - url, - params=params, - headers=headers, - cookies=cookies, - timeout=timeout, - **kwargs, - ) + async def head(cls, url: str, **kwargs) -> Response: + """发送 HEAD 请求。 - @classmethod - async def head( - cls, - url: str, - *, - params: dict[str, Any] | None = None, - headers: dict[str, str] | None = None, - cookies: 
dict[str, str] | None = None, - verify: bool = True, - use_proxy: bool = True, - proxy: dict[str, str] | None = None, - timeout: int = 30, # noqa: ASYNC109 - **kwargs, - ) -> Response: - """Get - - 参数: - url: url - params: params - headers: 请求头 - cookies: cookies - verify: verify - use_proxy: 使用默认代理 - proxy: 指定代理 - timeout: 超时时间 - """ - if not headers: - headers = get_user_agent() - _proxy = proxy or (cls.proxy if use_proxy else None) - async with httpx.AsyncClient(proxies=_proxy, verify=verify) as client: # type: ignore - return await client.head( - url, - params=params, - headers=headers, - cookies=cookies, - timeout=timeout, - **kwargs, - ) - - @classmethod - async def post( - cls, - url: str, - *, - data: dict[str, Any] | None = None, - content: Any = None, - files: Any = None, - verify: bool = True, - use_proxy: bool = True, - proxy: dict[str, str] | None = None, - json: dict[str, Any] | None = None, - params: dict[str, str] | None = None, - headers: dict[str, str] | None = None, - cookies: dict[str, str] | None = None, - timeout: int = 30, # noqa: ASYNC109 - **kwargs, - ) -> Response: - """ 说明: - Post + 本方法是对 httpx.head 的封装,通常用于检查资源的元信息(如大小、类型)。 + 参数: - url: url - data: data - content: content - files: files - use_proxy: 是否默认代理 - proxy: 指定代理 - json: json - params: params - headers: 请求头 - cookies: cookies - timeout: 超时时间 + url: 请求的 URL。 + **kwargs: 其他所有传递给 httpx.head 的参数 + (如 `headers`, `timeout`, `allow_redirects`)。 + + 返回: + Response: Response """ - if not headers: - headers = get_user_agent() - _proxy = proxy or (cls.proxy if use_proxy else None) - async with httpx.AsyncClient(proxies=_proxy, verify=verify) as client: # type: ignore - return await client.post( - url, - content=content, - data=data, - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - timeout=timeout, - **kwargs, - ) + client_kwargs = {k: v for k, v in kwargs.items() if k in CLIENT_KEY} + for key in CLIENT_KEY: + kwargs.pop(key, None) + async with cls._create_client(**client_kwargs) as client: + return await client.head(url, **kwargs) + + @classmethod + async def post(cls, url: str, **kwargs) -> Response: + """发送 POST 请求。 + + 说明: + 本方法是对 httpx.post 的封装,提供了统一的代理和客户端管理。 + + 参数: + url: 请求的 URL。 + **kwargs: 其他所有传递给 httpx.post 的参数 + (如 `data`, `json`, `content` 等)。 + + 返回: + Response: Response。 + """ + client_kwargs = {k: v for k, v in kwargs.items() if k in CLIENT_KEY} + for key in CLIENT_KEY: + kwargs.pop(key, None) + async with cls._create_client(**client_kwargs) as client: + return await client.post(url, **kwargs) @classmethod async def get_content(cls, url: str, **kwargs) -> bytes: + """获取指定 URL 的二进制内容。 + + 说明: + 这是一个便捷方法,等同于调用 get() 后再访问 .content 属性。 + + 参数: + url: 请求的 URL。 + **kwargs: 所有传递给 get() 方法的参数。 + + 返回: + bytes: 响应内容的二进制字节流 (bytes)。 + """ res = await cls.get(url, **kwargs) return res.content @@ -214,195 +227,143 @@ class AsyncHttpx: url: str | list[str], path: str | Path, *, - params: dict[str, str] | None = None, - verify: bool = True, - use_proxy: bool = True, - proxy: dict[str, str] | None = None, - headers: dict[str, str] | None = None, - cookies: dict[str, str] | None = None, - timeout: int = 30, # noqa: ASYNC109 stream: bool = False, - follow_redirects: bool = True, **kwargs, ) -> bool: - """下载文件 + """下载文件到指定路径。 + + 说明: + 支持多链接尝试和流式下载(带进度条)。 参数: - url: url - path: 存储路径 - params: params - verify: verify - use_proxy: 使用代理 - proxy: 指定代理 - headers: 请求头 - cookies: cookies - timeout: 超时时间 - stream: 是否使用流式下载(流式写入+进度条,适用于下载大文件) + url: 单个文件 URL 或一个备用 URL 列表。 + path: 文件保存的本地路径。 
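# --- Editor's illustrative sketch (annotation, not part of the patch) ---
# The rewritten get() accepts a list of mirror URLs and splits client-level kwargs
# (CLIENT_KEY) from request kwargs; the URLs are placeholders.
from zhenxun.utils.http_utils import AsyncHttpx

async def _fetch_demo() -> bytes:
    resp = await AsyncHttpx.get(
        ["https://example.com/a.json", "https://mirror.example.com/a.json"],
        check_status_code=200,
        use_proxy=False,  # consumed by _create_client via CLIENT_KEY
        timeout=15,       # forwarded to httpx's client.get()
    )
    return resp.content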
+ stream: (可选) 是否使用流式下载,适用于大文件,默认为 False。 + **kwargs: 其他所有传递给 get() 方法或 httpx.stream() 的参数。 + + 返回: + bool: 是否下载成功。 """ - if isinstance(path, str): - path = Path(path) + path = Path(path) path.parent.mkdir(parents=True, exist_ok=True) - try: - for _ in range(3): - if not isinstance(url, list): - url = [url] - for u in url: - try: - if not stream: - response = await cls.get( - u, - params=params, - headers=headers, - cookies=cookies, - use_proxy=use_proxy, - proxy=proxy, - timeout=timeout, - follow_redirects=follow_redirects, - **kwargs, - ) + + urls = [url] if isinstance(url, str) else url + + for current_url in urls: + try: + if not stream: + response = await cls.get(current_url, **kwargs) + response.raise_for_status() + async with aiofiles.open(path, "wb") as f: + await f.write(response.content) + else: + async with cls._create_client(**kwargs) as client: + stream_kwargs = { + k: v + for k, v in kwargs.items() + if k not in ["use_proxy", "proxy", "verify"] + } + async with client.stream( + "GET", current_url, **stream_kwargs + ) as response: response.raise_for_status() - content = response.content - async with aiofiles.open(path, "wb") as wf: - await wf.write(content) - logger.info(f"下载 {u} 成功.. Path:{path.absolute()}") - else: - if not headers: - headers = get_user_agent() - _proxy = proxy or (cls.proxy if use_proxy else None) - async with httpx.AsyncClient( - proxies=_proxy, # type: ignore - verify=verify, - ) as client: - async with client.stream( - "GET", - u, - params=params, - headers=headers, - cookies=cookies, - timeout=timeout, - follow_redirects=True, - **kwargs, - ) as response: - response.raise_for_status() - logger.info( - f"开始下载 {path.name}.. " - f"Url: {u}.. " - f"Path: {path.absolute()}" - ) - async with aiofiles.open(path, "wb") as wf: - total = int( - response.headers.get("Content-Length", 0) - ) - with rich.progress.Progress( # type: ignore - rich.progress.TextColumn(path.name), # type: ignore - "[progress.percentage]{task.percentage:>3.0f}%", # type: ignore - rich.progress.BarColumn(bar_width=None), # type: ignore - rich.progress.DownloadColumn(), # type: ignore - rich.progress.TransferSpeedColumn(), # type: ignore - ) as progress: - download_task = progress.add_task( - "Download", - total=total or None, - ) - async for chunk in response.aiter_bytes(): - await wf.write(chunk) - await wf.flush() - progress.update( - download_task, - completed=response.num_bytes_downloaded, - ) - logger.info( - f"下载 {u} 成功.. Path:{path.absolute()}" - ) - return True - except (TimeoutError, ConnectTimeout, HTTPStatusError): - logger.warning(f"下载 {u} 失败.. 尝试下一个地址..") - except EndOfStream as e: - logger.warning( - f"下载 {url} EndOfStream 异常 Path:{path.absolute()}", e=e - ) - if path.exists(): - return True - logger.error(f"下载 {url} 下载超时.. 
Path:{path.absolute()}") - except Exception as e: - logger.error(f"下载 {url} 错误 Path:{path.absolute()}", e=e) + total = int(response.headers.get("Content-Length", 0)) + + with Progress( + TextColumn(path.name), + "[progress.percentage]{task.percentage:>3.0f}%", + BarColumn(bar_width=None), + DownloadColumn(), + TransferSpeedColumn(), + ) as progress: + task_id = progress.add_task("Download", total=total) + async with aiofiles.open(path, "wb") as f: + async for chunk in response.aiter_bytes(): + await f.write(chunk) + progress.update(task_id, advance=len(chunk)) + + logger.info(f"下载 {current_url} 成功 -> {path.absolute()}") + return True + + except Exception as e: + logger.warning(f"下载 {current_url} 失败,尝试下一个。错误: {e}") + + logger.error(f"所有URL {urls} 下载均失败 -> {path.absolute()}") return False @classmethod async def gather_download_file( cls, - url_list: list[str] | list[list[str]], - path_list: list[str | Path], + url_list: Sequence[list[str] | str], + path_list: Sequence[str | Path], *, - limit_async_number: int | None = None, - params: dict[str, str] | None = None, - use_proxy: bool = True, - proxy: dict[str, str] | None = None, - headers: dict[str, str] | None = None, - cookies: dict[str, str] | None = None, - timeout: int = 30, # noqa: ASYNC109 + limit_async_number: int = 5, **kwargs, ) -> list[bool]: - """分组同时下载文件 + """并发下载多个文件,支持为每个文件提供备用镜像链接。 + + 说明: + 使用 asyncio.Semaphore 来控制并发请求的数量。 + 对于 url_list 中的每个元素,如果它是一个列表,则会依次尝试直到下载成功。 参数: - url_list: url列表 - path_list: 存储路径列表 - limit_async_number: 限制同时请求数量 - params: params - use_proxy: 使用代理 - proxy: 指定代理 - headers: 请求头 - cookies: cookies - timeout: 超时时间 + url_list: 包含所有文件下载任务的列表。每个元素可以是: + - 一个字符串 (str): 代表该任务的唯一URL。 + - 一个字符串列表 (list[str]): 代表该任务的多个备用/镜像URL。 + path_list: 与 url_list 对应的文件保存路径列表。 + limit_async_number: (可选) 最大并发下载数,默认为 5。 + **kwargs: 其他所有传递给 download_file() 方法的参数。 + + 返回: + list[bool]: 对应每个下载任务是否成功。 """ - if n := len(url_list) != len(path_list): - raise UrlPathNumberNotEqual( - f"Url数量与Path数量不对等,Url:{len(url_list)},Path:{len(path_list)}" - ) - if limit_async_number and n > limit_async_number: - m = float(n) / limit_async_number - x = 0 - j = limit_async_number - _split_url_list = [] - _split_path_list = [] - for _ in range(int(m)): - _split_url_list.append(url_list[x:j]) - _split_path_list.append(path_list[x:j]) - x += limit_async_number - j += limit_async_number - if int(m) < m: - _split_url_list.append(url_list[j:]) - _split_path_list.append(path_list[j:]) - else: - _split_url_list = [url_list] - _split_path_list = [path_list] - tasks = [] - result_ = [] - for x, y in zip(_split_url_list, _split_path_list): - tasks.extend( - asyncio.create_task( - cls.download_file( - url, - path, - params=params, - headers=headers, - cookies=cookies, - use_proxy=use_proxy, - timeout=timeout, - proxy=proxy, - **kwargs, - ) + if len(url_list) != len(path_list): + raise ValueError("URL 列表和路径列表的长度必须相等") + + semaphore = asyncio.Semaphore(limit_async_number) + + async def _download_with_semaphore( + urls_for_one_path: str | list[str], path: str | Path + ): + async with semaphore: + return await cls.download_file(urls_for_one_path, path, **kwargs) + + tasks = [ + _download_with_semaphore(url_group, path) + for url_group, path in zip(url_list, path_list) + ] + + results = await asyncio.gather(*tasks, return_exceptions=True) + + final_results = [] + for i, result in enumerate(results): + if isinstance(result, Exception): + url_info = ( + url_list[i] + if isinstance(url_list[i], str) + else ", ".join(url_list[i]) ) - for url, path in zip(x, y) - ) - _x = 
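# --- Editor's illustrative sketch (annotation, not part of the patch) ---
# Streaming download with mirror fallback using the rewritten download_file();
# URLs and the target path are placeholders.
from pathlib import Path

from zhenxun.utils.http_utils import AsyncHttpx

async def _download_demo() -> bool:
    return await AsyncHttpx.download_file(
        ["https://example.com/big.zip", "https://mirror.example.com/big.zip"],
        Path("data/tmp/big.zip"),
        stream=True,  # enables the rich progress-bar branch above
        timeout=120,
    )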
await asyncio.gather(*tasks) - result_ = result_ + list(_x) - tasks.clear() - return result_ + logger.error(f"并发下载任务 ({url_info}) 时发生错误", e=result) + final_results.append(False) + else: + # download_file 返回的是 bool,可以直接附加 + final_results.append(cast(bool, result)) + + return final_results @classmethod async def get_fastest_mirror(cls, url_list: list[str]) -> list[str]: + """测试并返回最快的镜像地址。 + + 说明: + 通过并发发送 HEAD 请求来测试每个 URL 的响应时间和可用性,并按响应速度排序。 + + 参数: + url_list: 需要测试的镜像 URL 列表。 + + 返回: + list[str]: 按从快到慢的顺序包含了所有可用的 URL。 + """ assert url_list async def head_mirror(client: type[AsyncHttpx], url: str) -> dict[str, Any]: @@ -471,7 +432,7 @@ class AsyncPlaywright: wait_until: ( Literal["domcontentloaded", "load", "networkidle"] | None ) = "networkidle", - timeout: float | None = None, # noqa: ASYNC109 + timeout: float | None = None, type_: Literal["jpeg", "png"] | None = None, user_agent: str | None = None, cookies: list[dict[str, Any]] | dict[str, Any] | None = None, @@ -515,9 +476,5 @@ class AsyncPlaywright: return None -class UrlPathNumberNotEqual(Exception): - pass - - class BrowserIsNone(Exception): pass diff --git a/zhenxun/utils/manager/priority_manager.py b/zhenxun/utils/manager/priority_manager.py new file mode 100644 index 00000000..1c59635c --- /dev/null +++ b/zhenxun/utils/manager/priority_manager.py @@ -0,0 +1,57 @@ +from collections.abc import Callable +from typing import ClassVar + +import nonebot +from nonebot.utils import is_coroutine_callable + +from zhenxun.services.log import logger +from zhenxun.utils.enum import PriorityLifecycleType +from zhenxun.utils.exception import HookPriorityException + +driver = nonebot.get_driver() + + +class PriorityLifecycle: + _data: ClassVar[dict[PriorityLifecycleType, dict[int, list[Callable]]]] = {} + + @classmethod + def add(cls, hook_type: PriorityLifecycleType, func: Callable, priority: int): + if hook_type not in cls._data: + cls._data[hook_type] = {} + if priority not in cls._data[hook_type]: + cls._data[hook_type][priority] = [] + cls._data[hook_type][priority].append(func) + + @classmethod + def on_startup(cls, *, priority: int): + def wrapper(func): + cls.add(PriorityLifecycleType.STARTUP, func, priority) + return func + + return wrapper + + @classmethod + def on_shutdown(cls, *, priority: int): + def wrapper(func): + cls.add(PriorityLifecycleType.SHUTDOWN, func, priority) + return func + + return wrapper + + +@driver.on_startup +async def _(): + priority_data = PriorityLifecycle._data.get(PriorityLifecycleType.STARTUP) + if not priority_data: + return + priority_list = sorted(priority_data.keys()) + priority = 0 + try: + for priority in priority_list: + for func in priority_data[priority]: + if is_coroutine_callable(func): + await func() + else: + func() + except HookPriorityException as e: + logger.error(f"打断优先级 [{priority}] on_startup 方法. 
{type(e)}: {e}") diff --git a/zhenxun/utils/manager/schedule_manager.py b/zhenxun/utils/manager/schedule_manager.py new file mode 100644 index 00000000..a3b21272 --- /dev/null +++ b/zhenxun/utils/manager/schedule_manager.py @@ -0,0 +1,810 @@ +import asyncio +from collections.abc import Callable, Coroutine +import copy +import inspect +import random +from typing import ClassVar + +import nonebot +from nonebot import get_bots +from nonebot_plugin_apscheduler import scheduler +from pydantic import BaseModel, ValidationError + +from zhenxun.configs.config import Config +from zhenxun.models.schedule_info import ScheduleInfo +from zhenxun.services.log import logger +from zhenxun.utils.common_utils import CommonUtils +from zhenxun.utils.manager.priority_manager import PriorityLifecycle +from zhenxun.utils.platform import PlatformUtils + +SCHEDULE_CONCURRENCY_KEY = "all_groups_concurrency_limit" + + +class SchedulerManager: + """ + 一个通用的、持久化的定时任务管理器,供所有插件使用。 + """ + + _registered_tasks: ClassVar[ + dict[str, dict[str, Callable | type[BaseModel] | None]] + ] = {} + _JOB_PREFIX = "zhenxun_schedule_" + _running_tasks: ClassVar[set] = set() + + def register( + self, plugin_name: str, params_model: type[BaseModel] | None = None + ) -> Callable: + """ + 注册一个可调度的任务函数。 + 被装饰的函数签名应为 `async def func(group_id: str | None, **kwargs)` + + Args: + plugin_name (str): 插件的唯一名称 (通常是模块名)。 + params_model (type[BaseModel], optional): 一个 Pydantic BaseModel 类, + 用于定义和验证任务函数接受的额外参数。 + """ + + def decorator(func: Callable[..., Coroutine]) -> Callable[..., Coroutine]: + if plugin_name in self._registered_tasks: + logger.warning(f"插件 '{plugin_name}' 的定时任务已被重复注册。") + self._registered_tasks[plugin_name] = { + "func": func, + "model": params_model, + } + model_name = params_model.__name__ if params_model else "无" + logger.debug( + f"插件 '{plugin_name}' 的定时任务已注册,参数模型: {model_name}" + ) + return func + + return decorator + + def get_registered_plugins(self) -> list[str]: + """获取所有已注册定时任务的插件列表。""" + return list(self._registered_tasks.keys()) + + def _get_job_id(self, schedule_id: int) -> str: + """根据数据库ID生成唯一的 APScheduler Job ID。""" + return f"{self._JOB_PREFIX}{schedule_id}" + + async def _execute_job(self, schedule_id: int): + """ + APScheduler 调度的入口函数。 + 根据 schedule_id 处理特定任务、所有群组任务或全局任务。 + """ + schedule = await ScheduleInfo.get_or_none(id=schedule_id) + if not schedule or not schedule.is_enabled: + logger.warning(f"定时任务 {schedule_id} 不存在或已禁用,跳过执行。") + return + + plugin_name = schedule.plugin_name + + task_meta = self._registered_tasks.get(plugin_name) + if not task_meta: + logger.error( + f"无法执行定时任务:插件 '{plugin_name}' 未注册或已卸载。将禁用该任务。" + ) + schedule.is_enabled = False + await schedule.save(update_fields=["is_enabled"]) + self._remove_aps_job(schedule.id) + return + + try: + if schedule.bot_id: + bot = nonebot.get_bot(schedule.bot_id) + else: + bot = nonebot.get_bot() + logger.debug( + f"任务 {schedule_id} 未关联特定Bot,使用默认Bot {bot.self_id}" + ) + except KeyError: + logger.warning( + f"定时任务 {schedule_id} 需要的 Bot {schedule.bot_id} " + f"不在线,本次执行跳过。" + ) + return + except ValueError: + logger.warning(f"当前没有Bot在线,定时任务 {schedule_id} 跳过。") + return + + if schedule.group_id == "__ALL_GROUPS__": + await self._execute_for_all_groups(schedule, task_meta, bot) + else: + await self._execute_for_single_target(schedule, task_meta, bot) + + async def _execute_for_all_groups( + self, schedule: ScheduleInfo, task_meta: dict, bot + ): + """为所有群组执行任务,并处理优先级覆盖。""" + plugin_name = schedule.plugin_name + + concurrency_limit = Config.get_config( + 
"SchedulerManager", SCHEDULE_CONCURRENCY_KEY, 5 + ) + if not isinstance(concurrency_limit, int) or concurrency_limit <= 0: + logger.warning( + f"无效的定时任务并发限制配置 '{concurrency_limit}',将使用默认值 5。" + ) + concurrency_limit = 5 + + logger.info( + f"开始执行针对 [所有群组] 的任务 " + f"(ID: {schedule.id}, 插件: {plugin_name}, Bot: {bot.self_id})," + f"并发限制: {concurrency_limit}" + ) + + all_gids = set() + try: + group_list, _ = await PlatformUtils.get_group_list(bot) + all_gids.update( + g.group_id for g in group_list if g.group_id and not g.channel_id + ) + except Exception as e: + logger.error(f"为 'all' 任务获取 Bot {bot.self_id} 的群列表失败", e=e) + return + + specific_tasks_gids = set( + await ScheduleInfo.filter( + plugin_name=plugin_name, group_id__in=list(all_gids) + ).values_list("group_id", flat=True) + ) + + semaphore = asyncio.Semaphore(concurrency_limit) + + async def worker(gid: str): + """使用 Semaphore 包装单个群组的任务执行""" + async with semaphore: + temp_schedule = copy.deepcopy(schedule) + temp_schedule.group_id = gid + await self._execute_for_single_target(temp_schedule, task_meta, bot) + await asyncio.sleep(random.uniform(0.1, 0.5)) + + tasks_to_run = [] + for gid in all_gids: + if gid in specific_tasks_gids: + logger.debug(f"群组 {gid} 已有特定任务,跳过 'all' 任务的执行。") + continue + tasks_to_run.append(worker(gid)) + + if tasks_to_run: + await asyncio.gather(*tasks_to_run) + + async def _execute_for_single_target( + self, schedule: ScheduleInfo, task_meta: dict, bot + ): + """为单个目标(具体群组或全局)执行任务。""" + plugin_name = schedule.plugin_name + group_id = schedule.group_id + + try: + is_blocked = await CommonUtils.task_is_block(bot, plugin_name, group_id) + if is_blocked: + target_desc = f"群 {group_id}" if group_id else "全局" + logger.info( + f"插件 '{plugin_name}' 的定时任务在目标 [{target_desc}]" + "因功能被禁用而跳过执行。" + ) + return + + task_func = task_meta["func"] + job_kwargs = schedule.job_kwargs + if not isinstance(job_kwargs, dict): + logger.error( + f"任务 {schedule.id} 的 job_kwargs 不是字典类型: {type(job_kwargs)}" + ) + return + + sig = inspect.signature(task_func) + if "bot" in sig.parameters: + job_kwargs["bot"] = bot + + logger.info( + f"插件 '{plugin_name}' 开始为目标 [{group_id or '全局'}] " + f"执行定时任务 (ID: {schedule.id})。" + ) + task = asyncio.create_task(task_func(group_id, **job_kwargs)) + self._running_tasks.add(task) + task.add_done_callback(self._running_tasks.discard) + await task + except Exception as e: + logger.error( + f"执行定时任务 (ID: {schedule.id}, 插件: {plugin_name}, " + f"目标: {group_id or '全局'}) 时发生异常", + e=e, + ) + + def _validate_and_prepare_kwargs( + self, plugin_name: str, job_kwargs: dict | None + ) -> tuple[bool, str | dict]: + """验证并准备任务参数,应用默认值""" + task_meta = self._registered_tasks.get(plugin_name) + if not task_meta: + return False, f"插件 '{plugin_name}' 未注册。" + + params_model = task_meta.get("model") + job_kwargs = job_kwargs if job_kwargs is not None else {} + + if not params_model: + if job_kwargs: + logger.warning( + f"插件 '{plugin_name}' 未定义参数模型,但收到了参数: {job_kwargs}" + ) + return True, job_kwargs + + if not (isinstance(params_model, type) and issubclass(params_model, BaseModel)): + logger.error(f"插件 '{plugin_name}' 的参数模型不是有效的 BaseModel 类") + return False, f"插件 '{plugin_name}' 的参数模型配置错误" + + try: + model_validate = getattr(params_model, "model_validate", None) + if not model_validate: + return False, f"插件 '{plugin_name}' 的参数模型不支持验证" + + validated_model = model_validate(job_kwargs) + + model_dump = getattr(validated_model, "model_dump", None) + if not model_dump: + return False, f"插件 '{plugin_name}' 的参数模型不支持导出" + + return True, 
model_dump() + except ValidationError as e: + errors = [f" - {err['loc'][0]}: {err['msg']}" for err in e.errors()] + error_str = "\n".join(errors) + msg = f"插件 '{plugin_name}' 的任务参数验证失败:\n{error_str}" + return False, msg + + def _add_aps_job(self, schedule: ScheduleInfo): + """根据 ScheduleInfo 对象添加或更新一个 APScheduler 任务。""" + job_id = self._get_job_id(schedule.id) + try: + scheduler.remove_job(job_id) + except Exception: + pass + + if not isinstance(schedule.trigger_config, dict): + logger.error( + f"任务 {schedule.id} 的 trigger_config 不是字典类型: " + f"{type(schedule.trigger_config)}" + ) + return + + scheduler.add_job( + self._execute_job, + trigger=schedule.trigger_type, + id=job_id, + misfire_grace_time=300, + args=[schedule.id], + **schedule.trigger_config, + ) + logger.debug( + f"已在 APScheduler 中添加/更新任务: {job_id} " + f"with trigger: {schedule.trigger_config}" + ) + + def _remove_aps_job(self, schedule_id: int): + """移除一个 APScheduler 任务。""" + job_id = self._get_job_id(schedule_id) + try: + scheduler.remove_job(job_id) + logger.debug(f"已从 APScheduler 中移除任务: {job_id}") + except Exception: + pass + + async def add_schedule( + self, + plugin_name: str, + group_id: str | None, + trigger_type: str, + trigger_config: dict, + job_kwargs: dict | None = None, + bot_id: str | None = None, + ) -> tuple[bool, str]: + """ + 添加或更新一个定时任务。 + """ + if plugin_name not in self._registered_tasks: + return False, f"插件 '{plugin_name}' 没有注册可用的定时任务。" + + is_valid, result = self._validate_and_prepare_kwargs(plugin_name, job_kwargs) + if not is_valid: + return False, str(result) + + validated_job_kwargs = result + + effective_bot_id = bot_id if group_id == "__ALL_GROUPS__" else None + + search_kwargs = { + "plugin_name": plugin_name, + "group_id": group_id, + } + if effective_bot_id: + search_kwargs["bot_id"] = effective_bot_id + else: + search_kwargs["bot_id__isnull"] = True + + defaults = { + "trigger_type": trigger_type, + "trigger_config": trigger_config, + "job_kwargs": validated_job_kwargs, + "is_enabled": True, + } + + schedule = await ScheduleInfo.filter(**search_kwargs).first() + created = False + + if schedule: + for key, value in defaults.items(): + setattr(schedule, key, value) + await schedule.save() + else: + creation_kwargs = { + "plugin_name": plugin_name, + "group_id": group_id, + "bot_id": effective_bot_id, + **defaults, + } + schedule = await ScheduleInfo.create(**creation_kwargs) + created = True + self._add_aps_job(schedule) + action = "设置" if created else "更新" + return True, f"已成功{action}插件 '{plugin_name}' 的定时任务。" + + async def add_schedule_for_all( + self, + plugin_name: str, + trigger_type: str, + trigger_config: dict, + job_kwargs: dict | None = None, + ) -> tuple[int, int]: + """为所有机器人所在的群组添加定时任务。""" + if plugin_name not in self._registered_tasks: + raise ValueError(f"插件 '{plugin_name}' 没有注册可用的定时任务。") + + groups = set() + for bot in get_bots().values(): + try: + group_list, _ = await PlatformUtils.get_group_list(bot) + groups.update( + g.group_id for g in group_list if g.group_id and not g.channel_id + ) + except Exception as e: + logger.error(f"获取 Bot {bot.self_id} 的群列表失败", e=e) + + success_count = 0 + fail_count = 0 + for gid in groups: + try: + success, _ = await self.add_schedule( + plugin_name, gid, trigger_type, trigger_config, job_kwargs + ) + if success: + success_count += 1 + else: + fail_count += 1 + except Exception as e: + logger.error(f"为群 {gid} 添加定时任务失败: {e}", e=e) + fail_count += 1 + await asyncio.sleep(0.05) + return success_count, fail_count + + async def update_schedule( + 
self, + schedule_id: int, + trigger_type: str | None = None, + trigger_config: dict | None = None, + job_kwargs: dict | None = None, + ) -> tuple[bool, str]: + """部分更新一个已存在的定时任务。""" + schedule = await self.get_schedule_by_id(schedule_id) + if not schedule: + return False, f"未找到 ID 为 {schedule_id} 的任务。" + + updated_fields = [] + if trigger_config is not None: + schedule.trigger_config = trigger_config + updated_fields.append("trigger_config") + + if trigger_type is not None and schedule.trigger_type != trigger_type: + schedule.trigger_type = trigger_type + updated_fields.append("trigger_type") + + if job_kwargs is not None: + if not isinstance(schedule.job_kwargs, dict): + return False, f"任务 {schedule_id} 的 job_kwargs 数据格式错误。" + + merged_kwargs = schedule.job_kwargs.copy() + merged_kwargs.update(job_kwargs) + + is_valid, result = self._validate_and_prepare_kwargs( + schedule.plugin_name, merged_kwargs + ) + if not is_valid: + return False, str(result) + + schedule.job_kwargs = result # type: ignore + updated_fields.append("job_kwargs") + + if not updated_fields: + return True, "没有任何需要更新的配置。" + + await schedule.save(update_fields=updated_fields) + self._add_aps_job(schedule) + return True, f"成功更新了任务 ID: {schedule_id} 的配置。" + + async def remove_schedule( + self, plugin_name: str, group_id: str | None, bot_id: str | None = None + ) -> tuple[bool, str]: + """移除指定插件和群组的定时任务。""" + query = {"plugin_name": plugin_name, "group_id": group_id} + if bot_id: + query["bot_id"] = bot_id + + schedules = await ScheduleInfo.filter(**query) + if not schedules: + msg = ( + f"未找到与 Bot {bot_id} 相关的群 {group_id} " + f"的插件 '{plugin_name}' 定时任务。" + ) + return (False, msg) + + for schedule in schedules: + self._remove_aps_job(schedule.id) + await schedule.delete() + + target_desc = f"群 {group_id}" if group_id else "全局" + msg = ( + f"已取消 Bot {bot_id} 在 [{target_desc}] " + f"的插件 '{plugin_name}' 所有定时任务。" + ) + return (True, msg) + + async def remove_schedule_for_all( + self, plugin_name: str, bot_id: str | None = None + ) -> int: + """移除指定插件在所有群组的定时任务。""" + query = {"plugin_name": plugin_name} + if bot_id: + query["bot_id"] = bot_id + + schedules_to_delete = await ScheduleInfo.filter(**query).all() + if not schedules_to_delete: + return 0 + + for schedule in schedules_to_delete: + self._remove_aps_job(schedule.id) + await schedule.delete() + await asyncio.sleep(0.01) + + return len(schedules_to_delete) + + async def remove_schedules_by_group(self, group_id: str) -> tuple[bool, str]: + """移除指定群组的所有定时任务。""" + schedules = await ScheduleInfo.filter(group_id=group_id) + if not schedules: + return False, f"群 {group_id} 没有任何定时任务。" + + count = 0 + for schedule in schedules: + self._remove_aps_job(schedule.id) + await schedule.delete() + count += 1 + await asyncio.sleep(0.01) + + return True, f"已成功移除群 {group_id} 的 {count} 个定时任务。" + + async def pause_schedules_by_group(self, group_id: str) -> tuple[int, str]: + """暂停指定群组的所有定时任务。""" + schedules = await ScheduleInfo.filter(group_id=group_id, is_enabled=True) + if not schedules: + return 0, f"群 {group_id} 没有正在运行的定时任务可暂停。" + + count = 0 + for schedule in schedules: + success, _ = await self.pause_schedule(schedule.id) + if success: + count += 1 + await asyncio.sleep(0.01) + + return count, f"已成功暂停群 {group_id} 的 {count} 个定时任务。" + + async def resume_schedules_by_group(self, group_id: str) -> tuple[int, str]: + """恢复指定群组的所有定时任务。""" + schedules = await ScheduleInfo.filter(group_id=group_id, is_enabled=False) + if not schedules: + return 0, f"群 {group_id} 没有已暂停的定时任务可恢复。" + + count = 0 + 
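# 逐个调用 resume_schedule(),由其同步更新数据库 is_enabled 状态并恢复对应的 APScheduler 任务
+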
for schedule in schedules: + success, _ = await self.resume_schedule(schedule.id) + if success: + count += 1 + await asyncio.sleep(0.01) + + return count, f"已成功恢复群 {group_id} 的 {count} 个定时任务。" + + async def pause_schedules_by_plugin(self, plugin_name: str) -> tuple[int, str]: + """暂停指定插件在所有群组的定时任务。""" + schedules = await ScheduleInfo.filter(plugin_name=plugin_name, is_enabled=True) + if not schedules: + return 0, f"插件 '{plugin_name}' 没有正在运行的定时任务可暂停。" + + count = 0 + for schedule in schedules: + success, _ = await self.pause_schedule(schedule.id) + if success: + count += 1 + await asyncio.sleep(0.01) + + return ( + count, + f"已成功暂停插件 '{plugin_name}' 在所有群组的 {count} 个定时任务。", + ) + + async def resume_schedules_by_plugin(self, plugin_name: str) -> tuple[int, str]: + """恢复指定插件在所有群组的定时任务。""" + schedules = await ScheduleInfo.filter(plugin_name=plugin_name, is_enabled=False) + if not schedules: + return 0, f"插件 '{plugin_name}' 没有已暂停的定时任务可恢复。" + + count = 0 + for schedule in schedules: + success, _ = await self.resume_schedule(schedule.id) + if success: + count += 1 + await asyncio.sleep(0.01) + + return ( + count, + f"已成功恢复插件 '{plugin_name}' 在所有群组的 {count} 个定时任务。", + ) + + async def pause_schedule_by_plugin_group( + self, plugin_name: str, group_id: str | None, bot_id: str | None = None + ) -> tuple[bool, str]: + """暂停指定插件在指定群组的定时任务。""" + query = {"plugin_name": plugin_name, "group_id": group_id, "is_enabled": True} + if bot_id: + query["bot_id"] = bot_id + + schedules = await ScheduleInfo.filter(**query) + if not schedules: + return ( + False, + f"群 {group_id} 未设置插件 '{plugin_name}' 的定时任务或任务已暂停。", + ) + + count = 0 + for schedule in schedules: + success, _ = await self.pause_schedule(schedule.id) + if success: + count += 1 + + return ( + True, + f"已成功暂停群 {group_id} 的插件 '{plugin_name}' 共 {count} 个定时任务。", + ) + + async def resume_schedule_by_plugin_group( + self, plugin_name: str, group_id: str | None, bot_id: str | None = None + ) -> tuple[bool, str]: + """恢复指定插件在指定群组的定时任务。""" + query = {"plugin_name": plugin_name, "group_id": group_id, "is_enabled": False} + if bot_id: + query["bot_id"] = bot_id + + schedules = await ScheduleInfo.filter(**query) + if not schedules: + return ( + False, + f"群 {group_id} 未设置插件 '{plugin_name}' 的定时任务或任务已启用。", + ) + + count = 0 + for schedule in schedules: + success, _ = await self.resume_schedule(schedule.id) + if success: + count += 1 + + return ( + True, + f"已成功恢复群 {group_id} 的插件 '{plugin_name}' 共 {count} 个定时任务。", + ) + + async def remove_all_schedules(self) -> tuple[int, str]: + """移除所有群组的所有定时任务。""" + schedules = await ScheduleInfo.all() + if not schedules: + return 0, "当前没有任何定时任务。" + + count = 0 + for schedule in schedules: + self._remove_aps_job(schedule.id) + await schedule.delete() + count += 1 + await asyncio.sleep(0.01) + + return count, f"已成功移除所有群组的 {count} 个定时任务。" + + async def pause_all_schedules(self) -> tuple[int, str]: + """暂停所有群组的所有定时任务。""" + schedules = await ScheduleInfo.filter(is_enabled=True) + if not schedules: + return 0, "当前没有正在运行的定时任务可暂停。" + + count = 0 + for schedule in schedules: + success, _ = await self.pause_schedule(schedule.id) + if success: + count += 1 + await asyncio.sleep(0.01) + + return count, f"已成功暂停所有群组的 {count} 个定时任务。" + + async def resume_all_schedules(self) -> tuple[int, str]: + """恢复所有群组的所有定时任务。""" + schedules = await ScheduleInfo.filter(is_enabled=False) + if not schedules: + return 0, "当前没有已暂停的定时任务可恢复。" + + count = 0 + for schedule in schedules: + success, _ = await self.resume_schedule(schedule.id) + if success: + count += 1 
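+ # 每恢复一个任务后短暂休眠,让出事件循环,避免批量恢复时长时间阻塞其他协程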
+ await asyncio.sleep(0.01) + + return count, f"已成功恢复所有群组的 {count} 个定时任务。" + + async def remove_schedule_by_id(self, schedule_id: int) -> tuple[bool, str]: + """通过ID移除指定的定时任务。""" + schedule = await self.get_schedule_by_id(schedule_id) + if not schedule: + return False, f"未找到 ID 为 {schedule_id} 的定时任务。" + + self._remove_aps_job(schedule.id) + await schedule.delete() + + return ( + True, + f"已删除插件 '{schedule.plugin_name}' 在群 {schedule.group_id} " + f"的定时任务 (ID: {schedule.id})。", + ) + + async def get_schedule_by_id(self, schedule_id: int) -> ScheduleInfo | None: + """通过ID获取定时任务信息。""" + return await ScheduleInfo.get_or_none(id=schedule_id) + + async def get_schedules( + self, plugin_name: str, group_id: str | None + ) -> list[ScheduleInfo]: + """获取特定群组特定插件的所有定时任务。""" + return await ScheduleInfo.filter(plugin_name=plugin_name, group_id=group_id) + + async def get_schedule( + self, plugin_name: str, group_id: str | None + ) -> ScheduleInfo | None: + """获取特定群组的定时任务信息。""" + return await ScheduleInfo.get_or_none( + plugin_name=plugin_name, group_id=group_id + ) + + async def get_all_schedules( + self, plugin_name: str | None = None + ) -> list[ScheduleInfo]: + """获取所有定时任务信息,可按插件名过滤。""" + if plugin_name: + return await ScheduleInfo.filter(plugin_name=plugin_name).all() + return await ScheduleInfo.all() + + async def get_schedule_status(self, schedule_id: int) -> dict | None: + """获取任务的详细状态。""" + schedule = await self.get_schedule_by_id(schedule_id) + if not schedule: + return None + + job_id = self._get_job_id(schedule.id) + job = scheduler.get_job(job_id) + + status = { + "id": schedule.id, + "bot_id": schedule.bot_id, + "plugin_name": schedule.plugin_name, + "group_id": schedule.group_id, + "is_enabled": schedule.is_enabled, + "trigger_type": schedule.trigger_type, + "trigger_config": schedule.trigger_config, + "job_kwargs": schedule.job_kwargs, + "next_run_time": job.next_run_time.strftime("%Y-%m-%d %H:%M:%S") + if job and job.next_run_time + else "N/A", + "is_paused_in_scheduler": not bool(job.next_run_time) if job else "N/A", + } + return status + + async def pause_schedule(self, schedule_id: int) -> tuple[bool, str]: + """暂停一个定时任务。""" + schedule = await self.get_schedule_by_id(schedule_id) + if not schedule or not schedule.is_enabled: + return False, "任务不存在或已暂停。" + + schedule.is_enabled = False + await schedule.save(update_fields=["is_enabled"]) + + job_id = self._get_job_id(schedule.id) + try: + scheduler.pause_job(job_id) + except Exception: + pass + + return ( + True, + f"已暂停插件 '{schedule.plugin_name}' 在群 {schedule.group_id} " + f"的定时任务 (ID: {schedule.id})。", + ) + + async def resume_schedule(self, schedule_id: int) -> tuple[bool, str]: + """恢复一个定时任务。""" + schedule = await self.get_schedule_by_id(schedule_id) + if not schedule or schedule.is_enabled: + return False, "任务不存在或已启用。" + + schedule.is_enabled = True + await schedule.save(update_fields=["is_enabled"]) + + job_id = self._get_job_id(schedule.id) + try: + scheduler.resume_job(job_id) + except Exception: + self._add_aps_job(schedule) + + return ( + True, + f"已恢复插件 '{schedule.plugin_name}' 在群 {schedule.group_id} " + f"的定时任务 (ID: {schedule.id})。", + ) + + async def trigger_now(self, schedule_id: int) -> tuple[bool, str]: + """手动触发一个定时任务。""" + schedule = await self.get_schedule_by_id(schedule_id) + if not schedule: + return False, f"未找到 ID 为 {schedule_id} 的定时任务。" + + if schedule.plugin_name not in self._registered_tasks: + return False, f"插件 '{schedule.plugin_name}' 没有注册可用的定时任务。" + + try: + await self._execute_job(schedule.id) + return ( 
+ True, + f"已手动触发插件 '{schedule.plugin_name}' 在群 {schedule.group_id} " + f"的定时任务 (ID: {schedule.id})。", + ) + except Exception as e: + logger.error(f"手动触发任务失败: {e}") + return False, f"手动触发任务失败: {e}" + + +scheduler_manager = SchedulerManager() + + +@PriorityLifecycle.on_startup(priority=90) +async def _load_schedules_from_db(): + """在服务启动时从数据库加载并调度所有任务。""" + Config.add_plugin_config( + "SchedulerManager", + SCHEDULE_CONCURRENCY_KEY, + 5, + help="“所有群组”类型定时任务的并发执行数量限制", + type=int, + ) + + logger.info("正在从数据库加载并调度所有定时任务...") + schedules = await ScheduleInfo.filter(is_enabled=True).all() + count = 0 + for schedule in schedules: + if schedule.plugin_name in scheduler_manager._registered_tasks: + scheduler_manager._add_aps_job(schedule) + count += 1 + else: + logger.warning(f"跳过加载定时任务:插件 '{schedule.plugin_name}' 未注册。") + logger.info(f"定时任务加载完成,共成功加载 {count} 个任务。") diff --git a/zhenxun/utils/manager/virtual_env_package_manager.py b/zhenxun/utils/manager/virtual_env_package_manager.py new file mode 100644 index 00000000..ba60d9b3 --- /dev/null +++ b/zhenxun/utils/manager/virtual_env_package_manager.py @@ -0,0 +1,180 @@ +from pathlib import Path +import subprocess +from subprocess import CalledProcessError +from typing import ClassVar + +from zhenxun.configs.config import Config +from zhenxun.services.log import logger + +BAT_FILE = Path() / "win启动.bat" + +LOG_COMMAND = "VirtualEnvPackageManager" + +Config.add_plugin_config( + "virtualenv", + "python_path", + None, + help="虚拟环境python路径,为空时使用系统环境的poetry", +) + + +class VirtualEnvPackageManager: + WIN_COMMAND: ClassVar[list[str]] = [ + "./Python310/python.exe", + "-m", + "pip", + ] + + DEFAULT_COMMAND: ClassVar[list[str]] = ["poetry", "run", "pip"] + + @classmethod + def __get_command(cls) -> list[str]: + if path := Config.get_config("virtualenv", "python_path"): + return [path, "-m", "pip"] + return ( + cls.WIN_COMMAND.copy() if BAT_FILE.exists() else cls.DEFAULT_COMMAND.copy() + ) + + @classmethod + def install(cls, package: list[str] | str): + """安装依赖包 + + 参数: + package: 安装依赖包名称或列表 + """ + if isinstance(package, str): + package = [package] + try: + command = cls.__get_command() + command.append("install") + command.append(" ".join(package)) + logger.info(f"执行虚拟环境安装包指令: {command}", LOG_COMMAND) + result = subprocess.run( + command, + check=True, + capture_output=True, + text=True, + ) + logger.debug( + f"安装虚拟环境包指令执行完成: {result.stdout}", + LOG_COMMAND, + ) + return result.stdout + except CalledProcessError as e: + logger.error(f"安装虚拟环境包指令执行失败: {e.stderr}.", LOG_COMMAND) + return e.stderr + + @classmethod + def uninstall(cls, package: list[str] | str): + """卸载依赖包 + + 参数: + package: 卸载依赖包名称或列表 + """ + if isinstance(package, str): + package = [package] + try: + command = cls.__get_command() + command.append("uninstall") + command.append("-y") + command.append(" ".join(package)) + logger.info(f"执行虚拟环境卸载包指令: {command}", LOG_COMMAND) + result = subprocess.run( + command, + check=True, + capture_output=True, + text=True, + ) + logger.debug( + f"卸载虚拟环境包指令执行完成: {result.stdout}", + LOG_COMMAND, + ) + return result.stdout + except CalledProcessError as e: + logger.error(f"卸载虚拟环境包指令执行失败: {e.stderr}.", LOG_COMMAND) + return e.stderr + + @classmethod + def update(cls, package: list[str] | str): + """更新依赖包 + + 参数: + package: 更新依赖包名称或列表 + """ + if isinstance(package, str): + package = [package] + try: + command = cls.__get_command() + command.append("install") + command.append("--upgrade") + command.append(" ".join(package)) + logger.info(f"执行虚拟环境更新包指令: {command}", 
LOG_COMMAND) + result = subprocess.run( + command, + check=True, + capture_output=True, + text=True, + ) + logger.debug(f"更新虚拟环境包指令执行完成: {result.stdout}", LOG_COMMAND) + return result.stdout + except CalledProcessError as e: + logger.error(f"更新虚拟环境包指令执行失败: {e.stderr}.", LOG_COMMAND) + return e.stderr + + @classmethod + def install_requirement(cls, requirement_file: Path): + """安装依赖文件 + + 参数: + requirement_file: requirement文件路径 + + 异常: + FileNotFoundError: 文件不存在 + """ + if not requirement_file.exists(): + raise FileNotFoundError(f"依赖文件 {requirement_file} 不存在", LOG_COMMAND) + try: + command = cls.__get_command() + command.append("install") + command.append("-r") + command.append(str(requirement_file.absolute())) + logger.info(f"执行虚拟环境安装依赖文件指令: {command}", LOG_COMMAND) + result = subprocess.run( + command, + check=True, + capture_output=True, + text=True, + ) + logger.debug( + f"安装虚拟环境依赖文件指令执行完成: {result.stdout}", + LOG_COMMAND, + ) + return result.stdout + except CalledProcessError as e: + logger.error( + f"安装虚拟环境依赖文件指令执行失败: {e.stderr}.", + LOG_COMMAND, + ) + return e.stderr + + @classmethod + def list(cls) -> str: + """列出已安装的依赖包""" + try: + command = cls.__get_command() + command.append("list") + logger.info(f"执行虚拟环境列出包指令: {command}", LOG_COMMAND) + result = subprocess.run( + command, + check=True, + capture_output=True, + text=True, + ) + logger.debug( + f"列出虚拟环境包指令执行完成: {result.stdout}", + LOG_COMMAND, + ) + return result.stdout + except CalledProcessError as e: + logger.error(f"列出虚拟环境包指令执行失败: {e.stderr}.", LOG_COMMAND) + return "" diff --git a/zhenxun/utils/platform.py b/zhenxun/utils/platform.py index 6d379131..634c8226 100644 --- a/zhenxun/utils/platform.py +++ b/zhenxun/utils/platform.py @@ -83,7 +83,7 @@ class PlatformUtils: bot: Bot, message: UniMessage | str, superuser_id: str | None = None, - ) -> Receipt | None: + ) -> list[tuple[str, Receipt]]: """发送消息给超级用户 参数: @@ -97,15 +97,33 @@ class PlatformUtils: 返回: Receipt | None: Receipt """ - if not superuser_id: - if platform := cls.get_platform(bot): - if platform_superusers := BotConfig.get_superuser(platform): - superuser_id = random.choice(platform_superusers) - else: - raise NotFindSuperuser() + superuser_ids = [] + if superuser_id: + superuser_ids.append(superuser_id) + elif platform := cls.get_platform(bot): + if platform_superusers := BotConfig.get_superuser(platform): + superuser_ids = platform_superusers + else: + raise NotFindSuperuser() if isinstance(message, str): message = MessageUtils.build_message(message) - return await cls.send_message(bot, superuser_id, None, message) + result = [] + for superuser_id in superuser_ids: + try: + result.append( + ( + superuser_id, + await cls.send_message(bot, superuser_id, None, message), + ) + ) + except Exception as e: + logger.error( + "发送消息给超级用户失败", + "PlatformUtils:send_superuser", + target=superuser_id, + e=e, + ) + return result @classmethod async def get_group_member_list(cls, bot: Bot, group_id: str) -> list[UserData]: @@ -209,7 +227,7 @@ class PlatformUtils: url = None if platform == "qq": if user_id.isdigit(): - url = f"http://q1.qlogo.cn/g?b=qq&nk={user_id}&s=160" + url = f"http://q1.qlogo.cn/g?b=qq&nk={user_id}&s=640" else: url = f"https://q.qlogo.cn/qqapp/{appid}/{user_id}/640" return await AsyncHttpx.get_content(url) if url else None diff --git a/zhenxun/utils/utils.py b/zhenxun/utils/utils.py index c8046813..1e3d26e3 100644 --- a/zhenxun/utils/utils.py +++ b/zhenxun/utils/utils.py @@ -1,4 +1,5 @@ from collections import defaultdict +from dataclasses import dataclass 
from datetime import datetime import os from pathlib import Path @@ -6,6 +7,7 @@ import time from typing import Any import httpx +from nonebot_plugin_uninfo import Uninfo import pypinyin import pytz @@ -13,6 +15,16 @@ from zhenxun.configs.config import Config from zhenxun.services.log import logger +@dataclass +class EntityIDs: + user_id: str + """用户id""" + group_id: str | None + """群组id""" + channel_id: str | None + """频道id""" + + class ResourceDirManager: """ 临时文件管理器 @@ -230,6 +242,27 @@ def is_valid_date(date_text: str, separator: str = "-") -> bool: return False +def get_entity_ids(session: Uninfo) -> EntityIDs: + """获取用户id,群组id,频道id + + 参数: + session: Uninfo + + 返回: + EntityIDs: 用户id,群组id,频道id + """ + user_id = session.user.id + group_id = None + channel_id = None + if session.group: + if session.group.parent: + group_id = session.group.parent.id + channel_id = session.group.id + else: + group_id = session.group.id + return EntityIDs(user_id=user_id, group_id=group_id, channel_id=channel_id) + + def is_number(text: str) -> bool: """是否为数字