Commit 721d35f

benny-dou <60535774+benny-dou@users.noreply.github.com>
2025-01-27 16:35:31
refactor(gpt): split gpt response into multiple files
1 parent 8d28152
src/llm/contexts.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import base64
+import re
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from loguru import logger
+from pyrogram.client import Client
+from pyrogram.types import Message
+
+from config import GPT, PREFIX
+from llm.utils import BOT_TIPS
+from messages.parser import parse_msg
+
+if TYPE_CHECKING:
+    from io import BytesIO
+
+
def get_conversations(message: Message) -> list[Message]:
    """Collect the full reply chain of *message*, ordered oldest first."""
    chain: list[Message] = []
    current = message
    while current:
        chain.append(current)
        current = current.reply_to_message
    chain.reverse()  # the walk above goes newest-to-oldest; callers want old to new
    return chain
+
+
async def get_conversation_contexts(client: Client, conversations: list[Message]) -> list[dict]:
    """Generate contexts for GPT conversation.

    From old to new messages.

    Returns:
        list[dict]: [
            {
                "role": "user or assistant",
                "content": [
                    {'type': 'text', 'text': 'caption this img'},
                    {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,base64_image'}},
                    {'type': 'image_url', 'image_url': {'url': 'https://server.com/dir/image.jpg'}},
                ]
            }
        ]
    """
    # Parse each message, dropping unsupported ones: single_context returns {}
    # for them, and an empty dict would crash combine_consecutive_role_contexts
    # (it reads msg["role"]).
    contexts = [ctx for message in conversations if (ctx := await single_context(client, message))]
    contexts = combine_consecutive_role_contexts(contexts)
    # Keep the most recent N entries (tail), so the triggering message is never
    # dropped when the thread grows beyond HISTORY_CONTEXT.
    limit = int(GPT.HISTORY_CONTEXT)
    return contexts[-limit:] if limit else []
+
+
async def single_context(client: Client, message: Message) -> dict:
    """Generate the GPT context for a single message (without considering its reply chain).

    Media is embedded either as base64 (GPT.MEDIA_FORMAT == "base64") or as a
    URL under GPT.MEDIA_SERVER (http mode, file kept on disk).

    Returns:
    {
        "role": "user or assistant",
        "content": [
            {"type": "image_url", "image_url": {"url": "https://server.com/dir/image.jpg"}},
            {"type": "video_url", "video_url": {"url": "https://server.com/dir/video.mp4"}},  # Zhipu (智谱)
            # {"type": "video", "video": ["https://server.com/dir/1.jpg","https://server.com/dir/2.jpg","https://server.com/dir/3.jpg"]},  # Qwen (千问)
        ],
    }
    Returns {} for unsupported message types.
    """

    def clean_text(text: str) -> str:
        # Strip the command prefix and everything up to the bot marker so that
        # neither the "/ai" prefix nor the bot's own reply header re-enters the context.
        if not text:
            return ""
        return re.sub(rf"(.*?){BOT_TIPS}\)", "", text.removeprefix(PREFIX.GPT)).strip()

    info = parse_msg(message, silent=True)
    # Messages carrying the bot marker were produced by this bot -> assistant role.
    role = "assistant" if BOT_TIPS in info["text"] else "user"
    # only text
    if info["mtype"] == "text" and (text := clean_text(info["text"])):
        return {"role": role, "content": [{"type": "text", "text": text}]}

    if info["mtype"] not in ["photo", "voice", "video", "document"]:
        return {}

    # has media; for albums, expand to every message in the media group
    messages = await client.get_media_group(message.chat.id, message.id) if message.media_group_id else [message]
    media = []
    for msg in messages:
        info = parse_msg(msg, silent=True)
        try:
            if GPT.MEDIA_FORMAT == "base64":
                res: BytesIO = await client.download_media(msg, in_memory=True)  # type: ignore
                logger.debug(f"Downloaded GPT media: {res.name}")
                # MIME subtype from the filename; "jpg" must be spelled "jpeg" in data URIs
                ext = Path(res.name).suffix.removeprefix(".").replace("jpg", "jpeg")
                b64 = base64.b64encode(res.getvalue()).decode("utf-8")
                if info["mtype"] == "photo":
                    media.append({"type": "image_url", "image_url": {"url": f"data:image/{ext};base64,{b64}"}})
                elif info["mtype"] == "video":
                    # NOTE(review): raw base64 with no "data:" prefix — presumably
                    # what the Zhipu video endpoint expects; confirm.
                    media.append({"type": "video_url", "video_url": {"url": b64}})
                elif info["mtype"] == "document" and info["mime_type"] == "text/plain":
                    media.append({"type": "text", "text": res.getvalue().decode("utf-8")})
                else:
                    # NOTE(review): "voice" passes the mtype filter above but is
                    # not handled by any branch, so it always ends up here.
                    logger.warning(f"Unsupported message type: {info['mtype']}")
                    continue
            else:
                # http mode: keep the file on disk and reference it via MEDIA_SERVER
                path: str = await client.download_media(msg)  # type: ignore
                logger.debug(f"Downloaded GPT media: {path}")
                if info["mtype"] == "photo":
                    media.append({"type": "image_url", "image_url": {"url": f"{GPT.MEDIA_SERVER}/{Path(path).name}"}})
                elif info["mtype"] == "video":
                    media.append({"type": "video_url", "video_url": {"url": f"{GPT.MEDIA_SERVER}/{Path(path).name}"}})
                elif info["mtype"] == "document" and info["mime_type"] == "text/plain":
                    media.append({"type": "text", "text": Path(path).read_text()})
                    Path(path).unlink(missing_ok=True)  # text is inlined; file no longer needed
                else:
                    logger.warning(f"Unsupported message type: {info['mtype']}")
                    continue
            if caption := info["text"]:
                media.append({"type": "text", "text": caption})
        except Exception as e:
            # Best-effort: skip media that fail to download instead of aborting the context
            logger.warning(f"Download media from message failed: {e}")
            continue
    return {"role": role, "content": media}
+
+
def combine_consecutive_role_contexts(contexts: list[dict]) -> list[dict]:
    """Merge runs of same-role contexts into a single message.

    Some GPT models reject consecutive messages with the same role
    (e.g. Hunyuan), so adjacent same-role entries are folded together by
    concatenating their content lists.

    Args:
        contexts (list[dict]): [
            {
                "role": "user or assistant",
                "content": [
                    {'type': 'text', 'text': 'caption this img'},
                    {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,base64_image'}},
                    {'type': 'image_url', 'image_url': {'url': 'https://server.com/dir/image.jpg'}},
                ]
            }
        ]
    """
    merged: list[dict] = []
    for ctx in contexts:
        previous = merged[-1] if merged else None
        if previous is not None and previous["role"] == ctx["role"]:
            # Same role as the last kept entry: fold this content into it in place.
            previous["content"].extend(ctx["content"])
        else:
            merged.append(ctx)
    return merged
src/llm/gpt.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from loguru import logger
+from pyrogram.client import Client
+from pyrogram.types import Message
+
+from config import ENABLE, GPT, PREFIX, cache
+from llm.contexts import get_conversation_contexts, get_conversations
+from llm.models import get_model_type, get_model_with_contexts
+from llm.response import get_gpt_response
+from llm.utils import llm_cleanup_files
+from messages.parser import parse_msg
+from messages.progress import modify_progress
+from messages.sender import send2tg
+from messages.utils import equal_prefix, startswith_prefix
+
# User-facing help text for the bare GPT command (Chinese, Telegram markdown).
# Rendered once at import time from the configured model display names.
HELP = f"""🤖**GPT对话**
当前模型:
- 文本模型: **{GPT.TEXT_MODEL_NAME}**
- 图片模型: **{GPT.IMAGE_MODEL_NAME}**
- 视频模型: **{GPT.VIDEO_MODEL_NAME}**

使用说明:
1. 在 `{PREFIX.GPT}` 后接提示词即可与GPT对话
2. 以 `{PREFIX.GPT}` 回复消息可将其加入上下文
3. 暂不支持音频模型, 可以先用 `{PREFIX.ASR}` 命令转为文字后再使用 `{PREFIX.GPT}`
"""
+
+
def is_gpt_conversation(message: Message) -> bool:
    """Return True if *message* starts or continues a GPT conversation."""
    text = parse_msg(message)["text"]
    # Explicit start: the message carries the GPT command prefix.
    if startswith_prefix(text, prefix=[PREFIX.GPT]):
        return True
    # Continuation: a reply to one of the bot's own responses (they start with 🤖).
    reply = message.reply_to_message
    if reply is None:
        return False
    return parse_msg(reply, silent=True)["text"].startswith("🤖")
+
+
@cache.memoize(ttl=60)
async def gpt_response(
    client: Client,
    message: Message,
    **kwargs,
):
    """Answer a GPT conversation message on Telegram.

    Sends HELP for a bare prefix command without a reply; otherwise builds the
    conversation contexts from the reply chain, selects a model by media type
    (text/image/video) and relays the model's answer back to the chat.

    Args:
        client (Client): The Pyrogram client.
        message (Message): The trigger message object.
        **kwargs: Forwarded to send2tg / modify_progress (e.g. show_progress).
    """
    if not ENABLE.GPT:
        return
    info = parse_msg(message)
    # send docs if message == "/ai", without reply
    if equal_prefix(info["text"], prefix=[PREFIX.GPT]) and not message.reply_to_message:
        await send2tg(client, message, texts=HELP, **kwargs)
        return

    if not is_gpt_conversation(message):
        return

    # cache media_group message, only process once
    # (an album arrives as several updates sharing one media_group_id)
    if media_group_id := message.media_group_id:
        if cache.get(f"gpt-{info['cid']}-{media_group_id}"):
            return
        cache.set(f"gpt-{info['cid']}-{media_group_id}", "1", ttl=120)
    conversations = get_conversations(message)
    model_type = get_model_type(conversations)
    if model_type.startswith("ERROR"):  # e.g. conversation mixes image and video
        logger.error(model_type)
        await send2tg(client, message, texts=model_type, **kwargs)
        return
    contexts = await get_conversation_contexts(client, conversations)
    model_conf, contexts = get_model_with_contexts(model_type, contexts)
    msg = f"🤖{model_conf['friendly_name']}: 思考中..."
    if kwargs.get("show_progress"):
        res = await send2tg(client, message, texts=msg, **kwargs)
        kwargs["progress"] = res[0]  # progress-message handle for later edits
    response = await get_gpt_response(model_conf, contexts, **kwargs)
    llm_cleanup_files(contexts)  # remove media downloaded while building contexts
    texts = f"{model_conf['bot_msg_prefix']}\n\n{response}"
    logger.debug(texts)
    await send2tg(client, message, texts=texts, **kwargs)
    await modify_progress(del_status=True, **kwargs)
src/llm/models.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from loguru import logger
+from pyrogram.types import Message
+
+from config import GPT
+from llm.utils import BOT_TIPS, fix_doubao
+from messages.parser import parse_msg
+
+
def get_model_type(conversations: list[Message]) -> str:
    """Pick the model type ("text", "image" or "video") for a conversation.

    Returns an "ERROR: ..." string when the conversation mixes photos and
    videos, since no single model handles both.
    """
    has_image = False
    has_video = False
    for message in conversations:
        mtype = parse_msg(message, silent=True)["mtype"]
        has_image = has_image or mtype == "photo"
        has_video = has_video or mtype == "video"
    if has_image and has_video:
        return "ERROR: this conversation have both image and video."
    if has_image:
        return "image"
    if has_video:
        return "video"
    return "text"
+
+
def get_model_with_contexts(model_type: str, contexts: list[dict]) -> tuple[dict, list[dict]]:
    """Build the GPT model config for *model_type* and return it with (possibly adjusted) contexts.

    contexts:
    [
      {
          "role": "user",
          "content": [
              {"type": "text", "text": "text"},
              {"type": "image_url", "image_url": {"url": "https://server.com/dir/image.jpg"}},
              {"type": "video_url", "video_url": {"url": "https://server.com/dir/video.mp4"}},
            ]
      }
    ]

    Args:
        model_type (str): "text", "image" or "video" (from get_model_type).
        contexts (list[dict]): OpenAI-format conversation messages.

    Returns:
        tuple[dict, list[dict]]: (model config, adjusted contexts).
    """
    # All per-type settings come straight from the GPT env config.
    models = {"text": GPT.TEXT_MODEL, "image": GPT.IMAGE_MODEL, "video": GPT.VIDEO_MODEL}
    model_names = {"text": GPT.TEXT_MODEL_NAME, "image": GPT.IMAGE_MODEL_NAME, "video": GPT.VIDEO_MODEL_NAME}
    timeouts = {"text": GPT.TEXT_TIMEOUT, "image": GPT.IMAGE_TIMEOUT, "video": GPT.VIDEO_TIMEOUT}
    apis = {"text": GPT.TEXT_API_KEY, "image": GPT.IMAGE_API_KEY, "video": GPT.VIDEO_API_KEY}
    urls = {"text": GPT.TEXT_BASE_URL, "image": GPT.IMAGE_BASE_URL, "video": GPT.VIDEO_BASE_URL}
    model = models[model_type]
    config = {
        "model": model,
        "friendly_name": model_names[model_type],
        "timeout": round(float(timeouts[model_type])),  # env value may be "15" or "15.5"
        "base_url": urls[model_type],
        "key": apis[model_type],
        "temperature": float(GPT.TEMPERATURE),
        "bot_msg_prefix": f"🤖**{model_names[model_type]}**: ({BOT_TIPS})",
    }
    # NOTE(review): this matches the model ID against "豆包", but Doubao model
    # IDs are typically latin ("doubao-..."/"ep-..."); the previous version
    # matched "doubao" on the id. Confirm the intended env convention,
    # otherwise fix_doubao may never run.
    if model.startswith("豆包"):
        contexts = fix_doubao(contexts)
    logger.trace(config)
    return config, contexts
src/llm/response.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from loguru import logger
+from openai import AsyncOpenAI, DefaultAsyncHttpxClient
+
+from config import PROXY
+from messages.progress import modify_progress
+
+
async def get_gpt_response(config: dict, contexts: list[dict], **kwargs) -> str:
    """Get a GPT response via an OpenAI-compatible chat completions API.

    Args:
        config (dict): Model config from get_model_with_contexts
            (model, key, base_url, timeout, temperature, friendly_name).
        contexts (list[dict]): Conversation messages in OpenAI format.
        **kwargs: Forwarded to modify_progress on failure.

    Returns:
        str: The model's answer, or a user-facing error message on failure.
    """
    fallback = f"🤖{config['friendly_name']}对话失败, 请稍后重试."
    logger.trace(contexts)
    openai = None
    try:
        openai = AsyncOpenAI(
            api_key=config["key"],
            base_url=config["base_url"],
            timeout=config["timeout"],
            http_client=DefaultAsyncHttpxClient(proxy=PROXY.GPT),
        )
        resp = await openai.chat.completions.create(
            model=config["model"],
            messages=contexts,  # type: ignore
            temperature=config["temperature"],
        )
        choices = resp.model_dump().get("choices", [])
        # content may legitimately be None (e.g. refusal); never return None to callers
        content = choices[0].get("message", {}).get("content") if choices else None
        return content or fallback
    except Exception as e:
        error = f"🤖{config['friendly_name']}对话失败, 请稍后重试.\n{e}"
        logger.error(f"GPT request failed: {e}")
        await modify_progress(text=error, force_update=True, **kwargs)
        return error
    finally:
        # The client owns an httpx connection pool; close it so each call
        # does not leak connections (a fresh client is created per request).
        if openai is not None:
            await openai.close()
src/llm/utils.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from pathlib import Path
+
+from config import DOWNLOAD_DIR
+
# Marker appended to every bot reply ("reply to this message to continue the
# conversation"); its presence in a message text identifies the "assistant"
# role when rebuilding conversation contexts.
BOT_TIPS = "回复此消息以继续对话"
+
+
def fix_doubao(contexts: list[dict]) -> list[dict]:
    """Fix doubao context format.

    Doubao do not support this content for:
        [{'text': 'hi', 'type': 'text'}], 'role': 'user'}]
    It support:
        [{'content': 'hi', 'role': 'user'}]

    Text-only messages have their content list flattened to a single string
    (parts joined with newlines); messages with no content or any non-text
    part are dropped.

    Args:
        contexts (list[dict]): [
            {
                "role": "user or assistant",
                "content": [
                    {'type': 'text', 'text': 'caption this img'},
                    {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,base64_image'}},
                    {'type': 'image_url', 'image_url': {'url': 'https://server.com/dir/image.jpg'}},
                ]
            }
        ]
    """
    result = []
    for ctx in contexts:
        parts = ctx.get("content", [])
        if not parts:
            continue
        if any(part.get("type") != "text" for part in parts):
            continue
        ctx["content"] = "\n".join(part.get("text") for part in parts)
        result.append(ctx)
    return result
+
+
def llm_cleanup_files(messages: list[dict]):
    """Clean up media files downloaded into DOWNLOAD_DIR while building contexts.

    Cleanup is best-effort: inline payloads (data: URIs / raw base64 produced
    in GPT.MEDIA_FORMAT == "base64" mode) reference no file on disk and are
    skipped, and filesystem errors are swallowed — Path(url).name over a long
    base64 string can otherwise raise OSError (e.g. ENAMETOOLONG), which
    unlink(missing_ok=True) does not cover.

    messages:
    [
      {
          'role': 'user',
          'content': [
              {'type': 'text', 'text': 'text'},
              {'type': 'image_url', 'image_url': {'url': 'https://server.com/dir/image.jpg'}},
              {"type": "video_url", "video_url": {"url": "https://server.com/dir/video.mp4"}},  # Zhipu (智谱)
              {"type": "video", "video": ["https://server.com/dir/1.jpg","https://server.com/dir/2.jpg","https://server.com/dir/3.jpg"]},  # Qwen (千问)
              {"audio": "https://server.com/dir/audio.mp3"},
            ]
      }
    ]
    """

    def _unlink(url: str) -> None:
        # Inline base64 payloads never correspond to a downloaded file.
        if not url or url.startswith("data:"):
            return
        try:
            (Path(DOWNLOAD_DIR) / Path(url).name).unlink(missing_ok=True)
        except OSError:
            pass  # best-effort cleanup; never let it break the response flow

    for item in messages:
        content = item.get("content", [])
        if not isinstance(content, list):
            continue  # already flattened to a string (e.g. by fix_doubao)
        for x in content:
            _unlink(x.get("image_url", {}).get("url", ""))
            _unlink(x.get("video_url", {}).get("url", ""))
            for url in x.get("video", []) or []:
                _unlink(url)
            _unlink(x.get("audio", ""))
src/messages/parser.py
@@ -28,6 +28,7 @@ def parse_msg(message: Message, *, silent: bool = False, verbose: bool = False)
     uid = message.from_user.id if message.from_user else 0
     cid = message.chat.id if message.chat else 0
     mid = message.id if message.id else 0
+    media_group_id = message.media_group_id if message.media_group_id else 0
     is_bot = bool(message.from_user and message.from_user.is_bot)
     text = message.text or message.caption or ""
     dt = message.date.replace(tzinfo=ZoneInfo(TZ)) if isinstance(message.date, datetime) else nowdt(TZ)
@@ -92,6 +93,7 @@ def parse_msg(message: Message, *, silent: bool = False, verbose: bool = False)
         "uid": int(uid),
         "cid": int(cid),
         "mid": int(mid),
+        "media_group_id": int(media_group_id),
         "is_bot": bool(is_bot),
         "text": str(text),
         "first_name": str(first_name),
src/messages/sender.py
@@ -171,9 +171,6 @@ async def send2tg(
     ]
     TODO: Support to send audio and document
     """
-    if kwargs:
-        logger.debug(f"kwargs: {kwargs}")
-
     if not target_chat:
         target_chat = kwargs["target_chat"] if kwargs.get("target_chat") else message.chat.id
     target_chat = to_int(target_chat)
src/others/gpt.py
@@ -1,404 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-
-import asyncio
-import base64
-import copy
-import re
-from pathlib import Path
-from typing import TYPE_CHECKING
-
-from loguru import logger
-from pyrogram.client import Client
-from pyrogram.types import Message
-
-from config import DOWNLOAD_DIR, ENABLE, GPT, PREFIX, PROXY, cache
-from messages.parser import parse_msg
-from messages.progress import modify_progress
-from messages.sender import send2tg
-from messages.utils import equal_prefix, startswith_prefix
-from multimedia import convert_to_audio
-from networking import hx_req
-
-if TYPE_CHECKING:
-    from io import BytesIO
-
-clean_name = lambda x: x.removesuffix("-mini").removesuffix("-latest").removesuffix("-turbo").removesuffix("-exp").removesuffix("-chat")
-HELP = f"""🤖**GPT对话**
-当前模型:
-- 文本模型: **{clean_name(GPT.TEXT_MODEL)}**
-- 图片模型: **{clean_name(GPT.VISION_MODEL)}**
-- 视频模型: **{clean_name(GPT.VIDEO_MODEL)}**
-- 音频模型: **{clean_name(GPT.AUDIO_MODEL)}** (30秒以内)
-
-使用说明:
-1. 在 `{PREFIX.GPT}` 后接提示词即可与GPT对话
-2. 以 `{PREFIX.GPT}` 回复消息可将其加入上下文
-3. 音频模型仅支持30秒以内的音频文件, 如需更长可以先用 `{PREFIX.ASR}` 命令转为文字后再使用 `{PREFIX.GPT}`
-"""
-BOT_TIPS = "回复此消息以继续对话"
-
-
-@cache.memoize(ttl=60)
-async def gpt_response(client: Client, message: Message, **kwargs):
-    """Get GPT response from Various API.
-
-    Args:
-        client (Client): The Pyrogram client.
-        message (Message): The trigger message object.
-    """
-    if not ENABLE.GPT:
-        return
-    info = parse_msg(message)
-    # send docs if message == "/ai", without reply
-    if equal_prefix(info["text"], prefix=[PREFIX.GPT]) and not message.reply_to_message:
-        await send2tg(client, message, texts=HELP, **kwargs)
-        return
-
-    if not is_valid_conversation(message):
-        return
-
-    # cache media_group message, only process once
-    if media_group_id := message.media_group_id:
-        if cache.get(f"gpt-{message.chat.id}-{media_group_id}"):
-            return
-        cache.set(f"gpt-{message.chat.id}-{media_group_id}", "1", ttl=120)
-
-    contexts = await generate_contexts(client, message)
-    model_conf = get_model_config(contexts)
-    if model_conf["friendly_name"] == "豆包":
-        contexts = fix_doubao(contexts)
-    msg = f"🤖{model_conf['friendly_name']}: 思考中..."
-    if kwargs.get("show_progress"):
-        res = await send2tg(client, message, texts=msg, **kwargs)
-        kwargs["progress"] = res[0]
-    headers = {"authorization": f"Bearer {model_conf['key']}"}
-    try:
-        if model_conf["model_type"] == "audio":
-            resp = await hx_req(model_conf["url"], "POST", headers=headers, post_json=model_conf["payload"], proxy=PROXY.GPT, check_has_kv=["output.choices"], timeout=model_conf["timeout"])
-            choices = resp.json()["output"]["choices"]
-            ai_response = choices[0].get("message", {}).get("content", [{}])[0].get("text", "")
-        else:
-            resp = await hx_req(model_conf["url"], "POST", headers=headers, post_json=model_conf["payload"], proxy=PROXY.GPT, check_has_kv=["choices"], timeout=model_conf["timeout"])
-            choices = resp.json()["choices"]
-            ai_response = choices[0].get("message", {}).get("content")
-    except Exception as e:
-        logger.error(f"GPT request failed: {e}")
-        await modify_progress(text=f"🤖{model_conf['friendly_name']}未响应, 请稍后重试.\n{e}", force_update=True, **kwargs)
-        await asyncio.sleep(3)
-        await modify_progress(del_status=True, **kwargs)
-        return
-    cleanup(contexts)
-    texts = f"{model_conf['bot_msg_prefix']}\n\n{ai_response}"
-    logger.debug(texts)
-    await send2tg(client, message, texts=texts, **kwargs)
-    await modify_progress(del_status=True, **kwargs)
-
-
-def get_model_config(contexts: list[dict]):
-    """Clean downloaded files.
-
-    [
-      {
-          "role": "user",
-          "content": [
-              {"type": "text", "text": "text"},
-              {"type": "image_url", "image_url": {"url": "https://server.com/dir/image.jpg"}},
-              {"type": "video_url", "video_url": {"url": "https://server.com/dir/video.mp4"}},  # 智谱
-              {"type": "video", "video": ["https://server.com/dir/1.jpg","https://server.com/dir/2.jpg","https://server.com/dir/3.jpg"]},  # 千问
-              {"audio": "https://server.com/dir/audio.mp3"},
-            ]
-      }
-    ]
-    """
-    model = GPT.TEXT_MODEL
-    model_type = "text"
-    timeout = GPT.TEXT_TIMEOUT
-    for item in contexts:
-        content = item.get("content", [])
-        for x in content:
-            if x.get("image_url"):
-                model = GPT.VISION_MODEL
-                model_type = "vision"
-                timeout = GPT.VISION_TIMEOUT
-                break
-            if x.get("video_url") or x.get("video"):
-                model = GPT.VIDEO_MODEL
-                model_type = "video"
-                timeout = GPT.VIDEO_TIMEOUT
-                break
-            if x.get("audio"):
-                model = GPT.AUDIO_MODEL
-                model_type = "audio"
-                timeout = GPT.AUDIO_TIMEOUT
-                break
-
-    common = {"model_type": model_type, "timeout": int(timeout)}
-    openai = {
-        "url": GPT.OPENAI_BASE_URL + "/chat/completions",
-        "key": GPT.OPENAI_API_KEY,
-    }
-    gemini = {
-        "url": GPT.GEMINI_BASE_URL + "/chat/completions",
-        "key": GPT.GEMINI_API_KEY,
-    }
-    hunyuan = {
-        "url": GPT.HUNYUAN_BASE_URL + "/chat/completions",
-        "key": GPT.HUNYUAN_API_KEY,
-    }
-    qwen = {
-        "url": GPT.DASHSCOPE_BASE_URL + "/chat/completions",
-        "key": GPT.DASHSCOPE_API_KEY,
-    }
-    openrouter = {
-        "url": GPT.OPENROUTER_BASE_URL + "/chat/completions",
-        "key": GPT.OPENROUTER_API_KEY,
-    }
-    glm = {
-        "url": GPT.GLM_BASE_URL + "/chat/completions",
-        "key": GPT.GLM_API_KEY,
-    }
-    doubao = {
-        "url": GPT.ARK_BASE_URL + "/chat/completions",
-        "key": GPT.ARK_API_KEY,
-    }
-    if model.startswith("gpt"):
-        openai["friendly_name"] = clean_name(model.replace("gpt", "GPT"))
-        config = common | openai
-
-    if model.startswith("gemini"):
-        gemini["friendly_name"] = clean_name(model.capitalize())
-        config = common | gemini
-
-    if model.startswith("hunyuan"):
-        hunyuan["friendly_name"] = clean_name(model.replace("hunyuan", "混元"))
-        config = common | hunyuan
-
-    if model.startswith("qwen"):
-        qwen["friendly_name"] = clean_name(model.replace("qwen", "通义千问"))
-        if model_type == "audio":  # qwen-audio
-            qwen["url"] = "https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation"
-        config = common | qwen
-
-    if model.startswith("deepseek"):  # via openrouter: deepseek/deepseek-chat
-        openrouter["friendly_name"] = clean_name(model.split("/")[-1].capitalize())
-        config = common | openrouter
-
-    if model.startswith("glm"):
-        glm["friendly_name"] = model.upper().removesuffix("-FLASH")
-        config = common | glm
-
-    if model.startswith("doubao"):
-        doubao["friendly_name"] = "豆包"
-        if model_type == "text":
-            model = GPT.DOUBAO_TEXT_ENTRYPOINT
-        elif model_type == "vision":
-            model = GPT.DOUBAO_VISION_ENTRYPOINT
-        config = common | doubao
-
-    config["bot_msg_prefix"] = f"🤖**{config['friendly_name']}**: ({BOT_TIPS})"
-    payload = {"model": model, "input": {"messages": contexts}} if model_type == "audio" else {"model": model, "messages": contexts, "temperature": float(GPT.TEMPERATURE)}
-    config["payload"] = payload
-    logger.trace(config)
-    return config
-
-
-async def generate_contexts(client: Client, message: Message) -> list[dict]:
-    """Generate contexts for GPT conversation.
-
-    Returns:
-        list[dict]: [
-            {
-                "role": "user or assistant",
-                "content": [
-                    {'type': 'text', 'text': 'caption this img'},
-                    {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,base64_image'}},
-                    {'type': 'image_url', 'image_url': {'url': 'https://server.com/dir/image.jpg'}},
-                ]
-            }
-        ]
-    """
-    # 按时间顺序如下
-    reply_msg = copy.deepcopy(message.reply_to_message)
-    contexts = []
-    if context := await generate_single_msg_context(client, message):  # this message
-        contexts.append(context)
-    while reply_msg:
-        if context := await generate_single_msg_context(client, reply_msg):  # this message
-            contexts.append(context)
-        reply_msg = reply_msg.reply_to_message
-    contexts.reverse()
-    contexts = combine_consecutive_role_contexts(contexts)
-    return contexts[: int(GPT.HISTORY_CONTEXT)]
-
-
-def combine_consecutive_role_contexts(contexts: list[dict]) -> list[dict]:
-    """Combine consecutive user and assistant contexts into one message.
-
-    Some GPT models don't support consecutive user and assistant contexts. (e.g. Hunyuan)
-
-    Args:
-        contexts (list[dict]): [
-            {
-                "role": "user or assistant",
-                "content": [
-                    {'type': 'text', 'text': 'caption this img'},
-                    {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,base64_image'}},
-                    {'type': 'image_url', 'image_url': {'url': 'https://server.com/dir/image.jpg'}},
-                ]
-            }
-        ]
-    """
-    combined_contexts = []
-    for i, msg in enumerate(contexts):
-        if i == 0:
-            combined_contexts.append(msg)
-            continue
-        if msg["role"] == combined_contexts[-1]["role"]:
-            combined_contexts[-1]["content"].extend(msg["content"])
-        else:
-            combined_contexts.append(msg)
-    return combined_contexts
-
-
-def fix_doubao(contexts: list[dict]) -> list[dict]:
-    """Fix doubao context format.
-
-    Doubao do not support this content for:
-        [{'text': 'hi', 'type': 'text'}], 'role': 'user'}]
-    It support:
-        [{'content': 'hi', 'role': 'user'}]
-
-    Args:
-        contexts (list[dict]): [
-            {
-                "role": "user or assistant",
-                "content": [
-                    {'type': 'text', 'text': 'caption this img'},
-                    {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,base64_image'}},
-                    {'type': 'image_url', 'image_url': {'url': 'https://server.com/dir/image.jpg'}},
-                ]
-            }
-        ]
-    """
-    fixed_contexts = []
-    for msg in contexts:
-        if (lst := msg.get("content", [])) and all(x.get("type") == "text" for x in lst):
-            msg["content"] = "\n".join([x.get("text") for x in lst])
-            fixed_contexts.append(msg)
-    return fixed_contexts
-
-
-def is_valid_conversation(message: Message) -> bool:
-    info = parse_msg(message)
-    if startswith_prefix(info["text"], prefix=[PREFIX.GPT]):
-        return True
-    # is replying to gpt-bot response message?
-    if not message.reply_to_message:
-        return False
-
-    reply_msg = message.reply_to_message
-    reply_info = parse_msg(reply_msg, silent=True)
-    return reply_info["text"].startswith("🤖")
-
-
-async def generate_single_msg_context(client: Client, message: Message) -> dict:
-    """Generate GPT contexts for a single message (Without consider reply message).
-
-    Returns:
-    {
-        "role": "user or assistant",
-        "content": [
-            {"type": "image_url", "image_url": {"url": "https://server.com/dir/image.jpg"}},
-            {"type": "video_url", "video_url": {"url": "https://server.com/dir/video.mp4"}},  # 智谱
-            {"type": "video", "video": ["https://server.com/dir/1.jpg","https://server.com/dir/2.jpg","https://server.com/dir/3.jpg"]},  # 千问
-            {"audio": "https://server.com/dir/audio.mp3"},
-        ],
-    }
-    """
-
-    def clean_text(text: str) -> str:
-        if not text:
-            return ""
-        return re.sub(rf"(.*?){BOT_TIPS}\)", "", text.removeprefix(PREFIX.GPT)).strip()
-
-    info = parse_msg(message, silent=True)
-    role = "assistant" if BOT_TIPS in info["text"] else "user"
-    # only text
-    if text := clean_text(message.text):
-        return {"role": role, "content": [{"type": "text", "text": text}]}
-
-    if info["mtype"] not in ["photo", "voice", "audio", "video", "document"]:
-        return {}
-
-    # has media
-    messages = await client.get_media_group(message.chat.id, message.id) if message.media_group_id else [message]
-    media = []
-    for msg in messages:
-        info = parse_msg(msg, silent=True)
-        try:
-            if GPT.MEDIA_FORMAT == "base64":
-                res: BytesIO = await client.download_media(msg, in_memory=True)  # type: ignore
-                logger.debug(f"Downloaded GPT media: {res.name}")
-                ext = Path(res.name).suffix.removeprefix(".").replace("jpg", "jpeg")
-                b64 = base64.b64encode(res.getvalue()).decode("utf-8")
-                if info["mtype"] == "photo":
-                    media.append({"type": "image_url", "image_url": {"url": f"data:image/{ext};base64,{b64}"}})
-                elif info["mtype"] == "video":
-                    media.append({"type": "video_url", "video_url": {"url": b64}})
-                elif info["mtype"] == "document" and info["mime_type"] == "text/plain":
-                    media.append({"type": "text", "text": res.getvalue().decode("utf-8")})
-                else:
-                    logger.warning("Audio do not support base64, please use http")
-            else:
-                path: str = await client.download_media(msg)  # type: ignore
-                logger.debug(f"Downloaded GPT media: {path}")
-                if info["mtype"] == "photo":
-                    media.append({"type": "image_url", "image_url": {"url": f"{GPT.MEDIA_SERVER}/{Path(path).name}"}})
-                elif info["mtype"] == "video":
-                    media.append({"type": "video_url", "video_url": {"url": f"{GPT.MEDIA_SERVER}/{Path(path).name}"}})
-                elif info["mtype"] in ["audio", "voice"]:
-                    mp3 = convert_to_audio(path, ext="mp3", codec="libmp3lame")
-                    media.append({"audio": f"{GPT.MEDIA_SERVER}/{mp3.name}"})
-                elif info["mtype"] == "document" and info["mime_type"] == "text/plain":
-                    media.append({"type": "text", "text": Path(path).read_text()})
-                    Path(path).unlink(missing_ok=True)
-            if caption := info["text"]:
-                media.append({"type": "text", "text": caption})
-        except Exception as e:
-            logger.warning(f"Download image from message failed: {e}")
-            continue
-    return {"role": role, "content": media}
-
-
-def cleanup(messages: list[dict]):
-    """Clean downloaded files.
-
-    [
-      {
-          'role': 'user',
-          'content': [
-              {'type': 'text', 'text': 'text'},
-              {'type': 'image_url', 'image_url': {'url': 'https://server.com/dir/image.jpg'}},
-              {"type": "video_url", "video_url": {"url": "https://server.com/dir/video.mp4"}},  # 智谱
-              {"type": "video", "video": ["https://server.com/dir/1.jpg","https://server.com/dir/2.jpg","https://server.com/dir/3.jpg"]},  # 千问
-              {"audio": "https://server.com/dir/audio.mp3"},
-            ]
-      }
-    ]
-    """
-    for item in messages:
-        content = item.get("content", [])
-        if not isinstance(content, list):
-            continue
-        for x in content:
-            if url := x.get("image_url", {}).get("url"):
-                (Path(DOWNLOAD_DIR) / Path(url).name).unlink(missing_ok=True)
-            if url := x.get("video_url", {}).get("url"):
-                (Path(DOWNLOAD_DIR) / Path(url).name).unlink(missing_ok=True)
-            if urls := x.get("video", []):
-                for url in urls:
-                    (Path(DOWNLOAD_DIR) / Path(url).name).unlink(missing_ok=True)
-            if url := x.get("audio"):
-                (Path(DOWNLOAD_DIR) / Path(url).name).unlink(missing_ok=True)
src/config.py
@@ -119,33 +119,24 @@ class COOKIE:  # See: https://github.com/easychen/CookieCloud
 
 class GPT:
     TEXT_MODEL = os.getenv("GPT_TEXT_MODEL", "gpt-4o")
-    VISION_MODEL = os.getenv("GPT_VISION_MODEL", "gpt-4o")
-    AUDIO_MODEL = os.getenv("GPT_AUDIO_MODEL", "qwen-audio-turbo-latest")
+    IMAGE_MODEL = os.getenv("GPT_IMAGE_MODEL", "gpt-4o")
     VIDEO_MODEL = os.getenv("GPT_VIDEO_MODEL", "glm-4v-plus")
+    TEXT_MODEL_NAME = os.getenv("GPT_TEXT_MODEL_NAME", "gpt-4o")  # custom name
+    IMAGE_MODEL_NAME = os.getenv("GPT_IMAGE_MODEL_NAME", "gpt-4o")
+    VIDEO_MODEL_NAME = os.getenv("GPT_VIDEO_MODEL_NAME", "glm-4v-plus")
     TEXT_TIMEOUT = os.getenv("GPT_TEXT_TIMEOUT", "15")
-    VISION_TIMEOUT = os.getenv("GPT_VISION_TIMEOUT", "30")
-    AUDIO_TIMEOUT = os.getenv("GPT_AUDIO_TIMEOUT", "30")
+    IMAGE_TIMEOUT = os.getenv("GPT_IMAGE_TIMEOUT", "30")
     VIDEO_TIMEOUT = os.getenv("GPT_VIDEO_TIMEOUT", "30")
     TEMPERATURE = os.getenv("GPT_TEMPERATURE", "0.5")
     HISTORY_CONTEXT = os.getenv("GPT_HISTORY_CONTEXT", "20")  # 最多携带多少条历史消息
     MEDIA_FORMAT = os.getenv("GPT_MEDIA_FORMAT", "base64")  # base64 or http
-    MEDIA_SERVER = os.getenv("GPT_MEDIA_SERVER", "https://server.com/dir")
-    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
-    OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
-    GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
-    GEMINI_BASE_URL = os.getenv("GEMINI_BASE_URL", "https://generativelanguage.googleapis.com/v1beta/openai")
-    HUNYUAN_API_KEY = os.getenv("HUNYUAN_API_KEY", "")
-    HUNYUAN_BASE_URL = os.getenv("HUNYUAN_BASE_URL", "https://api.hunyuan.cloud.tencent.com/v1")
-    DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY", "")
-    DASHSCOPE_BASE_URL = os.getenv("DASHSCOPE_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")
-    GLM_API_KEY = os.getenv("GLM_API_KEY", "")
-    GLM_BASE_URL = os.getenv("GLM_BASE_URL", "https://open.bigmodel.cn/api/paas/v4")
-    ARK_API_KEY = os.getenv("ARK_API_KEY", "")
-    ARK_BASE_URL = os.getenv("ARK_BASE_URL", "https://ark.cn-beijing.volces.com/api/v3")
-    DOUBAO_TEXT_ENTRYPOINT = os.getenv("GPT_DOUBAO_TEXT_ENTRYPOINT", "")
-    DOUBAO_VISION_ENTRYPOINT = os.getenv("GPT_DOUBAO_VISION_ENTRYPOINT", "")
-    OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
-    OPENROUTER_BASE_URL = os.getenv("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1")
+    MEDIA_SERVER = os.getenv("GPT_MEDIA_SERVER", "https://server.com/dir")  # only when MEDIA_FORMAT is http
+    TEXT_API_KEY = os.getenv("GPT_TEXT_API_KEY", "")
+    TEXT_BASE_URL = os.getenv("GPT_TEXT_BASE_URL", "https://api.openai.com/v1")
+    IMAGE_API_KEY = os.getenv("GPT_IMAGE_API_KEY", "")
+    IMAGE_BASE_URL = os.getenv("GPT_IMAGE_BASE_URL", "https://api.openai.com/v1")
+    VIDEO_API_KEY = os.getenv("GPT_VIDEO_API_KEY", "")
+    VIDEO_BASE_URL = os.getenv("GPT_VIDEO_BASE_URL", "https://open.bigmodel.cn/api/paas/v4")
 
 
 class TID:
src/handler.py
@@ -11,6 +11,7 @@ from asr.voice_recognition import voice_to_text
 from bridge.ocr import send_to_ocr_bridge
 from config import ENABLE, PREFIX, PROXY, cache
 from database import del_db
+from llm.gpt import gpt_response
 from messages.parser import parse_msg
 from messages.sender import send2tg
 from messages.utils import equal_prefix, startswith_prefix
@@ -18,7 +19,6 @@ from networking import flatten_rediercts, match_social_media_link
 from others.combine_history import combine_history
 from others.download_external import download_url_in_message
 from others.extract_audio import extract_audio_file
-from others.gpt import gpt_response
 from others.raw_img_file import convert_raw_img_file
 from others.subtitle import get_subtitle
 from preview.douyin import preview_douyin
pyproject.toml
@@ -7,6 +7,7 @@ dependencies = [
   "feedparser>=6.0.11",
   "httpx[http2,socks]>=0.28.1",
   "loguru>=0.7.2",
+  "openai>=1.60.1",
   "pillow-heif>=0.18.0",
   "pillow>=10.4.0",
   "puremagic>=1.28",
uv.lock
@@ -137,6 +137,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597 },
 ]
 
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
+]
+
 [[package]]
 name = "anyio"
 version = "4.8.0"
@@ -205,6 +214,7 @@ dependencies = [
     { name = "feedparser", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
     { name = "httpx", extra = ["http2", "socks"], marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
     { name = "loguru", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "openai", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
     { name = "pillow", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
     { name = "pillow-heif", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
     { name = "puremagic", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
@@ -232,6 +242,7 @@ requires-dist = [
     { name = "feedparser", specifier = ">=6.0.11" },
     { name = "httpx", extras = ["http2", "socks"], specifier = ">=0.28.1" },
     { name = "loguru", specifier = ">=0.7.2" },
+    { name = "openai", specifier = ">=1.60.1" },
     { name = "pillow", specifier = ">=10.4.0" },
     { name = "pillow-heif", specifier = ">=0.18.0" },
     { name = "puremagic", specifier = ">=1.28" },
@@ -354,6 +365,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604 },
 ]
 
+[[package]]
+name = "distro"
+version = "1.9.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277 },
+]
+
 [[package]]
 name = "executing"
 version = "2.1.0"
@@ -553,6 +573,46 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 },
 ]
 
+[[package]]
+name = "jiter"
+version = "0.8.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f8/70/90bc7bd3932e651486861df5c8ffea4ca7c77d28e8532ddefe2abc561a53/jiter-0.8.2.tar.gz", hash = "sha256:cd73d3e740666d0e639f678adb176fad25c1bcbdae88d8d7b857e1783bb4212d", size = 163007 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/cb/b0/c1a7caa7f9dc5f1f6cfa08722867790fe2d3645d6e7170ca280e6e52d163/jiter-0.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2dd61c5afc88a4fda7d8b2cf03ae5947c6ac7516d32b7a15bf4b49569a5c076b", size = 303666 },
+    { url = "https://files.pythonhosted.org/packages/f5/97/0468bc9eeae43079aaa5feb9267964e496bf13133d469cfdc135498f8dd0/jiter-0.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a6c710d657c8d1d2adbbb5c0b0c6bfcec28fd35bd6b5f016395f9ac43e878a15", size = 311934 },
+    { url = "https://files.pythonhosted.org/packages/e5/69/64058e18263d9a5f1e10f90c436853616d5f047d997c37c7b2df11b085ec/jiter-0.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9584de0cd306072635fe4b89742bf26feae858a0683b399ad0c2509011b9dc0", size = 335506 },
+    { url = "https://files.pythonhosted.org/packages/9d/14/b747f9a77b8c0542141d77ca1e2a7523e854754af2c339ac89a8b66527d6/jiter-0.8.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5a90a923338531b7970abb063cfc087eebae6ef8ec8139762007188f6bc69a9f", size = 355849 },
+    { url = "https://files.pythonhosted.org/packages/53/e2/98a08161db7cc9d0e39bc385415890928ff09709034982f48eccfca40733/jiter-0.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21974d246ed0181558087cd9f76e84e8321091ebfb3a93d4c341479a736f099", size = 381700 },
+    { url = "https://files.pythonhosted.org/packages/7a/38/1674672954d35bce3b1c9af99d5849f9256ac8f5b672e020ac7821581206/jiter-0.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32475a42b2ea7b344069dc1e81445cfc00b9d0e3ca837f0523072432332e9f74", size = 389710 },
+    { url = "https://files.pythonhosted.org/packages/f8/9b/92f9da9a9e107d019bcf883cd9125fa1690079f323f5a9d5c6986eeec3c0/jiter-0.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9931fd36ee513c26b5bf08c940b0ac875de175341cbdd4fa3be109f0492586", size = 345553 },
+    { url = "https://files.pythonhosted.org/packages/44/a6/6d030003394e9659cd0d7136bbeabd82e869849ceccddc34d40abbbbb269/jiter-0.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0820f4a3a59ddced7fce696d86a096d5cc48d32a4183483a17671a61edfddc", size = 376388 },
+    { url = "https://files.pythonhosted.org/packages/ad/8d/87b09e648e4aca5f9af89e3ab3cfb93db2d1e633b2f2931ede8dabd9b19a/jiter-0.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8ffc86ae5e3e6a93765d49d1ab47b6075a9c978a2b3b80f0f32628f39caa0c88", size = 511226 },
+    { url = "https://files.pythonhosted.org/packages/77/95/8008ebe4cdc82eac1c97864a8042ca7e383ed67e0ec17bfd03797045c727/jiter-0.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5127dc1abd809431172bc3fbe8168d6b90556a30bb10acd5ded41c3cfd6f43b6", size = 504134 },
+    { url = "https://files.pythonhosted.org/packages/a1/17/c8747af8ea4e045f57d6cfd6fc180752cab9bc3de0e8a0c9ca4e8af333b1/jiter-0.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e6ec2be506e7d6f9527dae9ff4b7f54e68ea44a0ef6b098256ddf895218a2f8f", size = 302027 },
+    { url = "https://files.pythonhosted.org/packages/3c/c1/6da849640cd35a41e91085723b76acc818d4b7d92b0b6e5111736ce1dd10/jiter-0.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76e324da7b5da060287c54f2fabd3db5f76468006c811831f051942bf68c9d44", size = 310326 },
+    { url = "https://files.pythonhosted.org/packages/06/99/a2bf660d8ccffee9ad7ed46b4f860d2108a148d0ea36043fd16f4dc37e94/jiter-0.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:180a8aea058f7535d1c84183c0362c710f4750bef66630c05f40c93c2b152a0f", size = 334242 },
+    { url = "https://files.pythonhosted.org/packages/a7/5f/cea1c17864828731f11427b9d1ab7f24764dbd9aaf4648a7f851164d2718/jiter-0.8.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025337859077b41548bdcbabe38698bcd93cfe10b06ff66617a48ff92c9aec60", size = 356654 },
+    { url = "https://files.pythonhosted.org/packages/e9/13/62774b7e5e7f5d5043efe1d0f94ead66e6d0f894ae010adb56b3f788de71/jiter-0.8.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecff0dc14f409599bbcafa7e470c00b80f17abc14d1405d38ab02e4b42e55b57", size = 379967 },
+    { url = "https://files.pythonhosted.org/packages/ec/fb/096b34c553bb0bd3f2289d5013dcad6074948b8d55212aa13a10d44c5326/jiter-0.8.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffd9fee7d0775ebaba131f7ca2e2d83839a62ad65e8e02fe2bd8fc975cedeb9e", size = 389252 },
+    { url = "https://files.pythonhosted.org/packages/17/61/beea645c0bf398ced8b199e377b61eb999d8e46e053bb285c91c3d3eaab0/jiter-0.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14601dcac4889e0a1c75ccf6a0e4baf70dbc75041e51bcf8d0e9274519df6887", size = 345490 },
+    { url = "https://files.pythonhosted.org/packages/d5/df/834aa17ad5dcc3cf0118821da0a0cf1589ea7db9832589278553640366bc/jiter-0.8.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92249669925bc1c54fcd2ec73f70f2c1d6a817928480ee1c65af5f6b81cdf12d", size = 376991 },
+    { url = "https://files.pythonhosted.org/packages/67/80/87d140399d382fb4ea5b3d56e7ecaa4efdca17cd7411ff904c1517855314/jiter-0.8.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e725edd0929fa79f8349ab4ec7f81c714df51dc4e991539a578e5018fa4a7152", size = 510822 },
+    { url = "https://files.pythonhosted.org/packages/5c/37/3394bb47bac1ad2cb0465601f86828a0518d07828a650722e55268cdb7e6/jiter-0.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bf55846c7b7a680eebaf9c3c48d630e1bf51bdf76c68a5f654b8524335b0ad29", size = 503730 },
+    { url = "https://files.pythonhosted.org/packages/6c/b0/bfa1f6f2c956b948802ef5a021281978bf53b7a6ca54bb126fd88a5d014e/jiter-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ca1f08b8e43dc3bd0594c992fb1fd2f7ce87f7bf0d44358198d6da8034afdf84", size = 301190 },
+    { url = "https://files.pythonhosted.org/packages/a4/8f/396ddb4e292b5ea57e45ade5dc48229556b9044bad29a3b4b2dddeaedd52/jiter-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5672a86d55416ccd214c778efccf3266b84f87b89063b582167d803246354be4", size = 309334 },
+    { url = "https://files.pythonhosted.org/packages/7f/68/805978f2f446fa6362ba0cc2e4489b945695940656edd844e110a61c98f8/jiter-0.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58dc9bc9767a1101f4e5e22db1b652161a225874d66f0e5cb8e2c7d1c438b587", size = 333918 },
+    { url = "https://files.pythonhosted.org/packages/b3/99/0f71f7be667c33403fa9706e5b50583ae5106d96fab997fa7e2f38ee8347/jiter-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b2998606d6dadbb5ccda959a33d6a5e853252d921fec1792fc902351bb4e2c", size = 356057 },
+    { url = "https://files.pythonhosted.org/packages/8d/50/a82796e421a22b699ee4d2ce527e5bcb29471a2351cbdc931819d941a167/jiter-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab9a87f3784eb0e098f84a32670cfe4a79cb6512fd8f42ae3d0709f06405d18", size = 379790 },
+    { url = "https://files.pythonhosted.org/packages/3c/31/10fb012b00f6d83342ca9e2c9618869ab449f1aa78c8f1b2193a6b49647c/jiter-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79aec8172b9e3c6d05fd4b219d5de1ac616bd8da934107325a6c0d0e866a21b6", size = 388285 },
+    { url = "https://files.pythonhosted.org/packages/c8/81/f15ebf7de57be488aa22944bf4274962aca8092e4f7817f92ffa50d3ee46/jiter-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:711e408732d4e9a0208008e5892c2966b485c783cd2d9a681f3eb147cf36c7ef", size = 344764 },
+    { url = "https://files.pythonhosted.org/packages/b3/e8/0cae550d72b48829ba653eb348cdc25f3f06f8a62363723702ec18e7be9c/jiter-0.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:653cf462db4e8c41995e33d865965e79641ef45369d8a11f54cd30888b7e6ff1", size = 376620 },
+    { url = "https://files.pythonhosted.org/packages/b8/50/e5478ff9d82534a944c03b63bc217c5f37019d4a34d288db0f079b13c10b/jiter-0.8.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:9c63eaef32b7bebac8ebebf4dabebdbc6769a09c127294db6babee38e9f405b9", size = 510402 },
+    { url = "https://files.pythonhosted.org/packages/8e/1e/3de48bbebbc8f7025bd454cedc8c62378c0e32dd483dece5f4a814a5cb55/jiter-0.8.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:eb21aaa9a200d0a80dacc7a81038d2e476ffe473ffdd9c91eb745d623561de05", size = 503018 },
+    { url = "https://files.pythonhosted.org/packages/2f/3c/71a491952c37b87d127790dd7a0b1ebea0514c6b6ad30085b16bbe00aee6/jiter-0.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b426f72cd77da3fec300ed3bc990895e2dd6b49e3bfe6c438592a3ba660e41ca", size = 308347 },
+    { url = "https://files.pythonhosted.org/packages/a0/4c/c02408042e6a7605ec063daed138e07b982fdb98467deaaf1c90950cf2c6/jiter-0.8.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2dd880785088ff2ad21ffee205e58a8c1ddabc63612444ae41e5e4b321b39c0", size = 342875 },
+]
+
 [[package]]
 name = "jmespath"
 version = "1.0.1"
@@ -631,6 +691,25 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 },
 ]
 
+[[package]]
+name = "openai"
+version = "1.60.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "anyio", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "distro", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "httpx", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "jiter", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "sniffio", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "tqdm", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4c/c4/a220c957aa4097f25498770c6eff8f3abd35934a8859e7a78928a8a70846/openai-1.60.1.tar.gz", hash = "sha256:beb1541dfc38b002bd629ab68b0d6fe35b870c5f4311d9bc4404d85af3214d5e", size = 348070 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/7a/ad/55b2d03feda5a0adc0a86048dcb7c9863fd24a3726815a04d5669e82e41e/openai-1.60.1-py3-none-any.whl", hash = "sha256:714181ec1c452353d456f143c22db892de7b373e3165063d02a2b798ed575ba1", size = 456110 },
+]
+
 [[package]]
 name = "parso"
 version = "0.8.4"
@@ -814,6 +893,64 @@ version = "1.6.1"
 source = { registry = "https://pypi.org/simple" }
 sdist = { url = "https://files.pythonhosted.org/packages/44/66/2c17bae31c906613795711fc78045c285048168919ace2220daa372c7d72/pyaes-1.6.1.tar.gz", hash = "sha256:02c1b1405c38d3c370b085fb952dd8bea3fadcee6411ad99f312cc129c536d8f", size = 28536 }
 
+[[package]]
+name = "pydantic"
+version = "2.10.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "annotated-types", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "pydantic-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+    { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696 },
+]
+
+[[package]]
+name = "pydantic-core"
+version = "2.27.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c2/89/f3450af9d09d44eea1f2c369f49e8f181d742f28220f88cc4dfaae91ea6e/pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc", size = 1893421 },
+    { url = "https://files.pythonhosted.org/packages/9e/e3/71fe85af2021f3f386da42d291412e5baf6ce7716bd7101ea49c810eda90/pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7", size = 1814998 },
+    { url = "https://files.pythonhosted.org/packages/a6/3c/724039e0d848fd69dbf5806894e26479577316c6f0f112bacaf67aa889ac/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15", size = 1826167 },
+    { url = "https://files.pythonhosted.org/packages/2b/5b/1b29e8c1fb5f3199a9a57c1452004ff39f494bbe9bdbe9a81e18172e40d3/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306", size = 1865071 },
+    { url = "https://files.pythonhosted.org/packages/89/6c/3985203863d76bb7d7266e36970d7e3b6385148c18a68cc8915fd8c84d57/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99", size = 2036244 },
+    { url = "https://files.pythonhosted.org/packages/0e/41/f15316858a246b5d723f7d7f599f79e37493b2e84bfc789e58d88c209f8a/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459", size = 2737470 },
+    { url = "https://files.pythonhosted.org/packages/a8/7c/b860618c25678bbd6d1d99dbdfdf0510ccb50790099b963ff78a124b754f/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048", size = 1992291 },
+    { url = "https://files.pythonhosted.org/packages/bf/73/42c3742a391eccbeab39f15213ecda3104ae8682ba3c0c28069fbcb8c10d/pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d", size = 1994613 },
+    { url = "https://files.pythonhosted.org/packages/94/7a/941e89096d1175d56f59340f3a8ebaf20762fef222c298ea96d36a6328c5/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b", size = 2002355 },
+    { url = "https://files.pythonhosted.org/packages/6e/95/2359937a73d49e336a5a19848713555605d4d8d6940c3ec6c6c0ca4dcf25/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474", size = 2126661 },
+    { url = "https://files.pythonhosted.org/packages/2b/4c/ca02b7bdb6012a1adef21a50625b14f43ed4d11f1fc237f9d7490aa5078c/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6", size = 2153261 },
+    { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 },
+    { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 },
+    { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 },
+    { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 },
+    { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 },
+    { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 },
+    { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 },
+    { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 },
+    { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 },
+    { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 },
+    { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 },
+    { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 },
+    { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 },
+    { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 },
+    { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 },
+    { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 },
+    { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 },
+    { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 },
+    { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 },
+    { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 },
+    { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 },
+    { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 },
+]
+
 [[package]]
 name = "pyee"
 version = "12.1.1"
@@ -1022,6 +1159,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 },
 ]
 
+[[package]]
+name = "tqdm"
+version = "4.67.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540 },
+]
+
 [[package]]
 name = "traitlets"
 version = "5.14.3"