Commit ccaf718

benny-dou <60535774+benny-dou@users.noreply.github.com>
2025-12-21 07:55:51
chore(gpt): split `enable_tools` into `enable_gpt_tools` and `enable_gemini_tools` flags
1 parent a09d31a
Changed files (3)
src/llm/gpt.py
@@ -49,7 +49,8 @@ async def gpt_response(
     *,
     custom_model_id: str = "",
     custom_model_name: str = "",
-    enable_tools: bool = True,
+    enable_gpt_tools: bool = True,
+    enable_gemini_tools: bool = True,
     silent: bool = False,
     **kwargs,
 ) -> dict:
@@ -63,7 +64,8 @@ async def gpt_response(
         message (Message): The trigger message object.
         custom_model_id (str, optional): Custom model id.
         custom_model_name (str, optional): Custom model name.
-        enable_tools (bool, optional): Whether to enable tools. Defaults to True.
+        enable_gpt_tools (bool, optional): Whether to enable GPT tools. Defaults to True.
+        enable_gemini_tools (bool, optional): Whether to enable Gemini tools. Defaults to True.
         silent (bool, optional): Whether to suppress the progress message. Defaults to False.
 
     Returns:
@@ -93,7 +95,7 @@ async def gpt_response(
         cache.set(f"gpt-{info['cid']}-{media_group_id}", "1", ttl=120)
     kwargs["message_info"] = info  # save trigger message info
     if resp_modality == "image":
-        return await text2img(client, message, enable_tools=enable_tools, **kwargs)
+        return await text2img(client, message, enable_tools=enable_gemini_tools, **kwargs)
 
     # handle custom model_id here
     if matched := re.match(r"^/ai @([a-zA-Z0-9_\-\.]+)(\s+)?", info["text"]):  # match /ai @custom_model_id
@@ -109,12 +111,12 @@ async def gpt_response(
             message,
             model_id=custom_model_id,
             model_name=custom_model_name or custom_model_id,
-            enable_tools=enable_tools,
+            enable_tools=enable_gemini_tools,
             silent=silent,
             **kwargs,
         )
     if model_id == GEMINI.TEXT_MODEL and not custom_model_id:
-        return await gemini_chat_completion(client, message, enable_tools=enable_tools, silent=silent, **kwargs)
+        return await gemini_chat_completion(client, message, enable_tools=enable_gemini_tools, silent=silent, **kwargs)
 
     # GPT models
     if custom_model_id:
@@ -128,7 +130,7 @@ async def gpt_response(
     if not silent and kwargs.get("show_progress"):
         kwargs["progress"] = (await send2tg(client, message, texts=msg, **kwargs))[0]
 
-    if enable_tools:
+    if enable_gpt_tools:
         config, response = await merge_tools_response(config, **kwargs)
         # skip sending a new request if tool_model is the same as the current model
         if response and config["completions"]["model"] == GPT.TOOLS_MODEL and response.get("content"):
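
A minimal call-site sketch of the split (the `src.llm.gpt` import path is inferred from the file name above; `client` and `message` stand in for the real client and trigger-message objects):

    # Sketch only: exercises the two new flags with their documented defaults.
    from src.llm.gpt import gpt_response

    async def respond_without_gpt_tools(client, message):
        # Gemini-routed paths (text2img, custom Gemini models,
        # gemini_chat_completion) now follow enable_gemini_tools, while the
        # merge_tools_response step for GPT models follows enable_gpt_tools.
        return await gpt_response(
            client,
            message,
            enable_gpt_tools=False,    # skip merge_tools_response for GPT models
            enable_gemini_tools=True,  # keep tools for Gemini-routed requests
        )
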
src/llm/summary.py
@@ -215,7 +215,8 @@ async def ai_summary(
         custom_model_id=summary_model_id,
         custom_model_name=summary_model_name,
         system_prompt=SYSTEM_PROMPT,
-        enable_tools=False,
+        enable_gpt_tools=False,
+        enable_gemini_tools=False,
         include_thoughts=False,
         append_grounding=False,
         silent=True,
src/ytdlp/main.py
@@ -151,7 +151,15 @@ async def preview_ytdlp(
             text=Str(f"{strings_list(PREFIX.GPT)[0]} {prompt}"),
             reply_to_message=Message(id=rand_number(), chat=message.chat, text=Str(subtitles)),
         )
-        params = {"include_thoughts": False, "append_grounding": False, "silent": True, "custom_model_id": summary_model_id, "custom_model_name": summary_model_name}
+        params = {
+            "include_thoughts": False,
+            "append_grounding": False,
+            "silent": True,
+            "custom_model_id": summary_model_id,
+            "custom_model_name": summary_model_name,
+            "enable_gpt_tools": False,
+            "enable_gemini_tools": True,
+        }
         aires = await gpt_response(client, ai_msg, **params)
         if aires.get("texts"):
             summary = f"🤖<b>{aires['model_name']} summary:</b>\n{markdown.markdown(aires['texts'])}\n"
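
A quick, illustrative sanity check that the old combined flag is gone and both new flags keep the documented default of True (the import path is an assumption):

    # Illustrative only: verify the new gpt_response signature.
    import inspect
    from src.llm.gpt import gpt_response

    sig = inspect.signature(gpt_response)
    assert sig.parameters["enable_gpt_tools"].default is True
    assert sig.parameters["enable_gemini_tools"].default is True
    assert "enable_tools" not in sig.parameters  # old combined flag removed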