Commit ef84345
Changed files (6)
src/llm/contexts.py
@@ -70,7 +70,7 @@ async def single_context(client: Client, message: Message) -> dict:
def clean_text(text: str) -> str:
if not text:
return ""
- return re.sub(rf"(.*?){BOT_TIPS}\)", "", text.removeprefix(PREFIX.GPT)).strip()
+ return re.sub(rf"(.*?){BOT_TIPS}\)", "", text.removeprefix(PREFIX.GPT), flags=re.DOTALL).strip()
info = parse_msg(message, silent=True)
role = "assistant" if BOT_TIPS in info["text"] else "user"
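Note: the added flags=re.DOTALL lets the lazy (.*?) group cross newlines, so the whole leading block up to and including the first "…{BOT_TIPS})" marker is stripped, not just the text on the marker's own line. A minimal sketch of the difference (the message layout below is an assumption, chosen only to show the newline behaviour; the real code also strips PREFIX.GPT first):

    import re

    BOT_TIPS = "回复以继续"  # constant from src/llm/utils.py after this commit
    text = "GPT-4o\n(回复以继续)\nHello, world"

    # Without DOTALL "." cannot match "\n": only the tip line itself is removed
    # and the model-name line above it is left behind.
    print(repr(re.sub(rf"(.*?){BOT_TIPS}\)", "", text).strip()))
    # 'GPT-4o\n\nHello, world'

    # With DOTALL the lazy group spans the newline, so everything up to and
    # including the first "...回复以继续)" is removed in one match.
    print(repr(re.sub(rf"(.*?){BOT_TIPS}\)", "", text, flags=re.DOTALL).strip()))
    # 'Hello, world'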
src/llm/gpt.py
@@ -82,7 +82,7 @@ async def gpt_response(client: Client, message: Message, **kwargs):
response = await get_gpt_response(config, **kwargs)
media = [{"document": save_txt(reasoning, f"{DOWNLOAD_DIR}/GPT-Reasoning-{rand_number()}.txt")}] if (reasoning := response.get("reasoning")) else []
if content := response.get("content"):
- texts = f"{config['bot_msg_prefix']}\n\n{content}"
+ texts = f"{response['bot_msg_prefix']}\n\n{content}"
logger.debug(texts)
await send2tg(client, message, texts=texts, media=media, **kwargs)
await modify_progress(del_status=True, **kwargs)
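Note: the prefix is now read from the parsed response instead of the model config, because parse_tool_call (src/llm/response.py below) copies config["bot_msg_prefix"] into its result and may rewrite it with a fallback notice; reading it from config would silently drop that notice. Rough shape of the dict gpt_response now consumes (field values are illustrative, not taken from the repo):

    # Sketch of the parse_tool_call result after this commit; values are made up.
    response = {
        "content": "the model's answer",
        "reasoning": "optional reasoning trace, saved to a .txt attachment",
        # Equal to config["bot_msg_prefix"] unless a fallback was detected and
        # ENABLE.GPT_WARN_FALLBACK is on, in which case a "(发生回退: <model>)"
        # line is inserted ahead of the "(回复以继续)" tips line.
        "bot_msg_prefix": "(发生回退: deepseek-r1)\n(回复以继续)",
    }
    texts = f"{response['bot_msg_prefix']}\n\n{response['content']}"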
src/llm/models.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-from loguru import logger
from openai import DefaultAsyncHttpxClient
from pyrogram.types import Message
@@ -73,7 +72,6 @@ def get_model_config_with_contexts(model_type: str, contexts: list[dict]) -> dic
"completions": completions,
}
- logger.trace(config)
return config
@@ -84,5 +82,5 @@ def openrouter_hook(base_url: str) -> dict:
params = {}
params |= {"extra_body": {"include_reasoning": True}}
if models := [x.strip() for x in GPT.FALLBACK_MODELS.split(",") if x.strip()]:
- params |= {"extra_body": {"models": models}}
+ params["extra_body"]["models"] = models
return params
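Note: the old params |= {"extra_body": {"models": models}} replaced the entire extra_body value, discarding the include_reasoning flag set two lines above; assigning into the existing nested dict keeps both keys. A standalone illustration of the difference:

    params = {}
    params |= {"extra_body": {"include_reasoning": True}}

    # Old behaviour: "|=" merges at the top level only, so the second extra_body
    # dict overwrites the first and include_reasoning is lost.
    broken = dict(params)
    broken |= {"extra_body": {"models": ["model-a", "model-b"]}}
    print(broken)  # {'extra_body': {'models': ['model-a', 'model-b']}}

    # New behaviour: mutate the existing nested dict, keeping both keys.
    params["extra_body"]["models"] = ["model-a", "model-b"]
    print(params)  # {'extra_body': {'include_reasoning': True, 'models': ['model-a', 'model-b']}}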
src/llm/response.py
@@ -7,9 +7,9 @@ from glom import glom
from loguru import logger
from openai import AsyncOpenAI
-from config import GPT, TZ
+from config import ENABLE, GPT, TZ
from llm.tool_call import get_online_search_result
-from llm.utils import change_system_prompt
+from llm.utils import BOT_TIPS, change_system_prompt
from messages.progress import modify_progress
from utils import nowdt
@@ -91,7 +91,16 @@ async def parse_tool_call(openai: AsyncOpenAI, config: dict, response: dict, **k
logger.debug(response)
content = glom(response, "choices.0.message.content", default="") or ""
reasoning = glom(response, "choices.0.message.reasoning", default="") or ""
+ res = {"content": content.strip(), "reasoning": reasoning.strip(), "bot_msg_prefix": config["bot_msg_prefix"]}
+ primary_model = glom(config, "completions.model", default="") or ""
+ used_model = glom(response, "model", default="") or ""
+ if not (used_model in primary_model or primary_model in used_model):
+ # do not use `!=` to compare. (deepseek/deepseek-r1:free != deepseek/deepseek-r1, gpt-4o != gpt-4o-2024-07-18)
+ used_model = used_model.split("/")[-1]
+ logger.warning(f"Fallback model {primary_model} -> {used_model}")
+ if ENABLE.GPT_WARN_FALLBACK:
+ res["bot_msg_prefix"] = res["bot_msg_prefix"].replace(f"({BOT_TIPS})", f"(发生回退: {used_model})\n({BOT_TIPS})")
except Exception as e:
logger.error(f"GPT failed: {e}")
raise
- return {"content": content.strip(), "reasoning": reasoning.strip()}
+ return res
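Note: the fallback check deliberately avoids != and instead treats the configured and returned model as the same whenever either ID contains the other, because providers report one model under slightly different names (deepseek/deepseek-r1:free vs deepseek/deepseek-r1, gpt-4o vs gpt-4o-2024-07-18). A sketch of that comparison as a hypothetical helper (not part of the commit):

    def is_fallback(primary_model: str, used_model: str) -> bool:
        # Mirrors the check in parse_tool_call: same model if either ID
        # is a substring of the other.
        return not (used_model in primary_model or primary_model in used_model)

    # Same model under different ID spellings -> no warning.
    assert not is_fallback("deepseek/deepseek-r1", "deepseek/deepseek-r1:free")
    assert not is_fallback("gpt-4o-2024-07-18", "gpt-4o")

    # Genuinely different model -> logger.warning fires, and the
    # "(发生回退: ...)" line is added when ENABLE.GPT_WARN_FALLBACK is on.
    assert is_fallback("openai/gpt-4o", "deepseek/deepseek-r1")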
src/llm/utils.py
@@ -7,7 +7,7 @@ from loguru import logger
from config import DOWNLOAD_DIR, GPT
-BOT_TIPS = "回复此消息以继续对话"
+BOT_TIPS = "回复以继续"
def llm_cleanup_files(messages: list[dict]):
src/config.py
@@ -35,6 +35,7 @@ class ENABLE:
DOUYIN = os.getenv("ENABLE_DOUYIN", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
GPT = os.getenv("ENABLE_GPT", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
GPT_ONLINE_SEARCH = os.getenv("ENABLE_GPT_ONLINE_SEARCH", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
+ GPT_WARN_FALLBACK = os.getenv("ENABLE_GPT_WARN_FALLBACK", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
INSTAGRAM = os.getenv("ENABLE_INSTAGRAM", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
OCR = os.getenv("ENABLE_OCR", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
PRICE = os.getenv("ENABLE_PRICE", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
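Note: ENABLE_GPT_WARN_FALLBACK follows the same truthy-string pattern as the other ENABLE flags and defaults to on; switching it off suppresses only the in-chat "(发生回退: ...)" notice, while the logger.warning in src/llm/response.py still fires. For example (the environment value here is illustrative):

    import os

    # Any value outside the accepted list ("1", "y", "yes", "t", "true", "on") disables it.
    os.environ["ENABLE_GPT_WARN_FALLBACK"] = "0"
    warn = os.getenv("ENABLE_GPT_WARN_FALLBACK", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
    print(warn)  # False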