Commit 44004ae
Changed files (2)
src
src/llm/response.py
@@ -11,7 +11,7 @@ from openai import AsyncOpenAI
from config import ENABLE, GPT, TZ
from llm.models import openrouter_hook
from llm.tools import add_tools, get_online_search_result
-from llm.utils import change_system_prompt
+from llm.utils import beautify_model_name, change_system_prompt
from messages.progress import modify_progress
from utils import nowdt
@@ -128,7 +128,7 @@ async def parse_response(config: dict, response: dict) -> dict[str, str]:
response = {"content": content.strip(), "reasoning": reasoning.strip(), "reasoning_model": used_model, "bot_msg_prefix": config["bot_msg_prefix"]}
if not (used_model in primary_model or primary_model in used_model):
# do not use `!=` to compare. (deepseek/deepseek-r1:free != deepseek/deepseek-r1, gpt-4o != gpt-4o-2024-07-18)
- used_model = used_model.split("/")[-1]
+ used_model = beautify_model_name(used_model)
logger.warning(f"Fallback model {primary_model} -> {used_model}")
if ENABLE.GPT_WARN_FALLBACK:
response["bot_msg_prefix"] = response["bot_msg_prefix"].replace(config["friendly_name"], used_model)
src/llm/utils.py
@@ -62,3 +62,24 @@ def change_system_prompt(context: list[dict], prompt: str) -> list[dict]:
return context
context.insert(0, {"role": "system", "content": prompt})
return context
+
+
def beautify_model_name(name: str) -> str:
    """Beautify a raw model identifier for display.

    Example: "openai/o1-preview:online" -> "O1-Preview"

    Args:
        name: raw model name, possibly with a provider prefix ("openai/"),
            a variant suffix (":online", ":free"), and/or "-latest".
    Returns:
        beautified model name, title-cased with brand capitalization
        ("GPT", "DeepSeek") preserved.
    """
    # Drop the ":" suffix if present. split(":")[0] is safe when there is
    # no colon (the previous `"".join(name.split(":")[:-1])` returned ""
    # for colon-free names such as "gpt-4o-2024-07-18").
    name = name.split(":")[0]  # openai/o1-preview

    # Drop the provider prefix up to the last "/".
    name = name.split("/")[-1]  # o1-preview

    # Drop the "-latest" variant marker.
    name = name.replace("-latest", "")

    # Title-case FIRST, then restore brand capitalization: .title() would
    # otherwise mangle it ("GPT-4o".title() -> "Gpt-4O"), making the
    # replaces dead code if applied before .title().
    return name.title().replace("Gpt", "GPT").replace("Deepseek", "DeepSeek")  # O1-Preview