Commit a0c29bf
Changed files (2)
src
src/llm/response.py
@@ -11,7 +11,7 @@ from openai import AsyncOpenAI
from config import ENABLE, GPT, TZ
from llm.models import openrouter_hook
from llm.tools import add_tools, get_online_search_result
-from llm.utils import beautify_model_name, change_system_prompt
+from llm.utils import beautify_model_name, change_system_prompt, extract_reasoning
from messages.progress import modify_progress
from utils import nowdt
@@ -124,7 +124,9 @@ async def parse_response(config: dict, response: dict) -> dict[str, str]:
return response | {"content": "", "reasoning": "", "reasoning_model": "", "bot_msg_prefix": config["bot_msg_prefix"]}
try:
content = glom(choice, "message.content", default="") or ""
- reasoning = glom(choice, "message.reasoning", default="") or ""
+ reasoning, content = extract_reasoning(content) # extract reasoning from content (<think>...</think>)
+ if not reasoning:
+ reasoning = glom(choice, "message.reasoning", default="") or ""
primary_model = glom(config, "completions.model", default="") or ""
used_model = glom(response, "model", default="") or ""
response = {"content": content.strip(), "reasoning": reasoning.strip(), "reasoning_model": used_model, "bot_msg_prefix": config["bot_msg_prefix"]}
src/llm/utils.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+import re
from pathlib import Path
import tiktoken
@@ -94,3 +95,12 @@ def beautify_model_name(name: str) -> str:
name = name.replace("-latest", "")
return name.replace("gpt", "GPT").replace("deepseek", "DeepSeek").title() # O1-Preview


def extract_reasoning(text: str) -> tuple[str, str]:
    """Split an inline ``<think>...</think>`` reasoning block out of *text*.

    Some models embed their chain-of-thought in the message content wrapped
    in ``<think>`` tags instead of a separate ``reasoning`` field. This pulls
    out the first such block (non-greedy, DOTALL so it may span newlines).

    Returns a ``(reasoning, remainder)`` tuple, both stripped of surrounding
    whitespace. If no complete tag pair is present, ``reasoning`` is ``""``
    and *text* is returned (stripped) unchanged. Only the first block is
    removed; any later ``<think>`` blocks stay in the remainder.
    """
    if matched := re.search(r"<think>(.*?)</think>", text, re.DOTALL):
        # Slice around the matched span instead of re-scanning with re.sub —
        # one regex pass, same result as removing the first occurrence.
        remainder = text[: matched.start()] + text[matched.end() :]
        return matched.group(1).strip(), remainder.strip()
    return "", text.strip()