Commit 84ac0c2

benny-dou <60535774+benny-dou@users.noreply.github.com>
2025-02-26 03:33:10
fix(gpt): remove reasoning content from contexts
1 parent 2fcca6a
Changed files (2)
src/llm/contexts.py
@@ -73,16 +73,15 @@ async def single_context(client: Client, message: Message) -> dict:
             return ""
         for prefix in [PREFIX.GPT, "/gpt", "/gemini", "/ds"]:
             text = text.removeprefix(prefix).strip()
-        return re.sub(rf"(.*?){BOT_TIPS}\)", "", text, flags=re.DOTALL).strip()
+        # remove bot tips
+        text = re.sub(rf"(.*?){BOT_TIPS}\)", "", text, flags=re.DOTALL).strip()
+        # remove reasoning
+        return re.sub(r"^🤔(.*?)💡", "", text, flags=re.DOTALL).strip()
 
     info = parse_msg(message, silent=True)
     role = "assistant" if f"{BOT_TIPS})" in info["text"] else "user"
-    texts = clean_text(info["text"])
-    # only text
-    if info["mtype"] == "text" and texts:
-        return {"role": role, "content": [{"type": "text", "text": texts}]}
 
-    if info["mtype"] not in ["photo", "voice", "video", "document"]:
+    if info["mtype"] not in ["text", "photo", "voice", "video", "document"]:
         return {}
 
     # has media
@@ -98,11 +97,9 @@ async def single_context(client: Client, message: Message) -> dict:
                     media.append({"type": "image_url", "image_url": {"url": f"data:image/{res['ext']};base64,{res['base64']}"}})
                 # elif info["mtype"] == "video":
                 #     media.append({"type": "video_url", "video_url": {"url": b64}})
-                elif info["mtype"] == "document" and info["mime_type"] == "text/plain" and not info["file_name"].startswith("GPT-Reasoning"):  # skip GPT reasoning
+                elif info["mtype"] == "document" and info["mime_type"] == "text/plain":
                     res = await base64_media(client, msg)
                     media.append({"type": "text", "text": res["value"]})
-                else:
-                    logger.warning(f"Skip message type: {info['mtype']}")
             else:
                 path: str = await client.download_media(msg)  # type: ignore
                 logger.debug(f"Downloaded GPT media: {path}")
@@ -110,11 +107,9 @@ async def single_context(client: Client, message: Message) -> dict:
                     media.append({"type": "image_url", "image_url": {"url": f"{GPT.MEDIA_SERVER}/{Path(path).name}"}})
                 # elif info["mtype"] == "video":
                 #     media.append({"type": "video_url", "video_url": {"url": f"{GPT.MEDIA_SERVER}/{Path(path).name}"}})
-                elif info["mtype"] == "document" and info["mime_type"] == "text/plain" and not info["file_name"].startswith("GPT-Reasoning"):  # skip GPT reasoning
+                elif info["mtype"] == "document" and info["mime_type"] == "text/plain":
                     media.append({"type": "text", "text": Path(path).read_text()})
                     Path(path).unlink(missing_ok=True)
-                else:
-                    logger.warning(f"Unsupported message type: {info['mtype']}")
             if msg_text:
                 media.append({"type": "text", "text": msg_text})
         except Exception as e:
src/llm/response.py
@@ -128,7 +128,7 @@ async def parse_response(config: dict, response: dict) -> dict[str, str]:
             reasoning = glom(choice, Coalesce("message.reasoning_content", "message.reasoning"), default="") or ""
         if reasoning and str(reasoning) != "None":  # add expandable block quotation mark for reasoning
             reasoning = reasoning.strip().replace("\n", "\n> ")
-            reasoning = f"**> 🤔{reasoning}"
+            reasoning = f"**> 🤔{reasoning}💡"  # if you change this line, remember to update the reasoning-removal regex in `llm/contexts.py`
 
         primary_model = glom(config, "completions.model", default="") or ""
         used_model = glom(response, "model", default="") or ""