Commit 78eb438

benny-dou <60535774+benny-dou@users.noreply.github.com>
2025-02-18 11:35:37
fix(gpt): only include cited URLs in reasoning response
1 parent 145ab18
Changed files (4)
src/llm/gpt.py
@@ -103,8 +103,7 @@ async def gpt_response(client: Client, message: Message, **kwargs):
     response = await send_to_gpt(config, **kwargs)
     media = []
     if reasoning := response.get("reasoning"):
-        reasoning_model = f"推理模型: {response['reasoning_model']}\n\n" if response.get("reasoning_model") else ""
-        media = [{"document": save_txt(f"{reasoning_model}{reasoning}", f"{DOWNLOAD_DIR}/GPT-Reasoning-{rand_number()}.txt")}]
+        media = [{"document": save_txt(reasoning, f"{DOWNLOAD_DIR}/GPT-Reasoning-{rand_number()}.txt")}]
     if content := response.get("content"):
         texts = f"🤖**{response['model']}**: ({BOT_TIPS})\n\n{content}"
         logger.debug(texts)
src/llm/response.py
@@ -145,8 +145,8 @@ def add_search_results_to_response(search_results: list[dict], response: str) ->
         return response
     response = response.strip()
     for idx, result in enumerate(search_results):
-        title = result.get("title", "")[:15]
+        title = result.get("title", "")[:20]
         link = result.get("link", "")
-        if link.startswith("http"):
+        if link.startswith("http") and f"({link})" in response:
             response += f"\n{number_to_emoji(idx + 1)} [{title}]({link})"
     return response.strip()
src/llm/summary.py
@@ -100,7 +100,6 @@ def get_summay_model(contexts: list[dict]) -> dict:
     """Get the model for the summary."""
     models = {"text": GPT.TEXT_MODEL, "image": GPT.IMAGE_MODEL}
     model_names = {"text": GPT.TEXT_MODEL_NAME, "image": GPT.IMAGE_MODEL_NAME}
-    timeouts = {"text": GPT.TEXT_TIMEOUT, "image": GPT.IMAGE_TIMEOUT}
     apis = {"text": GPT.TEXT_API_KEY, "image": GPT.IMAGE_API_KEY}
     urls = {"text": GPT.TEXT_BASE_URL, "image": GPT.IMAGE_BASE_URL}
     # model_type = "image" if "photo" in {x["mtype"] for x in history} else "text"
@@ -109,7 +108,7 @@ def get_summay_model(contexts: list[dict]) -> dict:
     config = {
         "model": model,
         "friendly_name": model_names[model_type],
-        "timeout": round(float(timeouts[model_type])),
+        "timeout": round(float(GPT.TIMEOUT)),
         "base_url": urls[model_type],
         "key": apis[model_type],
         "temperature": float(GPT.TEMPERATURE),
@@ -122,7 +121,7 @@ def get_summay_model(contexts: list[dict]) -> dict:
         "client": {
             "api_key": apis[model_type],
             "base_url": urls[model_type],
-            "timeout": round(float(timeouts[model_type])),
+            "timeout": round(float(GPT.TIMEOUT)),
             "http_client": DefaultAsyncHttpxClient(proxy=PROXY.GPT),
         },
         "completions": completions,
src/config.py
@@ -149,7 +149,7 @@ class GPT:  # see `llm/README.md`
     VIDEO_MODEL_NAME = os.getenv("GPT_VIDEO_MODEL_NAME", "GLM-4V-Plus")
     GLM_API_KEY = os.getenv("GPT_GLM_API_KEY", "")
     GLM_BASE_URL = os.getenv("GPT_GLM_BASE_URL", "https://open.bigmodel.cn/api/paas/v4")
-    SEARCH_NUM_RESULTS = os.getenv("GPT_SEARCH_NUM_RESULTS", "5")
+    SEARCH_NUM_RESULTS = os.getenv("GPT_SEARCH_NUM_RESULTS", "10")
     PRIMARY_SEARCH_ENGINE = os.getenv("GPT_PRIMARY_SEARCH_ENGINE", "google")  # google or glm
     TIMEOUT = os.getenv("GPT_TIMEOUT", "300")
     TEMPERATURE = os.getenv("GPT_TEMPERATURE", "1.0")