Commit 9aee836
Changed files (2)
src
src/llm/summary.py
@@ -19,7 +19,7 @@ from messages.parser import parse_msg
from messages.progress import modify_progress
from messages.sender import send2tg
from messages.utils import equal_prefix, startswith_prefix, to_int
-from utils import nowdt, rand_number
+from utils import nowdt, rand_number, strings_list
HELP = f"""🤖**AI总结历史消息** (最多{MAX_MESSAGE_SUMMARY}条)
⚠️使用`{PREFIX.COMBINATION}`命令只生成聊天记录文件, 不进行AI总结
@@ -144,7 +144,13 @@ async def ai_summary(client: Client, message: Message, summary_prefix: str | Non
return
# set custom chat_id and message_id (useful for debug)
if matched := re.search(r"cid=(-?\w+)", info["text"], re.IGNORECASE):
- info["cid"] = to_int(matched.group(1))
+ # check if cid is in whitelist
+ cid = to_int(matched.group(1))
+ if str(cid) in strings_list(GPT.SUMMARY_WHITELIST_CUSTOM_CHATS):
+ info["cid"] = to_int(matched.group(1))
+ else:
+ await send2tg(client, message, texts="该chatid不在白名单中, 无法对其进行总结", **kwargs)
+ return
if matched := re.search(r"mid=(\d+)", info["text"], re.IGNORECASE):
offset_id = int(matched.group(1)) + 1 # include this message
if kwargs.get("show_progress") and "progress" not in kwargs:
src/config.py
@@ -347,7 +347,8 @@ class GPT:
# AI summary (/summary)
SUMMARY_CMD = os.getenv("GPT_SUMMARY_CMD", "/gemini") # add this command prefix to call AI summary
-
+    # Comma-separated chat IDs that are allowed to be targeted via the `cid` argument of the summary command
+ SUMMARY_WHITELIST_CUSTOM_CHATS = os.getenv("GPT_SUMMARY_WHITELIST_CUSTOM_CHATS", "")
    # For tool_call. Some models don't support tool calls, so we use this model to do the tool_call first.
# Then construct the new questions for the original model.
TOOLS_MODEL = os.getenv("GPT_TOOLS_MODEL", "gpt-4o-mini") # this model should be fast and cheap