Commit 989c84f
Changed files (4)
src
src/llm/summary.py
@@ -181,6 +181,7 @@ async def ai_summary(client: Client, message: Message, summary_prefix: str | Non
client,
ai_msg,
custom_model_id=GPT.CHAT_SUMMARY_MODEL_ID,
+ custom_model_name=GPT.CHAT_SUMMARY_MODEL_NAME,
system_prompt=SYSTEM_PROMPT,
enable_tools=False,
include_thoughts=False,
src/podcast/main.py
@@ -37,6 +37,7 @@ from database.r2 import get_cf_r2, set_cf_r2
from llm.gpt import gpt_response
from llm.utils import convert_html, convert_md, remove_consecutive_newlines
from messages.sender import send2tg
+from messages.utils import blockquote
from networking import download_file, hx_req
from podcast.asr import backup_audio, get_duration, get_transcripts
from podcast.utils import HEADERS, clean_feed_url, feed_saved_target, get_pubdate, remove_img_tag
@@ -117,7 +118,7 @@ async def summary_pods(client: Client):
silent=True,
)
if gpt_res.get("texts"):
- await send2tg(client, txt_msg, texts=gpt_res["prefix"] + gpt_res["texts"])
+ await send2tg(client, txt_msg, texts=gpt_res["prefix"] + blockquote(gpt_res["texts"]))
processed_xml = await update_xml_desc(feed_url, processed_xml, entry, summary=gpt_res.get("texts", ""), audio_path=info["asr_path"])
await set_cf_r2(entry["db_key"], data={"title": entry["title"], "url": entry["link"]})
has_update = True
src/subtitles/subtitle.py
@@ -16,7 +16,7 @@ from llm.gpt import gpt_response
from messages.parser import parse_msg
from messages.progress import modify_progress
from messages.sender import send2tg
-from messages.utils import delete_message, equal_prefix
+from messages.utils import blockquote, delete_message, equal_prefix
from networking import match_social_media_link
from preview.bilibili import get_bilibili_vinfo
from preview.youtube import get_youtube_vinfo
@@ -126,10 +126,10 @@ async def get_subtitle(client: Client, message: Message, *, to_telegraph: bool =
text=Str(f"/ai {prompt}"),
reply_to_message=Message(id=rand_number(), chat=subtitle_msg.chat, text=Str(subtitles)),
)
- kwargs |= {"include_thoughts": False, "append_grounding": False, "silent": True, "custom_model_id": GPT.SUBTITLE_SUMMARY_MODEL_ID}
+ kwargs |= {"include_thoughts": False, "append_grounding": False, "silent": True, "custom_model_id": GPT.SUBTITLE_SUMMARY_MODEL_ID, "custom_model_name": GPT.SUBTITLE_SUMMARY_MODEL_NAME}
res = await gpt_response(client, ai_msg, **kwargs)
if res.get("texts"):
- await send2tg(client, ai_msg, texts=res["prefix"] + res["texts"], **kwargs)
+ await send2tg(client, ai_msg, texts=res["prefix"] + blockquote(res["texts"]), **kwargs)
with contextlib.suppress(Exception):
[await delete_message(msg) for msg in res.get("sent_messages", [])]
await delete_message(kwargs.get("progress"))
src/config.py
@@ -419,9 +419,11 @@ class GPT:
# comma separated chat ids that are allowed to use `cid` as the chatid for the summary
SUMMARY_WHITELIST_CUSTOM_CHATS = os.getenv("GPT_SUMMARY_WHITELIST_CUSTOM_CHATS", "")
CHAT_SUMMARY_MODEL_ID = os.getenv("CHAT_SUMMARY_MODEL_ID", "") # Specify the model id for `/summary` command (If not set, use the default model)
+ CHAT_SUMMARY_MODEL_NAME = os.getenv("CHAT_SUMMARY_MODEL_NAME", "")
PODCAST_SUMMARY_MODEL_ID = os.getenv("PODCAST_SUMMARY_MODEL_ID", "") # for generating podcast summary (If not set, use the default AI model)
PODCAST_SUMMARY_MODEL_NAME = os.getenv("PODCAST_SUMMARY_MODEL_NAME", "")
SUBTITLE_SUMMARY_MODEL_ID = os.getenv("SUBTITLE_SUMMARY_MODEL_ID", "") # for generating subtitle summary (If not set, use the default AI model)
+ SUBTITLE_SUMMARY_MODEL_NAME = os.getenv("SUBTITLE_SUMMARY_MODEL_NAME", "")
# For tool_call. Some models doesn't support tool call, so we use this model to do the tool_call first.
# Then construct the new questions for the original model.
TOOLS_MODEL = os.getenv("GPT_TOOLS_MODEL", "gpt-4o-mini") # this model should be fast and cheap