Commit 91cf667
Changed files (1)
src/llm/gpt.py
@@ -50,6 +50,7 @@ async def gpt_response(
custom_model_id: str = "",
custom_model_name: str = "",
enable_tools: bool = True,
+ silent: bool = False,
**kwargs,
) -> dict:
"""Get GPT response from Various API.
@@ -63,6 +64,7 @@ async def gpt_response(
custom_model_id (str, optional): Custom model id.
custom_model_name (str, optional): Custom model name.
enable_tools (bool, optional): Whether to enable tools. Defaults to True.
+ silent (bool, optional): Whether to suppress the progress status message. Defaults to False.
Returns:
dict: {"texts": str, "thoughts": str, "prefix": str, "model_name": str, "sent_messages": list[Message]}
@@ -102,21 +104,29 @@ async def gpt_response(
await send2tg(client, message, texts=f"⚠️不支持自定义模型: {custom_model_id}\n\n⚙️支持自定义模型列表:\n{'\n'.join(allowed_model_ids)}", **kwargs)
return {}
if custom_model_id.lower() in [x.lower() for x in strings_list(GEMINI.ALLOWED_CUSTOM_MODEL_IDS)]:
- return await gemini_chat_completion(client, message, model_id=custom_model_id, model_name=custom_model_name or custom_model_id, enable_tools=enable_tools, **kwargs)
+ return await gemini_chat_completion(
+ client,
+ message,
+ model_id=custom_model_id,
+ model_name=custom_model_name or custom_model_id,
+ enable_tools=enable_tools,
+ silent=silent,
+ **kwargs,
+ )
if model_id == GEMINI.TEXT_MODEL and not custom_model_id:
- return await gemini_chat_completion(client, message, enable_tools=enable_tools, **kwargs)
+ return await gemini_chat_completion(client, message, enable_tools=enable_tools, silent=silent, **kwargs)
# GPT models
if custom_model_id:
model_id = custom_model_id
config = get_gpt_config(model_id)
- config["friendly_name"] = custom_model_id or config["friendly_name"]
+ config["friendly_name"] = custom_model_name or custom_model_id or config["friendly_name"]
conversations = get_conversations(message)
config["completions"]["messages"] = await get_conversation_contexts(client, conversations, model_id=model_id, ctx_format="openai")
real_prompt = clean_cmd_prefix(info["text"], model_id) or clean_cmd_prefix(info["reply_text"], model_id)
msg = f"🤖**{config['friendly_name']}**: 思考中...\n👤**[{info['full_name'] or info['ctitle']}](tg://user?id={info['uid']})**: “{real_prompt}”"[:TEXT_LENGTH]
- status_msg = (await send2tg(client, message, texts=msg, **kwargs))[0]
- kwargs["progress"] = status_msg
+ if not silent and kwargs.get("show_progress"):
+ kwargs["progress"] = (await send2tg(client, message, texts=msg, **kwargs))[0]
if enable_tools:
config, response = await merge_tools_response(config, **kwargs)
@@ -125,12 +135,12 @@ async def gpt_response(
texts = f"🤖**{config['friendly_name']}**:{BOT_TIPS}\n{response['content']}"
length = await count_without_entities(texts)
if length <= TEXT_LENGTH:
- await modify_progress(message=status_msg, text=texts, force_update=True, **kwargs)
+ await modify_progress(text=texts, force_update=True, **kwargs)
final = {
"texts": response["content"],
"prefix": f"🤖**{config['friendly_name']}**:{BOT_TIPS}\n",
"model_name": config["friendly_name"],
- "sent_messages": [status_msg],
+ "sent_messages": [kwargs["progress"]] if kwargs.get("progress") else [],
}
else:
final = {
@@ -139,9 +149,9 @@ async def gpt_response(
"model_name": config["friendly_name"],
"sent_messages": await send2tg(client, message, texts=texts, **kwargs),
}
- await modify_progress(message=status_msg, del_status=True, **kwargs)
+ await modify_progress(del_status=True, **kwargs)
llm_cleanup_files(config["completions"]["messages"])
return final
- final = await send_to_gpt_stream(client, status_msg, config, **kwargs) # type: ignore
+ final = await send_to_gpt_stream(client, kwargs.get("progress"), config, silent=silent, **kwargs) # type: ignore
llm_cleanup_files(config["completions"]["messages"])
return final
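
For reference, a minimal sketch (not part of this commit) of how the new `silent` flag might be called. `gpt_response` and the `show_progress` kwarg come from the diff above; the import path, caller names, and the `client`/`message` objects are assumptions about the surrounding bot code.

```python
# Hypothetical callers of the updated gpt_response; illustrative only.
from src.llm.gpt import gpt_response  # import path assumed from the file header


async def answer_quietly(client, message):
    # silent=True: the new `if not silent and kwargs.get("show_progress")`
    # guard never posts the status message, so kwargs["progress"] stays unset
    # and short responses come back with "sent_messages": [].
    result = await gpt_response(client, message, silent=True)
    return result.get("texts", "")


async def answer_with_status(client, message):
    # Progress is now opt-in: show_progress must be truthy and silent must be
    # False (its default) for the status message to be created and threaded
    # through kwargs["progress"] to modify_progress and send_to_gpt_stream.
    return await gpt_response(client, message, show_progress=True)
```

Per this diff, `modify_progress` now resolves its target from `kwargs["progress"]` instead of an explicit `message=status_msg` argument, so callers that previously relied on the always-on status message need to pass `show_progress=True`.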