Commit 3baa474
src/llm/gpt.py
@@ -30,7 +30,7 @@ HELP = f"""🤖**GPT对话**
🔄使用以下命令强制切换模型:
`/gpt`: **{GPT.OPENAI_MODEL_NAME}** {image_emoji(GPT.OPENAI_ACCEPT_IMAGE)}
-`/gemini`: **{GPT.GEMINI_MODEL_NAME}** 🎬🎧{image_emoji(GPT.GEMINI_ACCEPT_IMAGE)}
+`/gemini`: **{GEMINI.TEXT_MODEL_NAME}** 🎬🏞🎧
`/ds`: **{GPT.DEEPSEEK_MODEL_NAME}** {image_emoji(GPT.DEEPSEEK_ACCEPT_IMAGE)}
`/qwen`: **{GPT.QWEN_MODEL_NAME}** {image_emoji(GPT.QWEN_ACCEPT_IMAGE)}
`/doubao`: **{GPT.DOUBAO_MODEL_NAME}** {image_emoji(GPT.DOUBAO_ACCEPT_IMAGE)}
@@ -39,7 +39,7 @@ HELP = f"""🤖**GPT对话**
⚠️注意:
若对话历史包含图片, 但模型不支持图片 (无🏞图标), 会自动切换为 **{GPT.OMNI_PROVIDER.lower()}** 模型
-若对话历史包含视频/音频, 但模型不支持视频/音频 (无🎬/🎧图标), 会自动切换为 **{GPT.GEMINI_MODEL_NAME}** 模型
+若对话历史包含视频/音频, 但模型不支持视频/音频 (无🎬/🎧图标), 会自动切换为 **{GEMINI.TEXT_MODEL_NAME}** 模型
"""
@@ -103,7 +103,7 @@ def is_gpt_conversation(minfo: dict) -> bool:
GPT.QWEN_MODEL_NAME,
GPT.DOUBAO_MODEL_NAME,
GPT.GROK_MODEL_NAME,
- GPT.GEMINI_MODEL_NAME,
+ GEMINI.TEXT_MODEL_NAME,
GEMINI.IMG_MODEL_NAME,
]
return startswith_prefix(minfo["reply_text"], prefix=[f"🤖{x}".lower() for x in model_names])
@@ -154,8 +154,8 @@ async def gpt_response(
kwargs["message_info"] = info # save trigger message info
conversations = get_conversations(message)
context_type = get_context_type(conversations) # {"type": "text", "error": None} # text, image
- model_id, resp_modality, sdk = get_model_id(info["text"], info["reply_text"], context_type)
- if "gemini" in model_id.lower() and sdk == "gemini":
+ model_id, resp_modality = get_model_id(info["text"], info["reply_text"], context_type)
+ if "gemini" in model_id.lower():
return await gemini_response(
client,
message,
src/llm/models.py
@@ -22,13 +22,11 @@ def get_context_type(conversations: list[Message]) -> str:
return context_type
-def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, str, str]:
+def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, str]:
"""Get model id based on the reply text, prefix command and context type.
- /gpt = OpenAI, /gemini = Gemini, /ds = DeepSeek, /qwen = Qwen, /doubao = Doubao, /grok = Grok
-
Returns:
- tuple[str, str, str]: (model_id, response_modality, sdk)
+        tuple[str, str]: (model_id, response_modality)
"""
model_id = ""
# Parse from reply bot message.
@@ -45,8 +43,8 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
model_id = GPT.GROK_MODEL
elif reply_text.startswith(f"🤖{GPT.KIMI_MODEL_NAME}"):
model_id = GPT.KIMI_MODEL
- elif reply_text.startswith(f"🤖{GPT.GEMINI_MODEL_NAME}"):
- model_id = GPT.GEMINI_MODEL
+ elif reply_text.startswith(f"🤖{GEMINI.TEXT_MODEL_NAME}"):
+ model_id = GEMINI.TEXT_MODEL
elif reply_text.startswith(f"🤖{GEMINI.IMG_MODEL_NAME}"):
model_id = GEMINI.IMG_MODEL
@@ -57,7 +55,7 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
"qwen": GPT.QWEN_MODEL,
"doubao": GPT.DOUBAO_MODEL,
"grok": GPT.GROK_MODEL,
- "gemini": GPT.GEMINI_MODEL,
+ "gemini": GEMINI.TEXT_MODEL,
"kimi": GPT.KIMI_MODEL,
}
# parse from command prefix. If use /ds command, force use DeepSeek model.
@@ -76,7 +74,7 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
elif startswith_prefix(text, prefix=PREFIX.GENIMG):
model_id = GEMINI.IMG_MODEL
elif startswith_prefix(text, prefix="/gemini"):
- model_id = GPT.GEMINI_MODEL
+ model_id = GEMINI.TEXT_MODEL
else:
model_id = providers.get(GPT.DEFAULT_PROVIDER.lower(), GPT.OPENAI_MODEL)
@@ -92,22 +90,18 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
}
if model_id and (model_id == GEMINI.IMG_MODEL or reply_text.startswith(f"🤖{GEMINI.IMG_MODEL_NAME}")):
response_modality = "image"
- sdk = "gemini"
elif "gemini" in model_id:
response_modality = "text"
- sdk = GPT.GEMINI_SDK
else:
response_modality = "text"
- sdk = "openai"
if model_id and context_type == "text": # no need to fallback if context type is text
- return model_id, response_modality, sdk
+ return model_id, response_modality
if context_type == "gemini": # force gemini
- return GPT.GEMINI_MODEL, "text", "gemini"
+ return GEMINI.TEXT_MODEL, "text"
if (
(model_id == GPT.OPENAI_MODEL and not GPT.OPENAI_ACCEPT_IMAGE)
- or (model_id == GPT.GEMINI_MODEL and not GPT.GEMINI_ACCEPT_IMAGE)
or (model_id == GPT.DEEPSEEK_MODEL and not GPT.DEEPSEEK_ACCEPT_IMAGE)
or (model_id == GPT.QWEN_MODEL and not GPT.QWEN_ACCEPT_IMAGE)
or (model_id == GPT.DOUBAO_MODEL and not GPT.DOUBAO_ACCEPT_IMAGE)
@@ -117,7 +111,7 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
prefix = omni_providers.get(GPT.OMNI_PROVIDER.lower(), "/gpt")
return get_model_id(prefix, reply_text, context_type) # parse again
- return model_id, response_modality, sdk
+ return model_id, response_modality
def get_gpt_config(model_id: str = "") -> dict:
@@ -129,7 +123,6 @@ def get_gpt_config(model_id: str = "") -> dict:
GPT.DOUBAO_MODEL: {"api_key": sample_key(GPT.DOUBAO_API_KEY), "base_url": GPT.DOUBAO_BASE_URL, "model_name": GPT.DOUBAO_MODEL_NAME},
GPT.GROK_MODEL: {"api_key": sample_key(GPT.GROK_API_KEY), "base_url": GPT.GROK_BASE_URL, "model_name": GPT.GROK_MODEL_NAME},
GPT.KIMI_MODEL: {"api_key": sample_key(GPT.KIMI_API_KEY), "base_url": GPT.KIMI_BASE_URL, "model_name": GPT.KIMI_MODEL_NAME},
- GPT.GEMINI_MODEL: {"api_key": sample_key(GPT.GEMINI_API_KEY), "base_url": GPT.GEMINI_BASE_URL, "model_name": GPT.GEMINI_MODEL_NAME},
}
client = {"http_client": DefaultAsyncHttpxClient(proxy=PROXY.GPT)}
src/config.py
@@ -369,13 +369,6 @@ class GPT:
OPENAI_API_KEY = os.getenv("GPT_OPENAI_API_KEY", "")
OPENAI_BASE_URL = os.getenv("GPT_OPENAI_BASE_URL", "https://api.openai.com/v1")
OPENAI_ACCEPT_IMAGE = os.getenv("GPT_OPENAI_ACCEPT_IMAGE", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
- # /gemini command
- GEMINI_SDK = os.getenv("GEMINI_SDK", "openai") # "openai" or "gemini". If set to "gemini", see class GEMINI below for details
- GEMINI_MODEL = os.getenv("GEMINI_TEXT_MODEL", "")
- GEMINI_MODEL_NAME = os.getenv("GEMINI_TEXT_MODEL_NAME", "")
- GEMINI_API_KEY = os.getenv("GPT_GEMINI_API_KEY", "")
- GEMINI_BASE_URL = os.getenv("GPT_GEMINI_BASE_URL", "https://generativelanguage.googleapis.com/v1beta/openai")
- GEMINI_ACCEPT_IMAGE = os.getenv("GPT_GEMINI_ACCEPT_IMAGE", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
# /ds command
DEEPSEEK_MODEL = os.getenv("GPT_DEEPSEEK_MODEL", "")
DEEPSEEK_MODEL_NAME = os.getenv("GPT_DEEPSEEK_MODEL_NAME", "")