Commit 75670b1

Author: benny-dou <60535774+benny-dou@users.noreply.github.com>
Date: 2025-07-24 02:24:19

feat(gpt): support Kimi AI model

Parent: 46ee63e
Changed files (4)
src/llm/gpt.py
@@ -35,6 +35,7 @@ HELP = f"""🤖**GPT Chat**
 `/qwen`: **{GPT.QWEN_MODEL_NAME}** {image_emoji(GPT.QWEN_ACCEPT_IMAGE)}
 `/doubao`: **{GPT.DOUBAO_MODEL_NAME}** {image_emoji(GPT.DOUBAO_ACCEPT_IMAGE)}
 `/grok`: **{GPT.GROK_MODEL_NAME}** {image_emoji(GPT.GROK_ACCEPT_IMAGE)}
+`/kimi`: **{GPT.KIMI_MODEL_NAME}** {image_emoji(GPT.KIMI_ACCEPT_IMAGE)}
 
 ⚠️Note:
 If the conversation history contains images but the model does not support them (no 🏞 icon), it will automatically switch to the **{GPT.OMNI_PROVIDER.lower()}** model
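
The help entry renders an image-capability icon via `image_emoji`. That helper is not shown in this diff; a minimal sketch of what it presumably does, mapping the `*_ACCEPT_IMAGE` flag to the 🏞 icon the note above refers to:

```python
def image_emoji(accept_image: bool) -> str:
    # Assumed behavior (helper not shown in this commit): render the 🏞 marker
    # next to models that accept images, and nothing otherwise.
    return "🏞" if accept_image else ""
```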
@@ -91,6 +92,9 @@ def is_gpt_conversation(minfo: dict) -> bool:
     if any(str(x) in strings_list(TID.DOUBAO_CHATS) for x in [minfo["cid"], slim_cid(minfo["cid"])]):
         minfo["text"] = "/doubao " + minfo["text"]
         return True
+    if any(str(x) in strings_list(TID.KIMI_CHATS) for x in [minfo["cid"], slim_cid(minfo["cid"])]):
+        minfo["text"] = "/kimi " + minfo["text"]
+        return True
 
     # is replying to gpt-bot response message?
     model_names = [
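
The new branch mirrors the existing per-provider routing: if the chat id (full or slimmed) is listed in `TID.KIMI_CHATS`, the message text gets a `/kimi` prefix so the model parser picks Kimi. A sketch of the two helpers it leans on, assuming `strings_list` splits a comma-separated env value and `slim_cid` drops Telegram's `-100` supergroup prefix:

```python
def strings_list(value: str) -> list[str]:
    # Assumed behavior: split a comma-separated env string into trimmed, non-empty items.
    return [s.strip() for s in value.split(",") if s.strip()]

def slim_cid(cid: int | str) -> str:
    # Assumed behavior: drop the "-100" prefix Telegram adds to supergroup/channel
    # ids, so both id forms can be matched against the configured list.
    s = str(cid)
    return s[4:] if s.startswith("-100") else s
```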
src/llm/models.py
@@ -43,6 +43,8 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
         model_id = GPT.DOUBAO_MODEL
     elif reply_text.startswith(f"🤖{GPT.GROK_MODEL_NAME}"):
         model_id = GPT.GROK_MODEL
+    elif reply_text.startswith(f"🤖{GPT.KIMI_MODEL_NAME}"):
+        model_id = GPT.KIMI_MODEL
     elif reply_text.startswith(f"🤖{GPT.GEMINI_MODEL_NAME}"):
         model_id = GPT.GEMINI_MODEL
     elif reply_text.startswith(f"🤖{GEMINI.IMG_MODEL_NAME}"):
@@ -56,21 +58,24 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
         "doubao": GPT.DOUBAO_MODEL,
         "grok": GPT.GROK_MODEL,
         "gemini": GPT.GEMINI_MODEL,
+        "kimi": GPT.KIMI_MODEL,
     }
     # parse from command prefix. If the /ds command is used, force the DeepSeek model.
-    if startswith_prefix(text, prefix=["/gpt"]):
+    if startswith_prefix(text, prefix="/gpt"):
         model_id = GPT.OPENAI_MODEL
-    elif startswith_prefix(text, prefix=["/ds"]):
+    elif startswith_prefix(text, prefix="/ds"):
         model_id = GPT.DEEPSEEK_MODEL
-    elif startswith_prefix(text, prefix=["/qwen"]):
+    elif startswith_prefix(text, prefix="/qwen"):
         model_id = GPT.QWEN_MODEL
-    elif startswith_prefix(text, prefix=["/doubao"]):
+    elif startswith_prefix(text, prefix="/doubao"):
         model_id = GPT.DOUBAO_MODEL
-    elif startswith_prefix(text, prefix=["/grok"]):
+    elif startswith_prefix(text, prefix="/grok"):
         model_id = GPT.GROK_MODEL
-    elif startswith_prefix(text, prefix=[PREFIX.GENIMG]):
+    elif startswith_prefix(text, prefix="/kimi"):
+        model_id = GPT.KIMI_MODEL
+    elif startswith_prefix(text, prefix=PREFIX.GENIMG):
         model_id = GEMINI.IMG_MODEL
-    elif startswith_prefix(text, prefix=["/gemini"]):
+    elif startswith_prefix(text, prefix="/gemini"):
         model_id = GPT.GEMINI_MODEL
     else:
         model_id = providers.get(GPT.DEFAULT_PROVIDER.lower(), GPT.OPENAI_MODEL)
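
Alongside the `/kimi` branch, this hunk also drops the single-element lists (`prefix=["/gpt"]` becomes `prefix="/gpt"`), which implies `startswith_prefix` accepts either a string or a list. A sketch under that assumption:

```python
def startswith_prefix(text: str, prefix: str | list[str]) -> bool:
    # Assumed behavior: normalize a single prefix to a list, then match
    # case-insensitively against the start of the message text.
    prefixes = [prefix] if isinstance(prefix, str) else prefix
    return any(text.lower().startswith(p.lower()) for p in prefixes)
```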
@@ -83,6 +88,7 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
         "doubao": "/doubao",
         "grok": "/grok",
         "gemini": "/gemini",
+        "kimi": "/kimi",
     }
     if model_id and (model_id == GEMINI.IMG_MODEL or reply_text.startswith(f"🤖{GEMINI.IMG_MODEL_NAME}")):
         response_modality = "image"
@@ -106,6 +112,7 @@ def get_model_id(text: str, reply_text: str, context_type: str) -> tuple[str, st
         or (model_id == GPT.QWEN_MODEL and not GPT.QWEN_ACCEPT_IMAGE)
         or (model_id == GPT.DOUBAO_MODEL and not GPT.DOUBAO_ACCEPT_IMAGE)
         or (model_id == GPT.GROK_MODEL and not GPT.GROK_ACCEPT_IMAGE)
+        or (model_id == GPT.KIMI_MODEL and not GPT.KIMI_ACCEPT_IMAGE)
     ):
         prefix = omni_providers.get(GPT.OMNI_PROVIDER.lower(), "/gpt")
         return get_model_id(prefix, reply_text, context_type)  # parse again
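
The guard extends the existing image fallback to Kimi: if the conversation involves an image but `GPT_KIMI_ACCEPT_IMAGE` is off, the function re-parses with the omni provider's command instead. A hypothetical trace, assuming `GPT.OMNI_PROVIDER` is `"openai"` and image support disabled:

```python
# Hypothetical trace (env values assumed, not taken from the commit):
#   get_model_id("/kimi describe this", reply_text, context_type)
#     -> model_id = GPT.KIMI_MODEL, but GPT.KIMI_ACCEPT_IMAGE is False
#     -> prefix = omni_providers["openai"]  # "/gpt"
#     -> return get_model_id("/gpt", reply_text, context_type)  # resolves to GPT.OPENAI_MODEL
```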
@@ -121,6 +128,7 @@ def get_gpt_config(model_id: str = "") -> dict:
         GPT.QWEN_MODEL: {"api_key": sample_key(GPT.QWEN_API_KEY), "base_url": GPT.QWEN_BASE_URL, "model_name": GPT.QWEN_MODEL_NAME},
         GPT.DOUBAO_MODEL: {"api_key": sample_key(GPT.DOUBAO_API_KEY), "base_url": GPT.DOUBAO_BASE_URL, "model_name": GPT.DOUBAO_MODEL_NAME},
         GPT.GROK_MODEL: {"api_key": sample_key(GPT.GROK_API_KEY), "base_url": GPT.GROK_BASE_URL, "model_name": GPT.GROK_MODEL_NAME},
+        GPT.KIMI_MODEL: {"api_key": sample_key(GPT.KIMI_API_KEY), "base_url": GPT.KIMI_BASE_URL, "model_name": GPT.KIMI_MODEL_NAME},
         GPT.GEMINI_MODEL: {"api_key": sample_key(GPT.GEMINI_API_KEY), "base_url": GPT.GEMINI_BASE_URL, "model_name": GPT.GEMINI_MODEL_NAME},
     }
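
Each provider entry draws its key through `sample_key`, which presumably load-balances across several keys. A sketch, assuming keys are stored as one comma-separated env value:

```python
import random

def sample_key(keys: str) -> str:
    # Assumed behavior: pick one key at random from a comma-separated pool,
    # spreading requests across multiple API keys.
    pool = [k.strip() for k in keys.split(",") if k.strip()]
    return random.choice(pool) if pool else ""
```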
 
src/config.py
@@ -76,7 +76,7 @@ class PREFIX:
     ASR = os.getenv("PREFIX_ASR", "/asr").lower()
     AUDIO = os.getenv("PREFIX_AUDIO", "/audio").lower()
     CONVERT = os.getenv("PREFIX_CONVERT", "/convert").lower()  # convert image file to photo
-    GPT = os.getenv("PREFIX_GPT", "/ai,/gpt,/gemini,/ds,/qwen,/doubao,/grok").lower()
+    GPT = os.getenv("PREFIX_GPT", "/ai,/gpt,/gemini,/ds,/qwen,/doubao,/grok,/kimi").lower()
     SUBTITLE = os.getenv("PREFIX_SUBTITLE", "/subtitle, /sub").lower()
     WGET = os.getenv("PREFIX_WGET", "/wget, /curl").lower()
     OCR = os.getenv("PREFIX_OCR", "/ocr").lower()
@@ -195,12 +195,13 @@ class TID:  # see more TID usecase in `src/permission.py`
     # back up ytdlp audio if the user does not request it
     CHANNEL_YTDLP_BACKUP = os.getenv("TID_CHANNEL_YTDLP_BACKUP", "me")
     DAILY_SUMMARY = os.getenv("TID_DAILY_SUMMARY", "{}")  # {"source-chat-id": "target-chat-id"}, e.g. '{"-1001234567890": "-1009876543210"}'
-    GEMINI_CHATS = os.getenv("TID_GEMINI_CHATS", "")  # comma separated chat ids to always use openai models (no need `/gemini`)
+    GEMINI_CHATS = os.getenv("TID_GEMINI_CHATS", "")  # comma separated chat ids to always use gemini models (no need `/gemini`)
     OPENAI_CHATS = os.getenv("TID_OPENAI_CHATS", "")  # comma separated chat ids to always use openai models (no need `/gpt`)
-    DEEPSEEK_CHATS = os.getenv("TID_DEEPSEEK_CHATS", "")  # comma separated chat ids to always use openai models (no need `/ds`)
-    QWEN_CHATS = os.getenv("TID_QWEN_CHATS", "")  # comma separated chat ids to always use openai models (no need `/qwen`)
-    DOUBAO_CHATS = os.getenv("TID_DOUBAO_CHATS", "")  # comma separated chat ids to always use openai models (no need `/doubao`)
-    GROK_CHATS = os.getenv("TID_GROK_CHATS", "")  # comma separated chat ids to always use openai models (no need `/grok`)
+    DEEPSEEK_CHATS = os.getenv("TID_DEEPSEEK_CHATS", "")  # comma separated chat ids to always use deepseek models (no need `/ds`)
+    QWEN_CHATS = os.getenv("TID_QWEN_CHATS", "")  # comma separated chat ids to always use qwen models (no need `/qwen`)
+    DOUBAO_CHATS = os.getenv("TID_DOUBAO_CHATS", "")  # comma separated chat ids to always use doubao models (no need `/doubao`)
+    GROK_CHATS = os.getenv("TID_GROK_CHATS", "")  # comma separated chat ids to always use grok models (no need `/grok`)
+    KIMI_CHATS = os.getenv("TID_KIMI_CHATS", "")  # comma separated chat ids to always use kimi models (no need `/kimi`)
 
 
 class DB:
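
With the new `TID_KIMI_CHATS` variable, whole chats can default to Kimi without typing `/kimi`. A hypothetical configuration (chat ids are made up, following the format shown in the `DAILY_SUMMARY` comment):

```python
import os

# Hypothetical: route these chats to Kimi automatically; set before importing
# src.config (or put the same assignment in the .env file).
os.environ["TID_KIMI_CHATS"] = "-1001234567890,-1009876543210"
```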
@@ -399,6 +400,12 @@ class GPT:
     DOUBAO_API_KEY = os.getenv("GPT_DOUBAO_API_KEY", "")
     DOUBAO_BASE_URL = os.getenv("GPT_DOUBAO_BASE_URL", "https://ark.cn-beijing.volces.com/api/v3")
     DOUBAO_ACCEPT_IMAGE = os.getenv("GPT_DOUBAO_ACCEPT_IMAGE", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
+    # /kimi command
+    KIMI_MODEL = os.getenv("GPT_KIMI_MODEL", "")
+    KIMI_MODEL_NAME = os.getenv("GPT_KIMI_MODEL_NAME", "")
+    KIMI_API_KEY = os.getenv("GPT_KIMI_API_KEY", "")
+    KIMI_BASE_URL = os.getenv("GPT_KIMI_BASE_URL", "https://api.moonshot.ai/v1")
+    KIMI_ACCEPT_IMAGE = os.getenv("GPT_KIMI_ACCEPT_IMAGE", "1").lower() in ["1", "y", "yes", "t", "true", "on"]
 
     # AI summary (/summary)
     SUMMARY_CMD = os.getenv("GPT_SUMMARY_CMD", "/gemini")  # add this command prefix to call AI summary
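
The Kimi block follows the same shape as the other providers: model id, display name, API key, base URL, and a truthy-string image flag. Since the default base URL is Moonshot's OpenAI-compatible endpoint, the bot presumably reaches it through the standard `openai` client; a sketch with made-up key and model values:

```python
from openai import OpenAI

# Sketch only: how a GPT.KIMI_* config would typically be consumed.
# The key and model id below are placeholders, not values from the commit.
client = OpenAI(api_key="sk-...", base_url="https://api.moonshot.ai/v1")
resp = client.chat.completions.create(
    model="moonshot-v1-8k",  # e.g. the value of GPT_KIMI_MODEL
    messages=[{"role": "user", "content": "Hello, Kimi!"}],
)
print(resp.choices[0].message.content)
```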
src/handler.py
@@ -102,7 +102,7 @@ async def handle_utilities(
     info = parse_msg(message)
     kwargs |= params_from_msg_text(info["text"])  # merge the parameters from the message text
     if ai:
-        await gpt_response(client, message, **kwargs)  # /ai /gpt /gemini /ds /qwen /doubao /grok
+        await gpt_response(client, message, **kwargs)  # /ai
     if asr:
         await voice_to_text(client, message, **kwargs)  # /asr
     if audio: