#!/usr/bin/env python
# -*- coding: utf-8 -*-
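"""Stream Anthropic model responses into Telegram messages via pyrogram."""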
import contextlib
import hashlib
from typing import Literal
from urllib.parse import quote_plus

from anthropic import AsyncAnthropic, DefaultAioHttpClient
from glom import Coalesce, glom
from loguru import logger
from pyrogram.client import Client
from pyrogram.parser.markdown import BLOCKQUOTE_DELIM
from pyrogram.types import Message, ReplyParameters

from ai.texts.contexts import get_anthropic_contexts
from ai.utils import BOT_TIPS, EMOJI_REASONING_BEGIN, EMOJI_TEXT_BOT, beautify_llm_response, literal_eval, load_skills, trim_none
from config import AI, PROXY, TEXT_LENGTH
from messages.progress import modify_progress
from messages.utils import blockquote, count_without_entities, delete_message, quote, smart_split
from utils import number_to_emoji, rand_string, strings_list


async def anthropic_responses(
    client: Client,
    message: Message,
    *,
    prefix: str = "",
    model_id: str = AI.ANTHROPIC_MODEL_ID,
    model_name: str = AI.ANTHROPIC_MODEL_ID,
    anthropic_base_url: str = AI.ANTHROPIC_BASE_URL,
    anthropic_api_keys: str = AI.ANTHROPIC_API_KEYS,
    anthropic_client_config: str | dict = "",
    anthropic_default_headers: str | dict = "",
    anthropic_responses_config: str | dict = "",
    anthropic_proxy: str | None = PROXY.ANTHROPIC,
    cache_response_ttl: int = 0,
    anthropic_media_send_as: Literal["base64", "file_id"] = "file_id",
    anthropic_append_citation: bool = True,
    skills: str = "",
    hide_thinking: bool = False,
    silent: bool = False,
    max_retries: int = 3,
    **kwargs,
) -> dict:
    """Get a response from the Anthropic Messages API and stream it into Telegram messages.

    Returns:
        dict: {"success": True, "texts": str, "thoughts": str, "prefix": str, "model_name": str, "sent_messages": list[Message]}
            on success, or {"progress": Message} / {} on failure.
    """
    if not prefix:
        prefix = f"{EMOJI_TEXT_BOT}**{model_name}**:{BOT_TIPS}\n"

    if silent or not kwargs.get("show_progress"):  # noqa: SIM108
        status_msg = None
    else:
        status_msg = kwargs.get("progress") or await message.reply(f"{EMOJI_TEXT_BOT}**{model_name}**: 思考中...", quote=True)

    sent_messages = [status_msg]
    cache_hour = cache_response_ttl // 3600  # TTL in whole hours
    try:
        anthropic_client = {}
        if literal_eval(anthropic_client_config):
            anthropic_client |= literal_eval(anthropic_client_config)
        if literal_eval(anthropic_default_headers):
            anthropic_client |= {"default_headers": literal_eval(anthropic_default_headers)}
        if anthropic_proxy:
            anthropic_client |= {"http_client": DefaultAioHttpClient(proxy=anthropic_proxy)}
    except Exception as e:
        logger.error(f"Anthropic client setup error: {e}")
        return {"progress": status_msg} if isinstance(status_msg, Message) else {}
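    # Try each API key in random order; the first attempt that returns any text wins.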
    for api_key in strings_list(anthropic_api_keys, shuffle=True):
        try:
            anthropic_client |= {"base_url": anthropic_base_url, "api_key": api_key}
            logger.trace(f"AsyncAnthropic(**{anthropic_client})")
            anthropic = AsyncAnthropic(**anthropic_client)
            params: dict = {
                "model": model_id,
                "max_tokens": 4096,
                "messages": await get_anthropic_contexts(
                    client,
                    message,
                    anthropic=anthropic,
                    cache_hour=cache_hour,
                    media_send_as=anthropic_media_send_as,
                ),
            }
            if literal_eval(anthropic_responses_config):
                params |= literal_eval(anthropic_responses_config)
            if skills:
                params |= {"system": await load_skills(skills)}
            logger.debug(f"anthropic.messages.create(**{params})")
            resp = await single_api_response(
                client,
                status_msg,
                anthropic,
                params=params,
                prefix=prefix,
                hide_thinking=hide_thinking,
                silent=silent,
                max_retries=max_retries,
                append_citation=anthropic_append_citation,
                **kwargs,
            )
            if not resp.get("texts"):
                continue
            sent_messages.extend(resp.get("sent_messages", []))
            return {
                "success": True,
                "texts": resp["texts"],
                "thoughts": resp["thoughts"],
                "prefix": prefix,
                "model_name": model_name,
                "sent_messages": [m for m in sent_messages if isinstance(m, Message)],
            }
        except Exception as e:
            logger.error(f"Anthropic API error: {e}")
            await modify_progress(status_msg, text=f"{e}", force_update=True, **kwargs)
    return {"progress": status_msg} if isinstance(status_msg, Message) else {}


async def single_api_response(
    client: Client,
    status_msg: Message | None,
    anthropic: AsyncAnthropic,
    params: dict,
    *,
    prefix: str = "",
    append_citation: bool = True,
    hide_thinking: bool = False,
    silent: bool = False,
    retry: int = 0,
    max_retries: int = 3,
    **kwargs,
) -> dict:
    """Stream a single Anthropic Messages API request and relay the chunks to Telegram.

    Returns:
        dict: {"texts": str, "thoughts": str, "sent_messages": list[Message]}
    """
    if retry > max_retries:
        return {"texts": "", "thoughts": "", "sent_messages": []}
    answers = ""  # full answer text accumulated across all chunks
    thoughts = ""  # full reasoning text accumulated across all chunks
    runtime_texts = ""  # buffer for the Telegram message currently being edited
    status_cid = status_msg.chat.id if isinstance(status_msg, Message) else 0
    status_mid = status_msg.id if isinstance(status_msg, Message) else 0
    sent_messages = []
    try:
        is_reasoning = False
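        # Consume the stream chunk by chunk, telling "thinking" deltas apart from "text"
        # deltas so reasoning can be rendered as a blockquote (or skipped when hide_thinking).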
        async with anthropic.beta.messages.stream(**params) as stream:
            async for chunk in stream:
                resp = trim_none(chunk.model_dump())
                logger.trace(resp)
                response_type = glom(resp, Coalesce("delta.type", "content_block.type"), default="") or ""
                chunk_answer = glom(resp, "delta.text", default="") or ""
                chunk_thinking = glom(resp, "delta.thinking", default="") or ""
                # track whether the model is currently reasoning
                if response_type == "thinking_delta":  # reasoning in progress
                    is_reasoning = True
                elif response_type == "text_delta":  # reasoning finished
                    is_reasoning = False

                if response_type == "thinking" and len(thoughts) == 0:  # first reasoning content received
                    runtime_texts += quote(f"{EMOJI_REASONING_BEGIN}{chunk_thinking.lstrip()}")
                elif chunk_thinking:  # subsequent reasoning content
                    runtime_texts += chunk_thinking.replace("\n", f"\n{BLOCKQUOTE_DELIM}")

                if response_type == "text":  # the answer block starts: reset the buffer, dropping the rendered reasoning
                    runtime_texts = chunk_answer.lstrip()
                else:
                    runtime_texts += chunk_answer

                if not chunk_answer and not chunk_thinking:
                    continue
                thoughts += chunk_thinking
                answers += chunk_answer
                if hide_thinking and is_reasoning:
                    continue
                runtime_texts = beautify_llm_response(runtime_texts)
                length = await count_without_entities(prefix + runtime_texts)
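                # Edit the status message in place while the text still fits; once it would
                # exceed Telegram's length limit, finalize this message and continue in a new one.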
                if length <= TEXT_LENGTH - 10:  # leave some flexibility
                    if len(runtime_texts.removeprefix(prefix)) > 10:  # start updating once the answer is non-trivial
                        await modify_progress(message=status_msg, text=prefix + runtime_texts, detail_progress=True)
                else:  # the answer is too long, split it into multiple messages
                    parts = await smart_split(prefix + runtime_texts)
                    if len(parts) == 1:
                        continue
                    if is_reasoning:
                        runtime_texts = quote(f"{EMOJI_REASONING_BEGIN}{parts[-1].lstrip()}")  # drop the previous thinking
                        await modify_progress(message=status_msg, text=parts[0], force_update=True)  # force send the first part
                    else:
                        await modify_progress(message=status_msg, text=blockquote(parts[0]), force_update=True)  # force send the first part
                        runtime_texts = parts[-1]  # keep the last part
                        if not silent:
                            status_msg = await client.send_message(status_cid, text=prefix + runtime_texts, reply_parameters=ReplyParameters(message_id=status_mid))  # continue in a new message
                            sent_messages.append(status_msg)
                            status_mid = status_msg.id

        # all chunks are processed; build the final reply
        if not answers.strip() and not thoughts.strip():  # empty response, retry
            return await single_api_response(
                client,
                status_msg,
                anthropic,
                params=params,
                prefix=prefix,
                append_citation=append_citation,
                retry=retry + 1,
                max_retries=max_retries,
                hide_thinking=hide_thinking,
                silent=silent,
                **kwargs,
            )
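        # On message_stop the final chunk carries the complete content blocks; rebuild the
        # answer from them so citation links and numbered footnotes can be appended.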
        thoughts, answers = parse_final_block(resp, thoughts, answers, append_citation=append_citation)
        if await count_without_entities(prefix + answers) <= TEXT_LENGTH - 10:  # a short answer fits in a single message
            quoted = answers.strip()
            await modify_progress(message=status_msg, text=f"{prefix}{blockquote(quoted)}", force_update=True)
        else:  # the full answer is too long and has already been split across multiple messages
            await modify_progress(message=status_msg, text=prefix + blockquote(runtime_texts), force_update=True)

    except Exception as e:
        error = f"{EMOJI_TEXT_BOT}BOT请求失败, 重试次数: {retry + 1}/{max_retries}\n{e}"
        if "resp" in locals():
            error += f"\n{resp}"
        logger.error(error)
        with contextlib.suppress(Exception):
            await modify_progress(status_msg, text=error, force_update=True, **kwargs)
            for msg in sent_messages:
                await delete_message(msg)
        if retry + 1 < max_retries:
            return await single_api_response(
                client,
                status_msg,
                anthropic,
                params=params,
                prefix=prefix,
                append_citation=append_citation,
                retry=retry + 1,
                max_retries=max_retries,
                hide_thinking=hide_thinking,
                silent=silent,
                **kwargs,
            )
    return {
        "texts": answers,
        "thoughts": thoughts,
        "sent_messages": [m for m in sent_messages if isinstance(m, Message)],
    }


def parse_final_block(chunk: dict, thoughts: str, answers: str, *, append_citation: bool) -> tuple[str, str]:
    if not append_citation:
        return thoughts, answers
    if chunk.get("type") != "message_stop":
        return thoughts, answers
    thoughts = ""
    texts = ""
    citations = {}  # {cite_key: {index: int, title: str, url: str}}
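    # Walk the final content blocks: collect the thinking text, rebuild the answer text, and
    # deduplicate citations by hashing title+url so each source keeps a single footnote index.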
    for item in glom(chunk, "message.content", default=[]):
        if item.get("type") == "thinking":
            thoughts += item.get("thinking", "")
        elif item.get("type") == "text":
            texts += item.get("text", "")
            for citation in glom(item, "citations", default=[]):
                title = citation.get("title") or rand_string(8)
                url = citation.get("url") or f"https://google.com/search?q={quote_plus(title)}"
                cite_key = hashlib.sha256(f"{title}{url}".encode()).hexdigest()
                cite_index = glom(citations, f"{cite_key}.index", default=None) or len(citations) + 1
                citations[cite_key] = {"index": cite_index, "title": title, "url": url}
                texts += f" [[{cite_index}]]({url})"
    # append the numbered citation footnotes in index order
    for x in sorted(citations.values(), key=lambda x: x["index"]):
        texts += f"\n{number_to_emoji(x['index'])}[{x['title']}]({x['url']})"
    return thoughts.strip(), texts.strip()