Commit 463a1ca
Changed files (4)
src
src/asr/gemini.py
@@ -96,6 +96,7 @@ async def gemini_single_file(
text = glom(chunk.model_dump(), "candidates.0.content.parts.0.text", default="") or ""
logger.trace(f"{text!r}")
answers += text
+ await app.aio.aclose()
try:
transcriptions = json.loads(answers)
except json.JSONDecodeError as e:
src/llm/gemini/chat.py
@@ -182,6 +182,7 @@ async def gemini_stream(
sent_messages.append(status_msg)
status_mid = status_msg.id
+ await app.aio.aclose()
# all chunks are processed
if is_reasoning or not answers.strip(): # empty response
return await gemini_stream(
src/llm/gemini/text2img.py
@@ -93,6 +93,7 @@ async def gemini_non_stream(
clean_gemini_sourcemarks(params["contents"])
genai_params = {"model": params["model"], "contents": params["contents"], "config": params["config"]}
response = await app.aio.models.generate_content(**genai_params)
+ await app.aio.aclose()
prefix = f"🤖**{model_name}**:{BOT_TIPS}\n"
res = parse_response(response.model_dump())
texts = res.get("texts", "")
src/tts/gemini.py
@@ -69,6 +69,7 @@ async def gemini_tts_real(message: Message, texts: str, model: str, voice_name:
),
),
)
+ await app.aio.aclose()
if data := glom(response, "candidates.0.content.parts.0.inline_data.data", default=None):
caption = f"🗣音色: {voice_name}\n🤖引擎: {model}\n{blockquote(texts[: CAPTION_LENGTH - 20])}"
if return_bytes: