Commit 6461d98
Changed files (1)
src
llm
gemini
src/llm/gemini/chat.py
@@ -48,7 +48,10 @@ async def gemini_chat_completion(
silent (bool, optional): Whether to disable progress updates. Defaults to False.
"""
info = parse_msg(message, silent=True, use_cache=False)
- tools = [types.Tool(url_context=types.UrlContext()), types.Tool(google_search=types.GoogleSearch())]
+ tools = [types.Tool(url_context=types.UrlContext())]
+ if not model_id.startswith("gemini-3"): # google search tool is paid for gemini-3 models
+ tools.append(types.Tool(google_search=types.GoogleSearch()))
+
# parse config from environment variable
genconfig = {}
with contextlib.suppress(Exception):
@@ -59,7 +62,7 @@ async def gemini_chat_completion(
msg = f"🤖**{model_name}**: 思考中...\n👤**[{info['full_name'] or info['ctitle']}](tg://user?id={info['uid']})**: “{real_prompt}”"[:TEXT_LENGTH]
if not silent and kwargs.get("show_progress"):
kwargs["progress"] = (await send2tg(client, message, texts=msg, **kwargs))[0]
- genconfig |= {"response_modalities": ["TEXT"]}
+ genconfig |= {"response_modalities": ["TEXT"], "media_resolution": types.MediaResolution.MEDIA_RESOLUTION_HIGH}
if enable_tools:
genconfig |= {"tools": tools}
if system_prompt is not None:
@@ -70,6 +73,8 @@ async def gemini_chat_completion(
if GEMINI.TEXT_THINKING_BUDGET is not None and not disable_thinking:
thinking_budget = min(round(float(GEMINI.TEXT_THINKING_BUDGET)), GEMINI.MAX_THINKING_BUDGET)
genconfig |= {"thinking_config": types.ThinkingConfig(include_thoughts=include_thoughts, thinking_budget=thinking_budget)}
+ if model_id.startswith("gemini-3") and not disable_thinking:
+ genconfig |= {"thinking_config": types.ThinkingConfig(include_thoughts=include_thoughts, thinking_level=types.ThinkingLevel.HIGH)}
params = {"model": model_id, "conversations": get_conversations(message), "config": types.GenerateContentConfig(**genconfig)}
logger.trace(params)
return await gemini_stream(client, message, model_name, params, append_grounding=append_grounding, silent=silent, **kwargs)