diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index c23c08acf..333d73368 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -171,6 +171,16 @@ To run integration tests: make integration_tests ``` +#### Backend Selection for Integration Tests + +For `libs/genai`, integration tests can run against different backends using the `TEST_VERTEXAI` environment variable: + +- **Google AI only (default)**: `make integration_tests` +- **Both Google AI and Vertex AI**: `TEST_VERTEXAI=1 make integration_tests` +- **Vertex AI only**: `TEST_VERTEXAI=only make integration_tests` + +Vertex AI tests require the `GOOGLE_CLOUD_PROJECT` environment variable to be set. Tests will automatically skip if not configured. + #### Annotating integration tests We annotate integration tests to separate those tests which heavily rely on GCP infrastructure. Especially for running those tests we have created a separate GCP project with all necessary infrastructure parts provisioned. To run the extended integration tests locally you will need to provision a GCP project and pass its configuration via env variables. diff --git a/libs/genai/Makefile b/libs/genai/Makefile index a7dc09a30..495f94ac3 100644 --- a/libs/genai/Makefile +++ b/libs/genai/Makefile @@ -67,5 +67,8 @@ help: @echo 'lint - run linters' @echo 'test - run unit tests' @echo 'tests - run unit tests' - @echo 'integration_test - run integration tests(NOTE: "export GOOGLE_API_KEY=..." is needed.)' + @echo 'integration_test - run integration tests (NOTE: "export GOOGLE_API_KEY=..." is needed.)' + @echo ' Use TEST_VERTEXAI=1 to test both Google AI and Vertex AI backends' + @echo ' Use TEST_VERTEXAI=only to test only Vertex AI backend' + @echo ' Vertex AI requires GOOGLE_CLOUD_PROJECT to be set' @echo 'test TEST_FILE= - run all tests in file' diff --git a/libs/genai/langchain_google_genai/__init__.py b/libs/genai/langchain_google_genai/__init__.py index 1fa7aac69..4ade8e941 100644 --- a/libs/genai/langchain_google_genai/__init__.py +++ b/libs/genai/langchain_google_genai/__init__.py @@ -1,8 +1,23 @@ -"""LangChain Google Generative AI Integration (GenAI). +"""LangChain Google Gen AI integration. -This module integrates Google's Generative AI models, specifically the Gemini series, -with the LangChain framework. It provides classes for interacting with chat models and -generating embeddings, leveraging Google's advanced AI capabilities. +!!! note "Vertex AI Consolidated" + + As of `langchain-google-genai 4.0.0`, this package uses the + [consolidated Google Gen AI SDK](https://googleapis.github.io/python-genai/) + instead of the legacy [`google-ai-generativelanguage`](https://googleapis.dev/python/generativelanguage/latest/) + SDK. + + This brings support for Gemini models both via the Gemini API and Gemini API in + Vertex AI, superseding `langchain-google-vertexai`. Users should migrate to this + package for continued support of Google's Generative AI models. + + Certain Vertex AI features are not yet supported in the consolidated SDK (and + subsequently this package); see [the docs](https://docs.langchain.com/oss/python/integrations/providers/google) + for more details. + +This module provides an interface to Google's Generative AI models, specifically the +Gemini series, with the LangChain framework. It provides classes for interacting with +chat models, generating embeddings, and more. **Chat Models** @@ -10,51 +25,14 @@ Google's Gemini chat models. 
It allows users to send and receive messages using a specified Gemini model, suitable for various conversational AI applications. -**LLMs** - -The `GoogleGenerativeAI` class is the primary interface for interacting with Google's -Gemini LLMs. It allows users to generate text using a specified Gemini model. - **Embeddings** The `GoogleGenerativeAIEmbeddings` class provides functionalities to generate embeddings using Google's models. These embeddings can be used for a range of NLP tasks, including semantic analysis, similarity comparisons, and more. -**Using Chat Models** - -After setting up your environment with the required API key, you can interact with the -Google Gemini models. - -```python -from langchain_google_genai import ChatGoogleGenerativeAI - -llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro") -llm.invoke("Sing a ballad of LangChain.") -``` - -**Using LLMs** - -The package also supports generating text with Google's models. - -```python -from langchain_google_genai import GoogleGenerativeAI - -llm = GoogleGenerativeAI(model="gemini-2.5-pro") -llm.invoke("Once upon a time, a library called LangChain") -``` - -**Embedding Generation** - -The package also supports creating embeddings with Google's models, useful for textual -similarity and other NLP applications. - -```python -from langchain_google_genai import GoogleGenerativeAIEmbeddings - -embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001") -embeddings.embed_query("hello, world!") -``` +See [the docs](https://docs.langchain.com/oss/python/integrations/providers/google) for +more information on usage of this package. """ from langchain_google_genai._enums import ( @@ -70,6 +48,7 @@ GoogleVectorStore, ) from langchain_google_genai.llms import GoogleGenerativeAI +from langchain_google_genai.utils import create_context_cache __all__ = [ "ChatGoogleGenerativeAI", @@ -81,4 +60,5 @@ "HarmCategory", "MediaResolution", "Modality", + "create_context_cache", ] diff --git a/libs/genai/langchain_google_genai/_common.py b/libs/genai/langchain_google_genai/_common.py index ee0e6719b..94c40828c 100644 --- a/libs/genai/langchain_google_genai/_common.py +++ b/libs/genai/langchain_google_genai/_common.py @@ -1,16 +1,18 @@ import os from importlib import metadata -from typing import Any, TypedDict +from typing import Any from google.api_core.gapic_v1.client_info import ClientInfo -from langchain_core.utils import secret_from_env -from pydantic import BaseModel, Field, SecretStr +from langchain_core.utils import from_env, secret_from_env +from pydantic import BaseModel, Field, SecretStr, model_validator +from typing_extensions import Self from langchain_google_genai._enums import ( HarmBlockThreshold, HarmCategory, MediaResolution, Modality, + SafetySetting, ) _TELEMETRY_TAG = "remote_reasoning_engine" @@ -28,10 +30,109 @@ class GoogleGenerativeAIError(Exception): class _BaseGoogleGenerativeAI(BaseModel): - """Base class for Google Generative AI LLMs.""" + """Base class for Google Generative AI LLMs. - model: str = Field(...) - """Model name to use.""" + ## Backend selection + + This class supports both the Gemini Developer API and Google Cloud's Vertex AI + Platform as backends. The backend is selected **automatically** based on your + configuration, or can be set explicitly using the `vertexai` parameter. + + **Automatic backend detection** (when `vertexai=None` / unspecified): + + 1. If `GOOGLE_GENAI_USE_VERTEXAI` env var is set, uses that value + 2. If `credentials` parameter is provided, uses Vertex AI + 3. 
If `project` parameter is provided, uses Vertex AI + 4. Otherwise, uses Gemini Developer API + + **Authentication options:** + + | Backend | Authentication Methods | + |---------|------------------------| + | **Gemini Developer API** | API key (via `api_key` param or `GOOGLE_API_KEY`/`GEMINI_API_KEY` env var) | + | **Vertex AI** | API key, service account credentials, or [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/application-default-credentials) | + + !!! tip "Quick Start" + + **Gemini Developer API** (simplest): + + ```python + # Either set GOOGLE_API_KEY env var or pass api_key directly + llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", api_key="MY_API_KEY") + ``` + + **Vertex AI with API key**: + + ```bash + export GEMINI_API_KEY='your-api-key' + export GOOGLE_GENAI_USE_VERTEXAI=true + export GOOGLE_CLOUD_PROJECT='your-project-id' + ``` + + ```python + # Automatically uses Vertex AI with API key + llm = ChatGoogleGenerativeAI(model="gemini-3-pro-preview") + ``` + + Or programmatically: + + ```python + llm = ChatGoogleGenerativeAI( + model="gemini-3-pro-preview", + api_key="your-api-key", + project="your-project-id", + vertexai=True, # Explicitly use Vertex AI + ) + ``` + + **Vertex AI with credentials**: + + ```python + # Ensure ADC is configured: gcloud auth application-default login + # Either set GOOGLE_CLOUD_PROJECT env var or pass project directly + # Location defaults to us-central1 or can be set via GOOGLE_CLOUD_LOCATION + llm = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + project="my-project", + # location="us-central1", + ) + ``` + + ## Environment variables + + | Variable | Purpose | Backend | + |----------|---------|---------| + | `GOOGLE_API_KEY` | API key (primary) | Both (see `GOOGLE_GENAI_USE_VERTEXAI`) | + | `GEMINI_API_KEY` | API key (fallback) | Both (see `GOOGLE_GENAI_USE_VERTEXAI`) | + | `GOOGLE_GENAI_USE_VERTEXAI` | Force Vertex AI backend (`true`/`false`) | Vertex AI | + | `GOOGLE_CLOUD_PROJECT` | GCP project ID | Vertex AI | + | `GOOGLE_CLOUD_LOCATION` | GCP region (default: `us-central1`) | Vertex AI | + | `HTTPS_PROXY` | HTTP/HTTPS proxy URL | Both | + | `SSL_CERT_FILE` | Custom SSL certificate file | Both | + + `GOOGLE_API_KEY` is checked first for backwards compatibility. (`GEMINI_API_KEY` was + introduced later to better reflect the API's branding.) + + ## Proxy configuration + + Set these before initializing: + + ```bash + export HTTPS_PROXY='http://username:password@proxy_uri:port' + export SSL_CERT_FILE='path/to/cert.pem' # Optional: custom SSL certificate + ``` + + For SOCKS5 proxies or advanced proxy configuration, use the `client_args` parameter: + + ```python + llm = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + client_args={"proxy": "socks5://user:pass@host:port"}, + ) + ``` + """ # noqa: E501 + + # --- Client params --- google_api_key: SecretStr | None = Field( alias="api_key", @@ -39,28 +140,172 @@ class _BaseGoogleGenerativeAI(BaseModel): ["GOOGLE_API_KEY", "GEMINI_API_KEY"], default=None ), ) - """Google AI API key. + """API key for authentication. If not specified, will check the env vars `GOOGLE_API_KEY` and `GEMINI_API_KEY` with precedence given to `GOOGLE_API_KEY`. 
+ + - **Gemini Developer API**: API key is required (default when no `project` is set) + - **Vertex AI**: API key is optional (set `vertexai=True` or provide `project`) + - If provided, uses API key for authentication + - If not provided, uses [Application Default Credentials (ADC)](https://docs.cloud.google.com/docs/authentication/application-default-credentials) + or `credentials` parameter + + !!! tip "Vertex AI with API key" + + You can now use Vertex AI with API key authentication instead of service account + credentials. Set `GOOGLE_GENAI_USE_VERTEXAI=true` or `vertexai=True` along with + your API key and project. """ credentials: Any = None - """The default custom credentials to use when making API calls. + """Custom credentials for Vertex AI authentication. + + When provided, forces Vertex AI backend (regardless of API key presence in + `google_api_key`/`api_key`). + + Accepts a `google.auth.credentials.Credentials` object. + + If omitted and no API key is found, the SDK uses + [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/application-default-credentials). + + !!! example "Service account credentials" + + ```python + from google.oauth2 import service_account + + credentials = service_account.Credentials.from_service_account_file( + "path/to/service-account.json", + scopes=["https://www.googleapis.com/auth/cloud-platform"], + ) + + llm = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + credentials=credentials, + project="my-project-id", + ) + ``` + """ + + vertexai: bool | None = Field(default=None) + """Whether to use Vertex AI backend. + + If `None` (default), backend is automatically determined as follows: + + 1. If the `GOOGLE_GENAI_USE_VERTEXAI` env var is set, uses Vertex AI + 2. If the `credentials` parameter is provided, uses Vertex AI + 3. If the `project` parameter is provided, uses Vertex AI + 4. Otherwise, uses Gemini Developer API + + Set explicitly to `True` or `False` to override auto-detection. + + !!! tip "Vertex AI with API key" + + You can use Vertex AI with API key authentication by setting: + + ```bash + export GEMINI_API_KEY='your-api-key' + export GOOGLE_GENAI_USE_VERTEXAI=true + export GOOGLE_CLOUD_PROJECT='your-project-id' + ``` + + Or programmatically: + + ```python + llm = ChatGoogleGenerativeAI( + model="gemini-3-pro-preview", + api_key="your-api-key", + project="your-project-id", + vertexai=True, + ) + ``` + + This allows for simpler authentication compared to service account JSON files. + """ + + project: str | None = Field(default=None) + """Google Cloud project ID (**Vertex AI only**). + + Required when using Vertex AI. + + Falls back to `GOOGLE_CLOUD_PROJECT` env var if not provided. + """ + + location: str | None = Field( + default_factory=from_env("GOOGLE_CLOUD_LOCATION", default=None) + ) + """Google Cloud region (**Vertex AI only**). - If not provided, credentials will be ascertained from the `GOOGLE_API_KEY` - or `GEMINI_API_KEY` env vars with precedence given to `GOOGLE_API_KEY`. + If not provided, falls back to the `GOOGLE_CLOUD_LOCATION` env var, then + `'us-central1'`. """ + base_url: str | dict | None = Field(default=None, alias="client_options") + """Custom base URL for the API client. + + If not provided, defaults depend on the API being used: + + - **Gemini Developer API** (`api_key`/`google_api_key`): `https://generativelanguage.googleapis.com/` + - **Vertex AI** (`credentials`): `https://{location}-aiplatform.googleapis.com/` + + !!! 
note + + Typed to accept `dict` to support backwards compatibility for the (now removed) + `client_options` param. + + If a `dict` is passed in, it will **only** extract the `'api_endpoint'` key. + """ + + additional_headers: dict[str, str] | None = Field( + default=None, + ) + """Additional HTTP headers to include in API requests. + + Passed as `headers` to `HttpOptions` when creating the client. + + !!! example + + ```python + llm = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + additional_headers={ + "X-Custom-Header": "value", + }, + ) + ``` + """ + + client_args: dict[str, Any] | None = Field(default=None) + """Additional arguments to pass to the underlying HTTP client. + + Applied to both sync and async clients. + + !!! example "SOCKS5 proxy" + + ```python + llm = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + client_args={"proxy": "socks5://user:pass@host:port"}, + ) + ``` + """ + + # --- Model / invocation params --- + + model: str = Field(...) + """Model name to use.""" + temperature: float = 0.7 """Run inference with this temperature. Must be within `[0.0, 2.0]`. - !!! warning "Gemini 3.0+ models" + !!! note "Automatic override for Gemini 3.0+ models" - Setting `temperature < 1.0` for Gemini 3.0+ models can cause infinite loops, - degraded reasoning performance, and failure on complex tasks. + If `temperature` is not explicitly set and the model is Gemini 3.0 or later, + it will be automatically set to `1.0` instead of the default `0.7` per the + Google GenAI API best practices, as it can cause infinite loops, degraded + reasoning performance, and failure on complex tasks. """ @@ -104,63 +349,6 @@ class _BaseGoogleGenerativeAI(BaseModel): timeout: float | None = Field(default=None, alias="request_timeout") """The maximum number of seconds to wait for a response.""" - client_options: dict | None = Field( - default=None, - ) - """A dictionary of client options to pass to the Google API client. - - Example: `api_endpoint` - - !!! warning - - If both `client_options['api_endpoint']` and `base_url` are specified, - the `api_endpoint` in `client_options` takes precedence. - """ - - base_url: str | None = Field( - default=None, - ) - """Base URL to use for the API client. - - This is a convenience alias for `client_options['api_endpoint']`. - - - **REST transport** (`transport="rest"`): Accepts full URLs with paths - - - `https://api.example.com/v1/path` - - `https://webhook.site/unique-path` - - - **gRPC transports** (`transport="grpc"` or `transport="grpc_asyncio"`): Only - accepts `hostname:port` format - - - `api.example.com:443` - - `custom.googleapis.com:443` - - `https://api.example.com` (auto-formatted to `api.example.com:443`) - - NOT `https://webhook.site/path` (paths are not supported in gRPC) - - NOT `api.example.com/path` (paths are not supported in gRPC) - - !!! warning - - If `client_options` already contains an `api_endpoint`, this parameter will be - ignored in favor of the existing value. - """ - - transport: str | None = Field( - default=None, - alias="api_transport", - ) - """A string, one of: `['rest', 'grpc', 'grpc_asyncio']`. - - The Google client library defaults to `'grpc'` for sync clients. - - For async clients, `'rest'` is converted to `'grpc_asyncio'` unless - a custom endpoint is specified. 
- """ - - additional_headers: dict[str, str] | None = Field( - default=None, - ) - """Key-value dictionary representing additional headers for the model call""" - response_modalities: list[Modality] | None = Field( default=None, ) @@ -219,7 +407,7 @@ class _BaseGoogleGenerativeAI(BaseModel): !!! example ```python - from google.generativeai.types.safety_types import HarmBlockThreshold, HarmCategory + from google.genai.types import HarmBlockThreshold, HarmCategory safety_settings = { HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, @@ -230,6 +418,54 @@ class _BaseGoogleGenerativeAI(BaseModel): ``` """ # noqa: E501 + @model_validator(mode="after") + def _resolve_project_from_credentials(self) -> Self: + """Extract project from credentials if not explicitly set. + + For backward compatibility with `langchain-google-vertexai`, which extracts + `project_id` from credentials when not explicitly provided. + """ + if self.project is None: + if self.credentials and hasattr(self.credentials, "project_id"): + self.project = self.credentials.project_id + return self + + @model_validator(mode="after") + def _determine_backend(self) -> Self: + """Determine which backend (Vertex AI or Gemini Developer API) to use. + + The backend is determined by the following priority: + 1. Explicit `vertexai` parameter value (if not None) + 2. `GOOGLE_GENAI_USE_VERTEXAI` environment variable + 3. Presence of `credentials` parameter (forces Vertex AI) + 4. Presence of `project` parameter (implies Vertex AI) + 5. Default to Gemini Developer API (False) + + Stores result in `_use_vertexai` attribute for use in client initialization. + """ + use_vertexai = self.vertexai + + if use_vertexai is None: + # Check environment variable + env_var = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() + if env_var in ("true", "1", "yes"): + use_vertexai = True + elif env_var in ("false", "0", "no"): + use_vertexai = False + # Check for credentials (forces Vertex AI) + elif self.credentials is not None: + use_vertexai = True + # Check for project (implies Vertex AI) + elif self.project is not None: + use_vertexai = True + else: + # Default to Gemini Developer API + use_vertexai = False + + # Store the determined backend in a private attribute + object.__setattr__(self, "_use_vertexai", use_vertexai) + return self + @property def lc_secrets(self) -> dict[str, str]: # Either could contain the API key @@ -279,7 +515,4 @@ def get_client_info(module: str | None = None) -> "ClientInfo": ) -class SafetySettingDict(TypedDict): - category: HarmCategory - - threshold: HarmBlockThreshold +SafetySettingDict = SafetySetting diff --git a/libs/genai/langchain_google_genai/_compat.py b/libs/genai/langchain_google_genai/_compat.py index 604c2720b..7b642052b 100644 --- a/libs/genai/langchain_google_genai/_compat.py +++ b/libs/genai/langchain_google_genai/_compat.py @@ -20,20 +20,24 @@ def translate_citations_to_grounding_metadata( Google AI grounding metadata dictionary. Example: - >>> citations = [ - ... create_citation( - ... url="https://uefa.com/euro2024", - ... title="UEFA Euro 2024 Results", - ... start_index=0, - ... end_index=47, - ... cited_text="Spain won the UEFA Euro 2024 championship", - ... ) - ... 
] - >>> metadata = translate_citations_to_grounding_metadata(citations) - >>> len(metadata["groundingChunks"]) - 1 - >>> metadata["groundingChunks"][0]["web"]["uri"] - 'https://uefa.com/euro2024' + ```python + citations = [ + create_citation( + url="https://uefa.com/euro2024", + title="UEFA Euro 2024 Results", + start_index=0, + end_index=47, + cited_text="Spain won the UEFA Euro 2024 championship", + ) + ] + + metadata = translate_citations_to_grounding_metadata(citations) + len(metadata["groundingChunks"]) + # -> 1 + + metadata["groundingChunks"][0]["web"]["uri"] + # -> 'https://uefa.com/euro2024' + ``` """ if not citations: return {} @@ -123,15 +127,15 @@ def translate_citations_to_grounding_metadata( def _convert_from_v1_to_generativelanguage_v1beta( content: list[types.ContentBlock], model_provider: str | None ) -> list[dict[str, Any]]: - """Convert v1 content blocks to `google.ai.generativelanguage_v1beta.types.Content`. + """Convert v1 content blocks to `generativelanguage_v1beta` `Content`. Args: content: List of v1 `ContentBlock` objects. model_provider: The model provider name that generated the v1 content. Returns: - List of dictionaries in `google.ai.generativelanguage_v1beta.types.Content` - format, ready to be sent to the API. + List of dictionaries in `generativelanguage_v1beta` `Content` format, ready to + be sent to the API. """ new_content: list = [] for block in content: diff --git a/libs/genai/langchain_google_genai/_enums.py b/libs/genai/langchain_google_genai/_enums.py index 1f69213f4..202f4b76b 100644 --- a/libs/genai/langchain_google_genai/_enums.py +++ b/libs/genai/langchain_google_genai/_enums.py @@ -1,8 +1,35 @@ -import google.ai.generativelanguage_v1beta as genai +from google.genai.types import ( + BlockedReason, + HarmBlockThreshold, + HarmCategory, + MediaModality, + MediaResolution, + Modality, + SafetySetting, +) -HarmBlockThreshold = genai.SafetySetting.HarmBlockThreshold -HarmCategory = genai.HarmCategory -Modality = genai.GenerationConfig.Modality -MediaResolution = genai.GenerationConfig.MediaResolution +__all__ = [ + "BlockedReason", + "HarmBlockThreshold", + "HarmCategory", + "MediaModality", + "MediaResolution", + "Modality", + "SafetySetting", +] -__all__ = ["HarmBlockThreshold", "HarmCategory", "MediaResolution", "Modality"] +# Migration notes: +# - Added: +# - `BlockedReason` +# - `SafetySetting` +# +# Parity between generativelanguage_v1beta and genai.types +# - `HarmBlockThreshold`: equivalent +# - `HarmCategory`: there are a few Vertex-only and categories not supported by Gemini +# - `MediaResolution`: equivalent +# +# `MediaModality` has additional modalities not present in `Modality`: +# - `VIDEO` +# - `DOCUMENT` +# +# TODO: investigate why both? Or not just use `MediaModality` everywhere? 
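
For reference, a minimal usage sketch of the enums re-exported above, assuming the dict-based `safety_settings` form documented in `_common.py`; the model name and threshold choices are illustrative only, not part of the patch.

```python
from google.genai.types import HarmBlockThreshold, HarmCategory

from langchain_google_genai import ChatGoogleGenerativeAI

# Map each harm category to a blocking threshold. The enum values now come from
# the consolidated google-genai SDK rather than the legacy
# google-ai-generativelanguage enums.
safety_settings = {
    HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
}

llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",  # illustrative model name
    safety_settings=safety_settings,
)
llm.invoke("Hello!")
```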
diff --git a/libs/genai/langchain_google_genai/_function_utils.py b/libs/genai/langchain_google_genai/_function_utils.py index e2fc82a9d..c2727451a 100644 --- a/libs/genai/langchain_google_genai/_function_utils.py +++ b/libs/genai/langchain_google_genai/_function_utils.py @@ -2,7 +2,6 @@ import collections import importlib -import json import logging from collections.abc import Callable, Sequence from typing import ( @@ -12,9 +11,7 @@ cast, ) -import google.ai.generativelanguage as glm -import google.ai.generativelanguage_v1beta.types as gapic -import proto # type: ignore[import-untyped] +from google.genai import types from langchain_core.tools import BaseTool from langchain_core.tools import tool as callable_as_lc_tool from langchain_core.utils.function_calling import ( @@ -30,32 +27,50 @@ TYPE_ENUM = { - "string": glm.Type.STRING, - "number": glm.Type.NUMBER, - "integer": glm.Type.INTEGER, - "boolean": glm.Type.BOOLEAN, - "array": glm.Type.ARRAY, - "object": glm.Type.OBJECT, + "string": types.Type.STRING, + "number": types.Type.NUMBER, + "integer": types.Type.INTEGER, + "boolean": types.Type.BOOLEAN, + "array": types.Type.ARRAY, + "object": types.Type.OBJECT, "null": None, } -_ALLOWED_SCHEMA_FIELDS = [] -_ALLOWED_SCHEMA_FIELDS.extend([f.name for f in gapic.Schema()._pb.DESCRIPTOR.fields]) -_ALLOWED_SCHEMA_FIELDS.extend( - list(gapic.Schema.to_dict(gapic.Schema(), preserving_proto_field_name=False).keys()) -) +# Note: For google.genai, we'll use a simplified approach for allowed schema fields +# since the new library doesn't expose protobuf fields in the same way +_ALLOWED_SCHEMA_FIELDS = [ + "type", + "type_", + "description", + "enum", + "format", + "items", + "properties", + "required", + "nullable", + "anyOf", + "default", + "minimum", + "maximum", + "minLength", + "maxLength", + "pattern", + "minItems", + "maxItems", + "title", +] _ALLOWED_SCHEMA_FIELDS_SET = set(_ALLOWED_SCHEMA_FIELDS) # Info: This is a FunctionDeclaration(=fc). _FunctionDeclarationLike = ( - BaseTool | type[BaseModel] | gapic.FunctionDeclaration | Callable | dict[str, Any] + BaseTool | type[BaseModel] | types.FunctionDeclaration | Callable | dict[str, Any] ) -_GoogleSearchRetrievalLike = gapic.GoogleSearchRetrieval | dict[str, Any] +_GoogleSearchRetrievalLike = types.GoogleSearchRetrieval | dict[str, Any] -_GoogleSearchLike = gapic.Tool.GoogleSearch | dict[str, Any] -_CodeExecutionLike = gapic.CodeExecution | dict[str, Any] -_UrlContextLike = gapic.UrlContext | dict[str, Any] +_GoogleSearchLike = types.GoogleSearch | dict[str, Any] +_CodeExecutionLike = types.ToolCodeExecution | dict[str, Any] +_UrlContextLike = types.UrlContext | dict[str, Any] class _ToolDict(TypedDict): @@ -67,9 +82,9 @@ class _ToolDict(TypedDict): # Info: This means one tool=Sequence of FunctionDeclaration -# The dict should be gapic.Tool like. {"function_declarations": [ { "name": ...}. +# The dict should be Tool like. {"function_declarations": [ { "name": ...}. # OpenAI like dict is not be accepted. 
{{'type': 'function', 'function': {'name': ...} -_ToolType = gapic.Tool | _ToolDict | _FunctionDeclarationLike +_ToolType = types.Tool | _ToolDict | _FunctionDeclarationLike _ToolsType = Sequence[_ToolType] @@ -79,7 +94,8 @@ def _format_json_schema_to_gapic(schema: dict[str, Any]) -> dict[str, Any]: if key == "definitions": continue if key == "items": - converted_schema["items"] = _format_json_schema_to_gapic(value) + if value is not None: + converted_schema["items"] = _format_json_schema_to_gapic(value) elif key == "properties": converted_schema["properties"] = _get_properties_from_schema(value) continue @@ -90,8 +106,14 @@ def _format_json_schema_to_gapic(schema: dict[str, Any]) -> dict[str, Any]: f"Got {len(value)}, ignoring other than first value!" ) return _format_json_schema_to_gapic(value[0]) - elif key in ["type", "_type"]: - converted_schema["type"] = str(value).upper() + elif key in ["type", "type_"]: + if isinstance(value, dict): + converted_schema["type"] = value["_value_"] + elif isinstance(value, str): + converted_schema["type"] = value + else: + msg = f"Invalid type: {value}" + raise ValueError(msg) elif key not in _ALLOWED_SCHEMA_FIELDS_SET: logger.warning(f"Key '{key}' is not supported in schema, ignoring") else: @@ -99,53 +121,120 @@ def _format_json_schema_to_gapic(schema: dict[str, Any]) -> dict[str, Any]: return converted_schema -def _dict_to_gapic_schema(schema: dict[str, Any]) -> gapic.Schema | None: +def _dict_to_genai_schema( + schema: dict[str, Any], is_property: bool = False +) -> types.Schema | None: if schema: dereferenced_schema = dereference_refs(schema) formatted_schema = _format_json_schema_to_gapic(dereferenced_schema) - json_schema = json.dumps(formatted_schema) - return gapic.Schema.from_json(json_schema) + # Convert the formatted schema to google.genai.types.Schema + schema_dict = {} + # Set type if present, or if we can infer it from anyOf + # (for Gemini compatibility) + if "type" in formatted_schema: + type_obj = formatted_schema["type"] + if isinstance(type_obj, dict): + type_value = type_obj["_value_"] + elif isinstance(type_obj, str): + type_value = type_obj + else: + msg = f"Invalid type: {type_obj}" + raise ValueError(msg) + schema_dict["type"] = types.Type(type_value) + elif "anyOf" in formatted_schema: + # Try to infer type from anyOf for Gemini compatibility + inferred_type = _get_type_from_schema(formatted_schema) + if ( + inferred_type != types.Type.STRING + ): # Only set if it's not the default fallback + schema_dict["type"] = inferred_type + if "description" in formatted_schema: + schema_dict["description"] = formatted_schema["description"] + if "title" in formatted_schema: + schema_dict["title"] = formatted_schema["title"] + if "properties" in formatted_schema: + # Recursively process each property + properties_dict = {} + for prop_name, prop_schema in formatted_schema["properties"].items(): + properties_dict[prop_name] = _dict_to_genai_schema( + prop_schema, is_property=True + ) + schema_dict["properties"] = properties_dict # type: ignore[assignment] + # Set required field for all schemas + if "required" in formatted_schema and formatted_schema["required"] is not None: + schema_dict["required"] = formatted_schema["required"] + elif not is_property: + # For backward compatibility, set empty list for non-property schemas + empty_required: list[str] = [] + schema_dict["required"] = empty_required # type: ignore[assignment] + if "items" in formatted_schema: + # Recursively process items schema + schema_dict["items"] = _dict_to_genai_schema( 
+ formatted_schema["items"], is_property=True + ) # type: ignore[assignment] + if "enum" in formatted_schema: + schema_dict["enum"] = formatted_schema["enum"] + if "nullable" in formatted_schema: + schema_dict["nullable"] = formatted_schema["nullable"] + if "anyOf" in formatted_schema: + # Convert anyOf list to list of Schema objects + any_of_schemas = [] + for any_of_item in formatted_schema["anyOf"]: + any_of_schema = _dict_to_genai_schema(any_of_item, is_property=True) + if any_of_schema: + any_of_schemas.append(any_of_schema) + schema_dict["any_of"] = any_of_schemas # type: ignore[assignment] + return types.Schema.model_validate(schema_dict) return None def _format_dict_to_function_declaration( tool: FunctionDescription | dict[str, Any], -) -> gapic.FunctionDeclaration: - return gapic.FunctionDeclaration( - name=tool.get("name") or tool.get("title"), - description=tool.get("description"), - parameters=_dict_to_gapic_schema(tool.get("parameters", {})), +) -> types.FunctionDeclaration: + name = tool.get("name") or tool.get("title") or "MISSING_NAME" + description = tool.get("description") or None + parameters = _dict_to_genai_schema(tool.get("parameters", {})) + return types.FunctionDeclaration( + name=str(name), + description=description, + parameters=parameters, ) -# Info: gapic.Tool means function_declarations and proto.Message. +# Info: gapicTool means function_declarations and other tool types def convert_to_genai_function_declarations( tools: _ToolsType, -) -> gapic.Tool: +) -> types.Tool: + tool_dict: dict[str, Any] = {} if not isinstance(tools, collections.abc.Sequence): logger.warning( "convert_to_genai_function_declarations expects a Sequence " "and not a single tool." ) tools = [tools] - gapic_tool = gapic.Tool() + function_declarations: list[types.FunctionDeclaration] = [] for tool in tools: - if any(f in gapic_tool for f in ["google_search_retrieval"]): - msg = ( - "Providing multiple google_search_retrieval" - " or mixing with function_declarations is not supported" - ) - raise ValueError(msg) - if isinstance(tool, (gapic.Tool)): - rt: gapic.Tool = ( - tool if isinstance(tool, gapic.Tool) else tool._raw_tool # type: ignore - ) - if "google_search_retrieval" in rt: - gapic_tool.google_search_retrieval = rt.google_search_retrieval - if "function_declarations" in rt: - gapic_tool.function_declarations.extend(rt.function_declarations) - if "google_search" in rt: - gapic_tool.google_search = rt.google_search + if isinstance(tool, types.Tool): + # Handle existing Tool objects + if hasattr(tool, "function_declarations") and tool.function_declarations: + function_declarations.extend(tool.function_declarations) + if ( + hasattr(tool, "google_search_retrieval") + and tool.google_search_retrieval + ): + if "google_search_retrieval" in tool_dict: + msg = ( + "Providing multiple google_search_retrieval " + "or mixing with function_declarations is not supported" + ) + raise ValueError(msg) + tool_dict["google_search_retrieval"] = tool.google_search_retrieval + if hasattr(tool, "google_search") and tool.google_search: + tool_dict["google_search"] = tool.google_search + if hasattr(tool, "code_execution") and tool.code_execution: + tool_dict["code_execution"] = tool.code_execution + if hasattr(tool, "url_context") and tool.url_context: + tool_dict["url_context"] = tool.url_context elif isinstance(tool, dict): # not _ToolDictLike if not any( @@ -158,61 +247,91 @@ def convert_to_genai_function_declarations( "url_context", ] ): - fd = _format_to_gapic_function_declaration(tool) # type: 
ignore[arg-type] - gapic_tool.function_declarations.append(fd) + fd = _format_to_genai_function_declaration(tool) # type: ignore[arg-type] + function_declarations.append(fd) continue # _ToolDictLike tool = cast("_ToolDict", tool) if "function_declarations" in tool: - function_declarations = tool["function_declarations"] - if not isinstance( + tool_function_declarations = tool["function_declarations"] + if tool_function_declarations is not None and not isinstance( tool["function_declarations"], collections.abc.Sequence ): msg = ( "function_declarations should be a list" - f"got '{type(function_declarations)}'" + f"got '{type(tool_function_declarations)}'" ) raise ValueError(msg) - if function_declarations: + if tool_function_declarations: fds = [ - _format_to_gapic_function_declaration(fd) - for fd in function_declarations + _format_to_genai_function_declaration(fd) + for fd in tool_function_declarations ] - gapic_tool.function_declarations.extend(fds) + function_declarations.extend(fds) if "google_search_retrieval" in tool: - gapic_tool.google_search_retrieval = gapic.GoogleSearchRetrieval( - tool["google_search_retrieval"] - ) + if "google_search_retrieval" in tool_dict: + msg = ( + "Providing multiple google_search_retrieval" + " or mixing with function_declarations is not supported" + ) + raise ValueError(msg) + if isinstance(tool["google_search_retrieval"], dict): + tool_dict["google_search_retrieval"] = types.GoogleSearchRetrieval( + **tool["google_search_retrieval"] + ) + else: + tool_dict["google_search_retrieval"] = tool[ + "google_search_retrieval" + ] if "google_search" in tool: - gapic_tool.google_search = gapic.Tool.GoogleSearch( - tool["google_search"] - ) + if isinstance(tool["google_search"], dict): + tool_dict["google_search"] = types.GoogleSearch( + **tool["google_search"] + ) + else: + tool_dict["google_search"] = tool["google_search"] if "code_execution" in tool: - gapic_tool.code_execution = gapic.CodeExecution(tool["code_execution"]) + if isinstance(tool["code_execution"], dict): + tool_dict["code_execution"] = types.ToolCodeExecution( + **tool["code_execution"] + ) + else: + tool_dict["code_execution"] = tool["code_execution"] if "url_context" in tool: - gapic_tool.url_context = gapic.UrlContext(tool["url_context"]) + if isinstance(tool["url_context"], dict): + tool_dict["url_context"] = types.UrlContext(**tool["url_context"]) + else: + tool_dict["url_context"] = tool["url_context"] else: - fd = _format_to_gapic_function_declaration(tool) - gapic_tool.function_declarations.append(fd) - return gapic_tool + fd = _format_to_genai_function_declaration(tool) + function_declarations.append(fd) + if function_declarations: + tool_dict["function_declarations"] = function_declarations + + return types.Tool(**tool_dict) -def tool_to_dict(tool: gapic.Tool) -> _ToolDict: +def tool_to_dict(tool: types.Tool) -> _ToolDict: def _traverse_values(raw: Any) -> Any: if isinstance(raw, list): return [_traverse_values(v) for v in raw] if isinstance(raw, dict): return {k: _traverse_values(v) for k, v in raw.items()} - if isinstance(raw, proto.Message): - return _traverse_values(type(raw).to_dict(raw)) + if hasattr(raw, "__dict__"): + return _traverse_values(raw.__dict__) return raw - return _traverse_values(type(tool).to_dict(tool)) + if hasattr(tool, "model_dump"): + raw_result = tool.model_dump() + else: + raw_result = tool.__dict__ + + return _traverse_values(raw_result) -def _format_to_gapic_function_declaration( +def _format_to_genai_function_declaration( tool: _FunctionDeclarationLike, 
-) -> gapic.FunctionDeclaration: +) -> types.FunctionDeclaration: if isinstance(tool, BaseTool): return _format_base_tool_to_function_declaration(tool) if isinstance(tool, type) and is_basemodel_subclass_safe(tool): @@ -224,9 +343,7 @@ def _format_to_gapic_function_declaration( all(k in tool for k in ("name", "description")) and "parameters" not in tool ): function = cast("dict", tool) - elif ( - "parameters" in tool and tool["parameters"].get("properties") # type: ignore[index] - ): + elif "parameters" in tool and tool["parameters"].get("properties"): function = convert_to_openai_tool(cast("dict", tool))["function"] else: function = cast("dict", tool) @@ -245,15 +362,15 @@ def _format_to_gapic_function_declaration( def _format_base_tool_to_function_declaration( tool: BaseTool, -) -> gapic.FunctionDeclaration: +) -> types.FunctionDeclaration: if not tool.args_schema: - return gapic.FunctionDeclaration( + return types.FunctionDeclaration( name=tool.name, description=tool.description, - parameters=gapic.Schema( - type=gapic.Type.OBJECT, + parameters=types.Schema( + type=types.Type.OBJECT, properties={ - "__arg1": gapic.Schema(type=gapic.Type.STRING), + "__arg1": types.Schema(type=types.Type.STRING), }, required=["__arg1"], ), @@ -261,9 +378,11 @@ def _format_base_tool_to_function_declaration( if isinstance(tool.args_schema, dict): schema = tool.args_schema - elif issubclass(tool.args_schema, BaseModel): + elif isinstance(tool.args_schema, type) and issubclass(tool.args_schema, BaseModel): schema = tool.args_schema.model_json_schema() - elif issubclass(tool.args_schema, BaseModelV1): + elif isinstance(tool.args_schema, type) and issubclass( + tool.args_schema, BaseModelV1 + ): schema = tool.args_schema.schema() else: msg = ( @@ -271,9 +390,9 @@ def _format_base_tool_to_function_declaration( f"got {tool.args_schema}." 
) raise NotImplementedError(msg) - parameters = _dict_to_gapic_schema(schema) + parameters = _dict_to_genai_schema(schema) - return gapic.FunctionDeclaration( + return types.FunctionDeclaration( name=tool.name or schema.get("title"), description=tool.description or schema.get("description"), parameters=parameters, @@ -284,7 +403,7 @@ def _convert_pydantic_to_genai_function( pydantic_model: type[BaseModel], tool_name: str | None = None, tool_description: str | None = None, -) -> gapic.FunctionDeclaration: +) -> types.FunctionDeclaration: if issubclass(pydantic_model, BaseModel): schema = pydantic_model.model_json_schema() elif issubclass(pydantic_model, BaseModelV1): @@ -292,21 +411,17 @@ def _convert_pydantic_to_genai_function( else: msg = f"pydantic_model must be a Pydantic BaseModel, got {pydantic_model}" raise NotImplementedError(msg) - schema = dereference_refs(schema) schema.pop("definitions", None) - return gapic.FunctionDeclaration( + + # Convert to google.genai Schema format - remove title/description for parameters + schema_for_params = schema.copy() + schema_for_params.pop("title", None) + schema_for_params.pop("description", None) + parameters = _dict_to_genai_schema(schema_for_params) + return types.FunctionDeclaration( name=tool_name if tool_name else schema.get("title"), description=tool_description if tool_description else schema.get("description"), - parameters={ - "properties": _get_properties_from_schema_any( - schema.get("properties") - ), # TODO: use _dict_to_gapic_schema() if possible - # "items": _get_items_from_schema_any( - # schema - # ), # TODO: fix it https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/function-calling?hl#schema - "required": schema.get("required", []), - "type_": TYPE_ENUM[schema["type"]], - }, + parameters=parameters, ) @@ -339,19 +454,17 @@ def _get_properties_from_schema(schema: dict) -> dict[str, Any]: _format_json_schema_to_gapic(anyOf_type) for anyOf_type in v.get("anyOf", []) ] - # For non-nullable anyOf, we still need to set a type - item_type_ = _get_type_from_schema(v) - properties_item["type_"] = item_type_ + # Don't set type_ when anyOf is present as they're mutually exclusive elif v.get("type") or v.get("anyOf") or v.get("type_"): item_type_ = _get_type_from_schema(v) - properties_item["type_"] = item_type_ + properties_item["type"] = item_type_ if _is_nullable_schema(v): properties_item["nullable"] = True # Replace `v` with chosen definition for array / object json types any_of_types = v.get("anyOf") - if any_of_types and item_type_ in [glm.Type.ARRAY, glm.Type.OBJECT]: - json_type_ = "array" if item_type_ == glm.Type.ARRAY else "object" + if any_of_types and item_type_ in [types.Type.ARRAY, types.Type.OBJECT]: + json_type_ = "array" if item_type_ == types.Type.ARRAY else "object" # Use Index -1 for consistency with `_get_nullable_type_from_schema` filtered_schema = [ val for val in any_of_types if val.get("type") == json_type_ @@ -384,10 +497,10 @@ def _get_properties_from_schema(schema: dict) -> dict[str, Any]: if description and isinstance(description, str): properties_item["description"] = description - if properties_item.get("type_") == glm.Type.ARRAY and v.get("items"): + if properties_item.get("type") == types.Type.ARRAY and v.get("items"): properties_item["items"] = _get_items_from_schema_any(v.get("items")) - if properties_item.get("type_") == glm.Type.OBJECT: + if properties_item.get("type") == types.Type.OBJECT: if ( v.get("anyOf") and isinstance(v["anyOf"], list) @@ -406,7 +519,7 @@ def 
_get_properties_from_schema(schema: dict) -> dict[str, Any]: elif not v.get("additionalProperties"): # Only provide dummy type for object without properties AND without # additionalProperties - properties_item["type_"] = glm.Type.STRING + properties_item["type"] = types.Type.STRING if k == "title" and "description" not in properties_item: properties_item["description"] = k + " is " + str(v) @@ -428,15 +541,13 @@ def _get_items_from_schema(schema: dict | list | str) -> dict[str, Any]: for i, v in enumerate(schema): items[f"item{i}"] = _get_properties_from_schema_any(v) elif isinstance(schema, dict): - items["type_"] = _get_type_from_schema(schema) - if items["type_"] == glm.Type.OBJECT and "properties" in schema: + items["type"] = _get_type_from_schema(schema) + if items["type"] == types.Type.OBJECT and "properties" in schema: items["properties"] = _get_properties_from_schema_any(schema["properties"]) - if items["type_"] == glm.Type.ARRAY and "items" in schema: + if items["type"] == types.Type.ARRAY and "items" in schema: items["items"] = _format_json_schema_to_gapic(schema["items"]) if "title" in schema or "description" in schema: - items["description"] = ( - schema.get("description") or schema.get("title") or "" - ) + items["description"] = schema.get("description") or schema.get("title") if "enum" in schema: items["enum"] = schema["enum"] if _is_nullable_schema(schema): @@ -447,67 +558,72 @@ def _get_items_from_schema(schema: dict | list | str) -> dict[str, Any]: items["enum"] = schema["enum"] else: # str - items["type_"] = _get_type_from_schema({"type": schema}) + items["type"] = _get_type_from_schema({"type": schema}) if _is_nullable_schema({"type": schema}): items["nullable"] = True return items -def _get_type_from_schema(schema: dict[str, Any]) -> int: - return _get_nullable_type_from_schema(schema) or glm.Type.STRING +def _get_type_from_schema(schema: dict[str, Any]) -> types.Type: + type_ = _get_nullable_type_from_schema(schema) + return type_ if type_ is not None else types.Type.STRING -def _get_nullable_type_from_schema(schema: dict[str, Any]) -> int | None: +def _get_nullable_type_from_schema(schema: dict[str, Any]) -> types.Type | None: if "anyOf" in schema: - types = [ + schema_types = [ _get_nullable_type_from_schema(sub_schema) for sub_schema in schema["anyOf"] ] - types = [t for t in types if t is not None] # Remove None values - if types: - return types[-1] # TODO: update FunctionDeclaration and pass all types? + schema_types = [t for t in schema_types if t is not None] # Remove None values + # TODO: update FunctionDeclaration and pass all types? 
+ if schema_types: + return schema_types[-1] elif "type" in schema or "type_" in schema: type_ = schema["type"] if "type" in schema else schema["type_"] - if isinstance(type_, int): + if isinstance(type_, types.Type): return type_ - stype = str(schema["type"]) if "type" in schema else str(schema["type_"]) - return TYPE_ENUM.get(stype, glm.Type.STRING) + if isinstance(type_, int): + msg = f"Invalid type, int not supported: {type_}" + raise ValueError(msg) + if isinstance(type_, dict): + return types.Type(type_["_value_"]) + if isinstance(type_, str): + if type_ == "null": + return None + return types.Type(type_) + return None else: pass - return glm.Type.STRING # Default to string if no valid types found + return None # No valid types found def _is_nullable_schema(schema: dict[str, Any]) -> bool: if "anyOf" in schema: - types = [ + schema_types = [ _get_nullable_type_from_schema(sub_schema) for sub_schema in schema["anyOf"] ] - return any(t is None for t in types) + return any(t is None for t in schema_types) if "type" in schema or "type_" in schema: type_ = schema["type"] if "type" in schema else schema["type_"] - if isinstance(type_, int): + if isinstance(type_, types.Type): return False - stype = str(schema["type"]) if "type" in schema else str(schema["type_"]) - return TYPE_ENUM.get(stype, glm.Type.STRING) is None + if isinstance(type_, int): + # Handle integer type values (from tool_to_dict serialization) + # Integer types are never null (except for NULL type handled separately) + return type_ == 7 # 7 corresponds to NULL type + else: + pass return False _ToolChoiceType = Literal["auto", "none", "any", True] | dict | list[str] | str -class _FunctionCallingConfigDict(TypedDict): - mode: gapic.FunctionCallingConfig.Mode | str - allowed_function_names: list[str] | None - - -class _ToolConfigDict(TypedDict): - function_calling_config: _FunctionCallingConfigDict - - def _tool_choice_to_tool_config( tool_choice: _ToolChoiceType, all_names: list[str], -) -> _ToolConfigDict: +) -> types.ToolConfig: allowed_function_names: list[str] | None = None if tool_choice is True or tool_choice == "any": mode = "ANY" @@ -540,11 +656,11 @@ def _tool_choice_to_tool_config( else: msg = f"Unrecognized tool choice format:\n\n{tool_choice=}" raise ValueError(msg) - return _ToolConfigDict( - function_calling_config={ - "mode": mode.upper(), - "allowed_function_names": allowed_function_names, - } + return types.ToolConfig( + function_calling_config=types.FunctionCallingConfig( + mode=types.FunctionCallingConfigMode(mode), + allowed_function_names=allowed_function_names, + ) ) @@ -580,3 +696,7 @@ def _get_def_key_from_schema_path(schema_path: str) -> str: raise ValueError(error_message) return parts[-1] + + +# Backward compatibility alias +_dict_to_gapic_schema = _dict_to_genai_schema diff --git a/libs/genai/langchain_google_genai/_genai_extension.py b/libs/genai/langchain_google_genai/_genai_extension.py index 2fff85abc..89931376a 100644 --- a/libs/genai/langchain_google_genai/_genai_extension.py +++ b/libs/genai/langchain_google_genai/_genai_extension.py @@ -1,7 +1,6 @@ """Temporary high-level library of the Google GenerativeAI API. -(The content of this file should eventually go into the Python package -`google.generativeai`) +Mostly utils for Google Vector Store and embeddings. 
""" import datetime @@ -12,7 +11,6 @@ from typing import Any from urllib.parse import urlparse -import google.ai.generativelanguage as genai import langchain_core from google.ai.generativelanguage_v1beta import ( GenerativeServiceAsyncClient as v1betaGenerativeServiceAsyncClient, @@ -20,6 +18,10 @@ from google.ai.generativelanguage_v1beta import ( GenerativeServiceClient as v1betaGenerativeServiceClient, ) +from google.ai.generativelanguage_v1beta import types as old_genai +from google.ai.generativelanguage_v1beta.services.retriever_service import ( + RetrieverServiceClient, +) from google.api_core import client_options as client_options_lib from google.api_core import exceptions as gapi_exception from google.api_core import gapic_v1 @@ -94,7 +96,7 @@ def corpus_id(self) -> str: return name.corpus_id @classmethod - def from_corpus(cls, c: genai.Corpus) -> "Corpus": + def from_corpus(cls, c: old_genai.Corpus) -> "Corpus": return cls( name=c.name, display_name=c.display_name, @@ -109,7 +111,7 @@ class Document: display_name: str | None create_time: timestamp_pb2.Timestamp | None update_time: timestamp_pb2.Timestamp | None - custom_metadata: MutableSequence[genai.CustomMetadata] | None + custom_metadata: MutableSequence[old_genai.CustomMetadata] | None @property def corpus_id(self) -> str: @@ -123,7 +125,7 @@ def document_id(self) -> str: return name.document_id @classmethod - def from_document(cls, d: genai.Document) -> "Document": + def from_document(cls, d: old_genai.Document) -> "Document": return cls( name=d.name, display_name=d.display_name, @@ -228,10 +230,9 @@ def _get_credentials() -> credentials.Credentials | None: return None -def build_semantic_retriever() -> genai.RetrieverServiceClient: - """Uses the default `'grpc'` transport to build a semantic retriever client.""" +def build_semantic_retriever() -> RetrieverServiceClient: credentials = _get_credentials() - return genai.RetrieverServiceClient( + return RetrieverServiceClient( credentials=credentials, # TODO: remove ignore once google-auth has types. client_info=gapic_v1.client_info.ClientInfo(user_agent=_USER_AGENT), # type: ignore[no-untyped-call] @@ -241,6 +242,7 @@ def build_semantic_retriever() -> genai.RetrieverServiceClient: ) +# Used by embeddings.py def _prepare_config( credentials: credentials.Credentials | None = None, api_key: str | None = None, @@ -284,6 +286,7 @@ def _prepare_config( return {k: v for k, v in config.items() if v is not None} +# Used by embeddings.py def _validate_grpc_endpoint(endpoint: str, transport: str) -> None: """Validate that an endpoint is compatible with gRPC transports. @@ -315,6 +318,7 @@ def _validate_grpc_endpoint(endpoint: str, transport: str) -> None: raise ValueError(msg) +# Used by embeddings.py def _format_grpc_endpoint(endpoint: str) -> str: """Format an endpoint for gRPC compatibility. 
@@ -343,6 +347,7 @@ def _format_grpc_endpoint(endpoint: str) -> str: return endpoint +# Used by embeddings.py def build_generative_service( credentials: credentials.Credentials | None = None, api_key: str | None = None, @@ -360,6 +365,7 @@ def build_generative_service( return v1betaGenerativeServiceClient(**config) +# Used by embeddings.py def build_generative_async_service( credentials: credentials.Credentials | None, api_key: str | None = None, @@ -377,14 +383,15 @@ def build_generative_async_service( return v1betaGenerativeServiceAsyncClient(**config) +# Used by google_vector_store.py def get_corpus( *, corpus_id: str, - client: genai.RetrieverServiceClient, + client: RetrieverServiceClient, ) -> Corpus | None: try: corpus = client.get_corpus( - genai.GetCorpusRequest(name=str(EntityName(corpus_id=corpus_id))) + old_genai.GetCorpusRequest(name=str(EntityName(corpus_id=corpus_id))) ) return Corpus.from_corpus(corpus) except Exception as e: @@ -395,45 +402,53 @@ def get_corpus( return None +# Used by google_vector_store.py def create_corpus( *, corpus_id: str | None = None, display_name: str | None = None, - client: genai.RetrieverServiceClient, + client: RetrieverServiceClient, ) -> Corpus: name: str | None - name = str(EntityName(corpus_id=corpus_id)) if corpus_id is not None else None + if corpus_id is not None: + name = str(EntityName(corpus_id=corpus_id)) + else: + name = None new_display_name = display_name or f"Untitled {datetime.datetime.now()}" new_corpus = client.create_corpus( - genai.CreateCorpusRequest( - corpus=genai.Corpus(name=name, display_name=new_display_name) + old_genai.CreateCorpusRequest( + corpus=old_genai.Corpus(name=name, display_name=new_display_name) ) ) return Corpus.from_corpus(new_corpus) +# Used by google_vector_store.py def delete_corpus( *, corpus_id: str, - client: genai.RetrieverServiceClient, + client: RetrieverServiceClient, ) -> None: client.delete_corpus( - genai.DeleteCorpusRequest(name=str(EntityName(corpus_id=corpus_id)), force=True) + old_genai.DeleteCorpusRequest( + name=str(EntityName(corpus_id=corpus_id)), force=True + ) ) +# Used by google_vector_store.py def get_document( *, corpus_id: str, document_id: str, - client: genai.RetrieverServiceClient, + client: RetrieverServiceClient, ) -> Document | None: try: document = client.get_document( - genai.GetDocumentRequest( + old_genai.GetDocumentRequest( name=str(EntityName(corpus_id=corpus_id, document_id=document_id)) ) ) @@ -445,13 +460,14 @@ def get_document( return None +# Used by google_vector_store.py def create_document( *, corpus_id: str, document_id: str | None = None, display_name: str | None = None, metadata: dict[str, Any] | None = None, - client: genai.RetrieverServiceClient, + client: RetrieverServiceClient, ) -> Document: name: str | None if document_id is not None: @@ -463,9 +479,9 @@ def create_document( new_metadatas = _convert_to_metadata(metadata) if metadata else None new_document = client.create_document( - genai.CreateDocumentRequest( + old_genai.CreateDocumentRequest( parent=str(EntityName(corpus_id=corpus_id)), - document=genai.Document( + document=old_genai.Document( name=name, display_name=new_display_name, custom_metadata=new_metadatas ), ) @@ -474,28 +490,30 @@ def create_document( return Document.from_document(new_document) +# Used by google_vector_store.py def delete_document( *, corpus_id: str, document_id: str, - client: genai.RetrieverServiceClient, + client: RetrieverServiceClient, ) -> None: client.delete_document( - genai.DeleteDocumentRequest( + 
old_genai.DeleteDocumentRequest( name=str(EntityName(corpus_id=corpus_id, document_id=document_id)), force=True, ) ) +# Used by google_vector_store.py def batch_create_chunk( *, corpus_id: str, document_id: str, texts: list[str], metadatas: list[dict[str, Any]] | None = None, - client: genai.RetrieverServiceClient, -) -> list[genai.Chunk]: + client: RetrieverServiceClient, +) -> list[old_genai.Chunk]: if metadatas is None: metadatas = [{} for _ in texts] if len(texts) != len(metadatas): @@ -507,18 +525,18 @@ def batch_create_chunk( doc_name = str(EntityName(corpus_id=corpus_id, document_id=document_id)) - created_chunks: list[genai.Chunk] = [] + created_chunks: list[old_genai.Chunk] = [] - batch_request = genai.BatchCreateChunksRequest( + batch_request = old_genai.BatchCreateChunksRequest( parent=doc_name, requests=[], ) - for text, metadata in zip(texts, metadatas, strict=False): + for text, metadata in zip(texts, metadatas): batch_request.requests.append( - genai.CreateChunkRequest( + old_genai.CreateChunkRequest( parent=doc_name, - chunk=genai.Chunk( - data=genai.ChunkData(string_value=text), + chunk=old_genai.Chunk( + data=old_genai.ChunkData(string_value=text), custom_metadata=_convert_to_metadata(metadata), ), ) @@ -528,7 +546,7 @@ def batch_create_chunk( response = client.batch_create_chunks(batch_request) created_chunks.extend(list(response.chunks)) # Prepare a new batch for next round. - batch_request = genai.BatchCreateChunksRequest( + batch_request = old_genai.BatchCreateChunksRequest( parent=doc_name, requests=[], ) @@ -541,15 +559,16 @@ def batch_create_chunk( return created_chunks +# Used by google_vector_store.py def delete_chunk( *, corpus_id: str, document_id: str, chunk_id: str, - client: genai.RetrieverServiceClient, + client: RetrieverServiceClient, ) -> None: client.delete_chunk( - genai.DeleteChunkRequest( + old_genai.DeleteChunkRequest( name=str( EntityName( corpus_id=corpus_id, document_id=document_id, chunk_id=chunk_id @@ -559,16 +578,17 @@ def delete_chunk( ) +# Used by google_vector_store.py def query_corpus( *, corpus_id: str, query: str, k: int = 4, filter: dict[str, Any] | None = None, - client: genai.RetrieverServiceClient, -) -> list[genai.RelevantChunk]: + client: RetrieverServiceClient, +) -> list[old_genai.RelevantChunk]: response = client.query_corpus( - genai.QueryCorpusRequest( + old_genai.QueryCorpusRequest( name=str(EntityName(corpus_id=corpus_id)), query=query, metadata_filters=_convert_filter(filter), @@ -578,6 +598,7 @@ def query_corpus( return list(response.relevant_chunks) +# Used by google_vector_store.py def query_document( *, corpus_id: str, @@ -585,10 +606,10 @@ def query_document( query: str, k: int = 4, filter: dict[str, Any] | None = None, - client: genai.RetrieverServiceClient, -) -> list[genai.RelevantChunk]: + client: RetrieverServiceClient, +) -> list[old_genai.RelevantChunk]: response = client.query_document( - genai.QueryDocumentRequest( + old_genai.QueryDocumentRequest( name=str(EntityName(corpus_id=corpus_id, document_id=document_id)), query=query, metadata_filters=_convert_filter(filter), @@ -598,13 +619,14 @@ def query_document( return list(response.relevant_chunks) -def _convert_to_metadata(metadata: dict[str, Any]) -> list[genai.CustomMetadata]: - cs: list[genai.CustomMetadata] = [] +# Used by google_vector_store.py +def _convert_to_metadata(metadata: dict[str, Any]) -> list[old_genai.CustomMetadata]: + cs: list[old_genai.CustomMetadata] = [] for key, value in metadata.items(): if isinstance(value, str): - c = 
genai.CustomMetadata(key=key, string_value=value) + c = old_genai.CustomMetadata(key=key, string_value=value) elif isinstance(value, (float, int)): - c = genai.CustomMetadata(key=key, numeric_value=value) + c = old_genai.CustomMetadata(key=key, numeric_value=value) else: msg = f"Metadata value {value} is not supported" raise ValueError(msg) @@ -613,25 +635,26 @@ def _convert_to_metadata(metadata: dict[str, Any]) -> list[genai.CustomMetadata] return cs -def _convert_filter(fs: dict[str, Any] | None) -> list[genai.MetadataFilter]: +# Used by google_vector_store.py +def _convert_filter(fs: dict[str, Any] | None) -> list[old_genai.MetadataFilter]: if fs is None: return [] assert isinstance(fs, dict) - filters: list[genai.MetadataFilter] = [] + filters: list[old_genai.MetadataFilter] = [] for key, value in fs.items(): if isinstance(value, str): - condition = genai.Condition( - operation=genai.Condition.Operator.EQUAL, string_value=value + condition = old_genai.Condition( + operation=old_genai.Condition.Operator.EQUAL, string_value=value ) elif isinstance(value, (float, int)): - condition = genai.Condition( - operation=genai.Condition.Operator.EQUAL, numeric_value=value + condition = old_genai.Condition( + operation=old_genai.Condition.Operator.EQUAL, numeric_value=value ) else: msg = f"Filter value {value} is not supported" raise ValueError(msg) - filters.append(genai.MetadataFilter(key=key, conditions=[condition])) + filters.append(old_genai.MetadataFilter(key=key, conditions=[condition])) return filters diff --git a/libs/genai/langchain_google_genai/_image_utils.py b/libs/genai/langchain_google_genai/_image_utils.py index 594b37da6..241df1d59 100644 --- a/libs/genai/langchain_google_genai/_image_utils.py +++ b/libs/genai/langchain_google_genai/_image_utils.py @@ -5,12 +5,14 @@ import os import re from enum import Enum -from typing import Any from urllib.parse import urlparse import filetype # type: ignore[import-untyped] import requests -from google.ai.generativelanguage_v1beta.types import Part +from google.genai.types import Blob, Part + +# Note: noticed the previous generativelanguage_v1beta Part has a `part_metadata` field +# that is not present in the genai.types.Part. class Route(Enum): @@ -22,12 +24,17 @@ class Route(Enum): class ImageBytesLoader: - """Loads image bytes from multiple sources given a string. + """Loads media bytes from multiple sources given a string. + + (Despite the class name, this loader supports multiple media types including + images, PDFs, audio, and video files.) Currently supported: - - B64 Encoded image string - - URL + - Base64 encoded data URIs (e.g., `data:image/jpeg;base64,...` or + `data:application/pdf;base64,...`) + - HTTP/HTTPS URLs + - Google Cloud Storage URIs (gs://) via URL download, not direct URI passing """ def load_bytes(self, image_string: str) -> bytes: @@ -36,11 +43,12 @@ def load_bytes(self, image_string: str) -> bytes: Args: image_string: Can be either: - - B64 Encoded image string - - URL + - Base64 encoded data URI (e.g., `data:image/jpeg;base64,...` or + `data:application/pdf;base64,...`) + - HTTP/HTTPS URL Returns: - Image bytes. + Media bytes (images, PDFs, audio, video, etc.). """ route = self._route(image_string) @@ -53,15 +61,17 @@ def load_bytes(self, image_string: str) -> bytes: if route == Route.LOCAL_FILE: msg = ( "Loading from local files is no longer supported for security reasons. " - "Please pass in images as Google Cloud Storage URI, " - "b64 encoded image string (data:image/...), or valid image url." 
+ "Please pass in media as Google Cloud Storage URI, " + "base64 encoded data URI (e.g., data:image/..., " + "data:application/pdf;base64,...), or valid HTTP/HTTPS URL." ) raise ValueError(msg) return self._bytes_from_file(image_string) msg = ( - "Image string must be one of: Google Cloud Storage URI, " - "b64 encoded image string (data:image/...), or valid image url." + "Media string must be one of: Google Cloud Storage URI, " + "base64 encoded data URI (e.g., data:image/..., " + "data:application/pdf;base64,...), or valid HTTP/HTTPS URL. " f"Instead got '{image_string}'." ) raise ValueError(msg) @@ -71,8 +81,14 @@ def load_part(self, image_string: str) -> Part: Args: image_string: Can be either: - - B64 Encoded image string - - URL + + - Base64 encoded data URI (e.g., `data:image/jpeg;base64,...` or + `data:application/pdf;base64,...`) + - HTTP/HTTPS URL + + Returns: + Part object with `inline_data` containing the media bytes and detected mime + type. """ route = self._route(image_string) @@ -85,26 +101,26 @@ def load_part(self, image_string: str) -> Part: if route == Route.LOCAL_FILE: msg = ( "Loading from local files is no longer supported for security reasons. " - "Please specify images as Google Cloud Storage URI, " - "b64 encoded image string (data:image/...), or valid image url." + "Please specify media as Google Cloud Storage URI, " + "base64 encoded data URI (e.g., data:image/..., " + "data:application/pdf;base64,...), or valid HTTP/HTTPS URL." ) raise ValueError(msg) - inline_data: dict[str, Any] = {"data": bytes_} - mime_type, _ = mimetypes.guess_type(image_string) if not mime_type: kind = filetype.guess(bytes_) if kind: mime_type = kind.mime - if mime_type: - inline_data["mime_type"] = mime_type + blob = Blob(data=bytes_, mime_type=mime_type) - return Part(inline_data=inline_data) + return Part(inline_data=blob) def _route(self, image_string: str) -> Route: - if image_string.startswith("data:image/"): + # Accept any data URI format (images, PDFs, audio, video, etc.) + # Examples: data:image/jpeg;base64,..., data:application/pdf;base64,... + if image_string.startswith("data:"): return Route.BASE64 if self._is_url(image_string): @@ -114,42 +130,57 @@ def _route(self, image_string: str) -> Route: return Route.LOCAL_FILE msg = ( - "Image string must be one of: " - "b64 encoded image string (data:image/...) or valid image url." - f" Instead got '{image_string}'." + "Media string must be one of: " + "base64 encoded data URI (e.g., data:image/..., " + "data:application/pdf;base64,...) or valid HTTP/HTTPS URL. " + f"Instead got '{image_string}'." ) raise ValueError(msg) def _bytes_from_b64(self, base64_image: str) -> bytes: - """Gets image bytes from a base64 encoded string. + """Gets media bytes from a base64 encoded data URI. + + Supports any mime type including images, PDFs, audio, video, etc. Args: - base64_image: Encoded image in b64 format. + base64_image: Base64 encoded data URI (e.g., `data:image/jpeg;base64,...` + or `data:application/pdf;base64,...`). Returns: - Image bytes + Decoded media bytes. + + Raises: + ValueError: If the data URI format is invalid. """ - pattern = r"data:image/\w{2,4};base64,(.*)" + # Pattern captures any mime type: image/jpeg, application/pdf, audio/mp3, etc. 
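In practice this means callers can pass any base64 data URI, not just images. A short sketch of feeding a local PDF through `ImageBytesLoader` by encoding it first (the file name is hypothetical, and `_image_utils` is a private module):

```python
import base64

from langchain_google_genai._image_utils import ImageBytesLoader

with open("report.pdf", "rb") as f:  # hypothetical local file
    encoded = base64.b64encode(f.read()).decode("utf-8")

data_uri = f"data:application/pdf;base64,{encoded}"

loader = ImageBytesLoader()
part = loader.load_part(data_uri)  # Part(inline_data=Blob(...)) with detected MIME type
print(part.inline_data.mime_type)  # expected: "application/pdf"
```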
+ # Format: data:;base64, + pattern = r"data:([^;]+);base64,(.*)" match = re.search(pattern, base64_image) if match is not None: - encoded_string = match.group(1) + # Group 1: mime type (e.g., "image/jpeg", "application/pdf") + # Group 2: base64 encoded data + encoded_string = match.group(2) return base64.b64decode(encoded_string) - msg = f"Error in b64 encoded image. Must follow pattern: {pattern}" + msg = ( + f"Invalid base64 data URI format. Expected pattern: {pattern}\n" + f"Examples: data:image/jpeg;base64,... or data:application/pdf;base64,...\n" + f"Got: {base64_image[:100]}..." + ) raise ValueError(msg) def _bytes_from_url(self, url: str) -> bytes: - """Gets image bytes from a public url. + """Gets media bytes from a public HTTP/HTTPS URL. Args: - url: Valid url. + url: Valid HTTP or HTTPS URL pointing to media content. Raises: - HTTP Error if there is one. + HTTPError: If the request fails. Returns: - Image bytes + Media bytes (images, PDFs, audio, video, etc.). """ response = requests.get(url) diff --git a/libs/genai/langchain_google_genai/chat_models.py b/libs/genai/langchain_google_genai/chat_models.py index d87004eba..bc91ef9d2 100644 --- a/libs/genai/langchain_google_genai/chat_models.py +++ b/libs/genai/langchain_google_genai/chat_models.py @@ -6,8 +6,8 @@ import json import logging import mimetypes +import os import re -import time import uuid import warnings import wave @@ -22,13 +22,11 @@ import filetype # type: ignore[import-untyped] import proto # type: ignore[import-untyped] -from google.ai.generativelanguage_v1beta import ( - GenerativeServiceAsyncClient as v1betaGenerativeServiceAsyncClient, -) -from google.ai.generativelanguage_v1beta.types import ( +from google.genai.client import Client +from google.genai.errors import ClientError +from google.genai.types import ( Blob, Candidate, - CodeExecution, CodeExecutionResult, Content, ExecutableCode, @@ -36,22 +34,22 @@ FunctionCall, FunctionDeclaration, FunctionResponse, - GenerateContentRequest, + GenerateContentConfig, GenerateContentResponse, GenerationConfig, + HttpOptions, + HttpRetryOptions, Part, SafetySetting, + ThinkingConfig, + ToolCodeExecution, ToolConfig, VideoMetadata, ) -from google.ai.generativelanguage_v1beta.types import Tool as GoogleTool -from google.api_core.exceptions import ( - FailedPrecondition, - GoogleAPIError, - InvalidArgument, - ResourceExhausted, - ServiceUnavailable, +from google.genai.types import ( + Outcome as CodeExecutionResultOutcome, ) +from google.genai.types import Tool as GoogleTool from langchain_core.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -96,20 +94,13 @@ from langchain_core.utils.utils import _build_model_kwargs from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator from pydantic.v1 import BaseModel as BaseModelV1 -from tenacity import ( - before_sleep_log, - retry, - retry_if_exception_type, - stop_after_attempt, - wait_exponential, -) from typing_extensions import Self, is_typeddict from langchain_google_genai._common import ( GoogleGenerativeAIError, SafetySettingDict, _BaseGoogleGenerativeAI, - get_client_info, + get_user_agent, ) from langchain_google_genai._compat import ( _convert_from_v1_to_generativelanguage_v1beta, @@ -117,7 +108,6 @@ from langchain_google_genai._function_utils import ( _tool_choice_to_tool_config, _ToolChoiceType, - _ToolConfigDict, _ToolDict, convert_to_genai_function_declarations, is_basemodel_subclass_safe, @@ -129,12 +119,8 @@ ) from langchain_google_genai.data._profiles 
import _PROFILES -from . import _genai_extension as genaix - logger = logging.getLogger(__name__) -_allowed_params_prediction_service = ["request", "timeout", "metadata", "labels"] - _FunctionDeclarationType = FunctionDeclaration | dict[str, Any] | Callable[..., Any] _FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY = ( @@ -144,6 +130,21 @@ _MODEL_PROFILES = cast("ModelProfileRegistry", _PROFILES) +def _handle_client_error(e: ClientError, request: dict[str, Any]) -> None: + """Convert `ClientError` to `ChatGoogleGenerativeAIError` with descriptive message. + + Args: + e: The `ClientError` exception to handle. + request: The request dict containing model info. + + Raises: + ChatGoogleGenerativeAIError: Always raised with formatted error message. + """ + model_name = request.get("model", "unknown") + msg = f"Error calling model '{model_name}' ({e.status}): {e}" + raise ChatGoogleGenerativeAIError(msg) from e + + def _get_default_model_profile(model_name: str) -> ModelProfile: default = _MODEL_PROFILES.get(model_name) or {} return default.copy() @@ -158,11 +159,10 @@ def _base64_to_bytes(input_str: str) -> bytes: class ChatGoogleGenerativeAIError(GoogleGenerativeAIError): - """Custom exception class for errors associated with the `Google GenAI` API. + """Wrapper exception class for errors associated with the `Google GenAI` API. - This exception is raised when there are specific issues related to the Google GenAI - API usage in the `ChatGoogleGenerativeAI` class, such as unsupported message types - or roles. + Raised when there are specific issues related to the Google GenAI API usage in the + `ChatGoogleGenerativeAI` class, such as unsupported message types or roles. """ @@ -170,10 +170,8 @@ def _is_gemini_3_or_later(model_name: str) -> bool: """Checks if the model is a pre-Gemini 3 model.""" if not model_name: return False - model_name = model_name.lower() - if "gemini-3" in model_name: - return True - return False + model_name = model_name.lower().replace("models/", "") + return "gemini-3" in model_name def _is_gemini_25_model(model_name: str) -> bool: @@ -184,136 +182,6 @@ def _is_gemini_25_model(model_name: str) -> bool: return "gemini-2.5" in model_name -def _create_retry_decorator( - max_retries: int = 6, - wait_exponential_multiplier: float = 2.0, - wait_exponential_min: float = 1.0, - wait_exponential_max: float = 60.0, -) -> Callable[[Any], Any]: - """Creates and returns a preconfigured tenacity retry decorator. - - The retry decorator is configured to handle specific Google API exceptions such as - `ResourceExhausted` and `ServiceUnavailable`. It uses an exponential backoff - strategy for retries. - - Returns: - A retry decorator configured for handling specific Google API exceptions. - """ - return retry( - reraise=True, - stop=stop_after_attempt(max_retries), - wait=wait_exponential( - multiplier=wait_exponential_multiplier, - min=wait_exponential_min, - max=wait_exponential_max, - ), - retry=( - retry_if_exception_type(ResourceExhausted) - | retry_if_exception_type(ServiceUnavailable) - | retry_if_exception_type(GoogleAPIError) - ), - before_sleep=before_sleep_log(logger, logging.WARNING), - ) - - -def _chat_with_retry(generation_method: Callable, **kwargs: Any) -> Any: - """Executes a chat generation method with retry logic using tenacity. - - This function is a wrapper that applies a retry mechanism to a provided chat - generation function. It is useful for handling intermittent issues like network - errors or temporary service unavailability. 
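`_handle_client_error` replaces the removed tenacity-based handling: SDK `ClientError`s are re-raised as `ChatGoogleGenerativeAIError` with the model name and HTTP status in the message. A hedged sketch of catching the wrapped error from user code; whether a particular failure is routed through this helper depends on the call path:

```python
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_google_genai.chat_models import ChatGoogleGenerativeAIError

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

try:
    llm.invoke("Hello!")
except ChatGoogleGenerativeAIError as exc:
    # Message format: "Error calling model '<model>' (<status>): <details>"
    print(f"Request failed: {exc}")
```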
- - Args: - generation_method: The chat generation method to be executed. - **kwargs: Additional keyword arguments to pass to the generation method. - - Returns: - Any: The result from the chat generation method. - """ - retry_decorator = _create_retry_decorator( - max_retries=kwargs.get("max_retries", 6), - wait_exponential_multiplier=kwargs.get("wait_exponential_multiplier", 2.0), - wait_exponential_min=kwargs.get("wait_exponential_min", 1.0), - wait_exponential_max=kwargs.get("wait_exponential_max", 60.0), - ) - - @retry_decorator - def _chat_with_retry(**kwargs: Any) -> Any: - try: - return generation_method(**kwargs) - except FailedPrecondition as exc: - if "location is not supported" in exc.message: - error_msg = ( - "Your location is not supported by google-generativeai " - "at the moment. Try to use ChatVertexAI LLM from " - "langchain_google_vertexai." - ) - raise ValueError(error_msg) - - except InvalidArgument as e: - msg = f"Invalid argument provided to Gemini: {e}" - raise ChatGoogleGenerativeAIError(msg) from e - except ResourceExhausted as e: - # Handle quota-exceeded error with recommended retry delay - if hasattr(e, "retry_after") and getattr(e, "retry_after", 0) < kwargs.get( - "wait_exponential_max", 60.0 - ): - time.sleep(e.retry_after) - raise - except Exception: - raise - - params = { - k: v for k, v in kwargs.items() if k in _allowed_params_prediction_service - } - return _chat_with_retry(**params) - - -async def _achat_with_retry(generation_method: Callable, **kwargs: Any) -> Any: - """Executes a chat generation method with retry logic using tenacity. - - This function is a wrapper that applies a retry mechanism to a provided chat - generation function. It is useful for handling intermittent issues like network - errors or temporary service unavailability. - - Args: - generation_method: The chat generation method to be executed. - **kwargs: Additional keyword arguments to pass to the generation method. - - Returns: - Any: The result from the chat generation method. - """ - retry_decorator = _create_retry_decorator( - max_retries=kwargs.get("max_retries", 6), - wait_exponential_multiplier=kwargs.get("wait_exponential_multiplier", 2.0), - wait_exponential_min=kwargs.get("wait_exponential_min", 1.0), - wait_exponential_max=kwargs.get("wait_exponential_max", 60.0), - ) - - @retry_decorator - async def _achat_with_retry(**kwargs: Any) -> Any: - try: - return await generation_method(**kwargs) - except InvalidArgument as e: - # Do not retry for these errors. - msg = f"Invalid argument provided to Gemini: {e}" - raise ChatGoogleGenerativeAIError(msg) from e - except ResourceExhausted as e: - # Handle quota-exceeded error with recommended retry delay - if hasattr(e, "retry_after") and getattr(e, "retry_after", 0) < kwargs.get( - "wait_exponential_max", 60.0 - ): - time.sleep(e.retry_after) - raise - except Exception: - raise - - params = { - k: v for k, v in kwargs.items() if k in _allowed_params_prediction_service - } - return await _achat_with_retry(**params) - - def _convert_to_parts( raw_content: str | Sequence[str | dict], model: str | None = None, @@ -376,10 +244,9 @@ def _convert_to_parts( "'data' field." 
) raise ValueError(msg) - inline_data: dict = {"data": bytes_} - if "mime_type" in part: - inline_data["mime_type"] = part["mime_type"] - else: + + mime_type = part.get("mime_type") + if not mime_type: # Guess MIME type based on data field if not provided source = cast( "str", @@ -391,9 +258,15 @@ def _convert_to_parts( kind = filetype.guess(bytes_) if kind: mime_type = kind.mime - if mime_type: - inline_data["mime_type"] = mime_type + blob_kwargs: dict[str, Any] = { + "data": bytes_, + } + if mime_type: + blob_kwargs["mime_type"] = mime_type + part_kwargs: dict[str, Any] = { + "inline_data": Blob(**blob_kwargs), + } if "media_resolution" in part: if model and _is_gemini_25_model(model): warnings.warn( @@ -404,9 +277,11 @@ def _convert_to_parts( stacklevel=2, ) elif model and _is_gemini_3_or_later(model): - inline_data["media_resolution"] = part["media_resolution"] + part_kwargs["media_resolution"] = { + "level": part["media_resolution"] + } - parts.append(Part(inline_data=inline_data)) + parts.append(Part(**part_kwargs)) elif part["type"] == "image_url": # Chat Completions image format img_url = part["image_url"] @@ -423,24 +298,24 @@ def _convert_to_parts( msg = f"Missing mime_type in media part: {part}" raise ValueError(msg) mime_type = part["mime_type"] - media_part = Part() + media_part_kwargs: dict[str, Any] = {} if "data" in part: # Embedded media - media_part.inline_data = Blob( + media_part_kwargs["inline_data"] = Blob( data=part["data"], mime_type=mime_type ) elif "file_uri" in part: # Referenced files (e.g. stored in GCS) - media_part.file_data = FileData( + media_part_kwargs["file_data"] = FileData( file_uri=part["file_uri"], mime_type=mime_type ) else: msg = f"Media part must have either data or file_uri: {part}" raise ValueError(msg) if "video_metadata" in part: - metadata = VideoMetadata(part["video_metadata"]) - media_part.video_metadata = metadata + metadata = VideoMetadata.model_validate(part["video_metadata"]) + media_part_kwargs["video_metadata"] = metadata if "media_resolution" in part: if model and _is_gemini_25_model(model): @@ -452,16 +327,11 @@ def _convert_to_parts( stacklevel=2, ) elif model and _is_gemini_3_or_later(model): - if media_part.inline_data: - media_part.inline_data.media_resolution = part[ - "media_resolution" - ] - elif media_part.file_data: - media_part.file_data.media_resolution = part[ - "media_resolution" - ] - - parts.append(media_part) + media_part_kwargs["media_resolution"] = { + "level": part["media_resolution"] + } + + parts.append(Part(**media_part_kwargs)) elif part["type"] == "thinking": # Pre-existing thinking block format that we continue to store as thought_sig = None @@ -525,8 +395,11 @@ def _convert_to_parts( elif part["type"] == "server_tool_result": output = part.get("output", "") status = part.get("status", "success") - # Map status to outcome: success → 1 (OUTCOME_OK), error → 2 - outcome = 1 if status == "success" else 2 + outcome = ( + CodeExecutionResultOutcome.OUTCOME_OK + if status == "success" + else CodeExecutionResultOutcome.OUTCOME_FAILED + ) # Check extras for original outcome if available if "extras" in part and "outcome" in part["extras"]: outcome = part["extras"]["outcome"] @@ -548,10 +421,11 @@ def _convert_to_parts( outcome = part["outcome"] else: # Backward compatibility - outcome = 1 # Default to success if not specified + outcome = CodeExecutionResultOutcome.OUTCOME_OK code_execution_result_part = Part( code_execution_result=CodeExecutionResult( - output=part["code_execution_result"], outcome=outcome + 
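The `media` part handling above accepts either inline `data` or a `file_uri`, plus optional `video_metadata` that is validated into `google.genai.types.VideoMetadata`. A sketch of the corresponding message shape; the bucket URI and clip offsets are assumptions:

```python
from langchain_core.messages import HumanMessage

from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

message = HumanMessage(
    content=[
        {"type": "text", "text": "Summarize the attached clip."},
        {
            "type": "media",
            "mime_type": "video/mp4",
            "file_uri": "gs://my-bucket/clip.mp4",  # hypothetical GCS object
            "video_metadata": {"start_offset": "0s", "end_offset": "30s"},
        },
    ]
)
response = llm.invoke([message])
print(response.content)
```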
outcome=outcome, + output=part["code_execution_result"], ) ) parts.append(code_execution_result_part) @@ -672,17 +546,18 @@ def _parse_chat_history( Args: input_messages: Sequence of `BaseMessage` objects representing the chat history. - convert_system_message_to_human: Whether to convert the first system message - into a `HumanMessage`. Deprecated, use system instructions instead. + convert_system_message_to_human: Deprecated, use system instructions instead. + + Whether to convert the first system message into a `HumanMessage`. model: The model name, used for version-specific logic. Returns: A tuple containing: - - An optional `google.ai.generativelanguage_v1beta.types.Content` - representing the system instruction (if any). - - A list of `google.ai.generativelanguage_v1beta.types.Content` representing - the formatted messages. + - An optional `generativelanguage_v1beta` `Content` representing the system + instruction (if any). + - A list of `generativelanguage_v1beta` `Content` representing the formatted + messages. """ if convert_system_message_to_human: warnings.warn( @@ -727,7 +602,10 @@ def _parse_chat_history( if i == 0: system_instruction = Content(parts=system_parts) elif system_instruction is not None: - system_instruction.parts.extend(system_parts) + if system_instruction.parts is None: + system_instruction.parts = system_parts + else: + system_instruction.parts.extend(system_parts) else: pass continue @@ -735,15 +613,52 @@ def _parse_chat_history( role = "model" if message.tool_calls: ai_message_parts = [] + + # First, include thinking blocks from content if present. + # When include_thoughts=True, thinking blocks need to be preserved + # when passing messages back to the API. + if isinstance(message.content, list): + for content_block in message.content: + if isinstance(content_block, dict): + block_type = content_block.get("type") + if block_type == "thinking": + # v0 output_format thinking block + thought_sig = None + if "signature" in content_block: + sig = content_block["signature"] + if sig and isinstance(sig, str): + thought_sig = base64.b64decode(sig) + ai_message_parts.append( + Part( + text=content_block["thinking"], + thought=True, + thought_signature=thought_sig, + ) + ) + elif block_type == "reasoning": + # v1 output_format reasoning block + # (Stored in extras, and different type key) + extras = content_block.get("extras", {}) or {} + sig = extras.get("signature") + thought_sig = None + if sig and isinstance(sig, str): + thought_sig = base64.b64decode(sig) + ai_message_parts.append( + Part( + text=content_block["reasoning"], + thought=True, + thought_signature=thought_sig, + ) + ) + + # Then, add function call parts function_call_sigs: dict[Any, str] = message.additional_kwargs.get( _FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY, {} ) for tool_call_idx, tool_call in enumerate(message.tool_calls): function_call = FunctionCall( - { - "name": tool_call["name"], - "args": tool_call["args"], - } + name=tool_call["name"], + args=tool_call["args"], ) # Check if there's a signature for this function call sig = function_call_sigs.get(tool_call.get("id")) @@ -760,16 +675,20 @@ def _parse_chat_history( tool_messages=tool_messages, ai_message=message, model=model ) formatted_messages.append(Content(role=role, parts=ai_message_parts)) - formatted_messages.append( - Content(role="user", parts=tool_messages_parts) - ) + # Only append tool response message if there are actual tool responses. + # The Gemini API requires every Content message to have at least one + # Part. 
If there are no ToolMessages in the conversation history for + # this AI message's tool_calls, tool_messages_parts will be empty, and + # we must not create an empty Content message. + if tool_messages_parts: + formatted_messages.append( + Content(role="user", parts=tool_messages_parts) + ) continue if raw_function_call := message.additional_kwargs.get("function_call"): function_call = FunctionCall( - { - "name": raw_function_call["name"], - "args": json.loads(raw_function_call["arguments"]), - } + name=raw_function_call["name"], + args=json.loads(raw_function_call["arguments"]), ) parts = [Part(function_call=function_call)] elif message.response_metadata.get("output_version") == "v1": @@ -782,7 +701,7 @@ def _parse_chat_history( role = "user" parts = _convert_to_parts(message.content, model=model) if i == 1 and convert_system_message_to_human and system_instruction: - parts = list(system_instruction.parts) + parts + parts = list(system_instruction.parts or []) + parts system_instruction = None elif isinstance(message, FunctionMessage): role = "user" @@ -808,11 +727,11 @@ def _parse_chat_history( # This defines the scope where we must ensure compliance. active_loop_start_idx = -1 for i in range(len(formatted_messages) - 1, -1, -1): - msg = formatted_messages[i] - if msg.role == "user": + content_msg = formatted_messages[i] + if content_msg.role == "user": has_function_response = False has_standard_content = False - for part in msg.parts: + for part in content_msg.parts or []: if part.function_response: has_function_response = True if part.text or part.inline_data: @@ -829,10 +748,10 @@ def _parse_chat_history( # API's schema validation without requiring the original internal thought data. start_idx = active_loop_start_idx + 1 if active_loop_start_idx != -1 else 0 for i in range(start_idx, len(formatted_messages)): - msg = formatted_messages[i] - if msg.role == "model": + content_msg = formatted_messages[i] + if content_msg.role == "model": first_fc_seen = False - for part in msg.parts: + for part in content_msg.parts or []: if part.function_call: if not first_fc_seen: if not part.thought_signature: @@ -862,6 +781,27 @@ def _append_to_content( raise TypeError(msg) +def _convert_integer_like_floats(obj: Any) -> Any: + """Convert integer-like floats to integers recursively. + + Addresses a protobuf issue where integers are converted to floats when using + `proto.Message.to_dict()`. 
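Because function calls, thought signatures, and tool responses are all reconstructed from the message history, multi-turn tool use only requires appending the unmodified `AIMessage` (and matching `ToolMessage`s) back to the list. A minimal sketch, assuming a trivial local tool:

```python
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import tool

from langchain_google_genai import ChatGoogleGenerativeAI


@tool
def get_weather(city: str) -> str:
    """Return the weather for a city."""
    return f"It is sunny in {city}."


llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash").bind_tools([get_weather])

messages = [HumanMessage("What's the weather in Paris?")]
ai_msg = llm.invoke(messages)
messages.append(ai_msg)  # keeps tool calls and any thought signatures intact

for tool_call in ai_msg.tool_calls:
    result = get_weather.invoke(tool_call["args"])
    messages.append(ToolMessage(result, tool_call_id=tool_call["id"]))

print(llm.invoke(messages).content)
```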
+ + Args: + obj: The object to process (can be `dict`, `list`, or primitive) + + Returns: + The object with integer-like floats converted to integers + """ + if isinstance(obj, dict): + return {k: _convert_integer_like_floats(v) for k, v in obj.items()} + if isinstance(obj, list): + return [_convert_integer_like_floats(item) for item in obj] + if isinstance(obj, float) and obj.is_integer(): + return int(obj) + return obj + + def _parse_response_candidate( response_candidate: Candidate, streaming: bool = False, @@ -870,10 +810,14 @@ def _parse_response_candidate( content: None | str | list[str | dict] = None additional_kwargs: dict[str, Any] = {} response_metadata: dict[str, Any] = {"model_provider": "google_genai"} + if model_name: + response_metadata["model_name"] = model_name tool_calls = [] invalid_tool_calls = [] tool_call_chunks = [] - for part in response_candidate.content.parts: + + parts = response_candidate.content.parts or [] if response_candidate.content else [] + for part in parts: text: str | None = None try: if hasattr(part, "text") and part.text is not None: @@ -907,7 +851,7 @@ def _parse_response_candidate( content = _append_to_content(content, thinking_message) elif ( (text is not None and text) # text part with non-empty string - or ("text" in part and thought_sig) # text part with thought signature + or (part.text is not None and thought_sig) # text part w/ thought sig ): text_block: dict[str, Any] = {"type": "text", "text": text or ""} if thought_sig: @@ -944,55 +888,45 @@ def _parse_response_candidate( } content = _append_to_content(content, execution_result) - if ( - hasattr(part, "inline_data") - and part.inline_data - and part.inline_data.mime_type.startswith("audio/") - ): - buffer = io.BytesIO() + if part.inline_data and part.inline_data.data and part.inline_data.mime_type: + if part.inline_data.mime_type.startswith("audio/"): + buffer = io.BytesIO() - with wave.open(buffer, "wb") as wf: - wf.setnchannels(1) - wf.setsampwidth(2) - # TODO: Read Sample Rate from MIME content type. - wf.setframerate(24000) - wf.writeframes(part.inline_data.data) + with wave.open(buffer, "wb") as wf: + wf.setnchannels(1) + wf.setsampwidth(2) + # TODO: Read Sample Rate from MIME content type. 
+ wf.setframerate(24000) + wf.writeframes(part.inline_data.data) - audio_data = buffer.getvalue() - additional_kwargs["audio"] = audio_data + audio_data = buffer.getvalue() + additional_kwargs["audio"] = audio_data - # For backwards compatibility, audio stays in additional_kwargs by default - # and is accessible via .content_blocks property + # For backwards compatibility, audio stays in additional_kwargs by + # default and is accessible via .content_blocks property - if ( - hasattr(part, "inline_data") - and part.inline_data - and part.inline_data.mime_type.startswith("image/") - ): - image_format = part.inline_data.mime_type[6:] - image_message = { - "type": "image_url", - "image_url": { - "url": image_bytes_to_b64_string( - part.inline_data.data, image_format=image_format - ) - }, - } - content = _append_to_content(content, image_message) + if part.inline_data.mime_type.startswith("image/"): + image_format = part.inline_data.mime_type[6:] + image_message = { + "type": "image_url", + "image_url": { + "url": image_bytes_to_b64_string( + part.inline_data.data, + image_format=image_format, + ) + }, + } + content = _append_to_content(content, image_message) if part.function_call: function_call = {"name": part.function_call.name} # dump to match other function calling llm for now - function_call_args_dict = proto.Message.to_dict(part.function_call)["args"] - - # Fix: Correct integer-like floats from protobuf conversion - # The protobuf library sometimes converts integers to floats - corrected_args = { - k: int(v) if isinstance(v, float) and v.is_integer() else v - for k, v in function_call_args_dict.items() - } - - function_call["arguments"] = json.dumps(corrected_args) + # Convert function call args to dict first, then fix integer-like floats + args_dict = dict(part.function_call.args) if part.function_call.args else {} + function_call_args_dict = _convert_integer_like_floats(args_dict) + function_call["arguments"] = json.dumps( + {k: function_call_args_dict[k] for k in function_call_args_dict} + ) additional_kwargs["function_call"] = function_call tool_call_id = function_call.get("id", str(uuid.uuid4())) @@ -1080,16 +1014,25 @@ def _response_to_result( stream: bool = False, prev_usage: UsageMetadata | None = None, ) -> ChatResult: - """Converts a PaLM API response into a LangChain `ChatResult`.""" - llm_output = {"prompt_feedback": proto.Message.to_dict(response.prompt_feedback)} + """Converts a Google AI response into a LangChain `ChatResult`.""" + llm_output = ( + {"prompt_feedback": response.prompt_feedback.model_dump()} + if response.prompt_feedback + else {} + ) # Get usage metadata try: - input_tokens = response.usage_metadata.prompt_token_count - thought_tokens = response.usage_metadata.thoughts_token_count - output_tokens = response.usage_metadata.candidates_token_count + thought_tokens - total_tokens = response.usage_metadata.total_token_count - cache_read_tokens = response.usage_metadata.cached_content_token_count + if response.usage_metadata is None: + msg = "Usage metadata is None" + raise AttributeError(msg) + input_tokens = response.usage_metadata.prompt_token_count or 0 + thought_tokens = response.usage_metadata.thoughts_token_count or 0 + output_tokens = ( + response.usage_metadata.candidates_token_count or 0 + ) + thought_tokens + total_tokens = response.usage_metadata.total_token_count or 0 + cache_read_tokens = response.usage_metadata.cached_content_token_count or 0 if input_tokens + output_tokens + cache_read_tokens + total_tokens > 0: if thought_tokens > 0: 
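Audio returned by the model is wrapped into a WAV container here and exposed via `additional_kwargs["audio"]`. A hedged sketch of requesting speech output; the TTS model name is an assumption and such models may require preview access:

```python
from langchain_google_genai import ChatGoogleGenerativeAI, Modality

llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash-preview-tts",  # assumed TTS-capable model
    response_modalities=[Modality.AUDIO],
)

msg = llm.invoke("Say 'hello world' cheerfully.")
with open("hello.wav", "wb") as f:
    f.write(msg.additional_kwargs["audio"])  # WAV bytes assembled above
```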
cumulative_usage = UsageMetadata( @@ -1127,16 +1070,17 @@ def _response_to_result( generations: list[ChatGeneration] = [] - for candidate in response.candidates: - generation_info = {} + for candidate in response.candidates or []: + generation_info: dict[str, Any] = {} if candidate.finish_reason: generation_info["finish_reason"] = candidate.finish_reason.name # Add model_name in last chunk - generation_info["model_name"] = response.model_version - generation_info["safety_ratings"] = [ - proto.Message.to_dict(safety_rating, use_integers_for_enums=False) - for safety_rating in candidate.safety_ratings - ] + generation_info["model_name"] = response.model_version or "" + generation_info["safety_ratings"] = ( + [safety_rating.model_dump() for safety_rating in candidate.safety_ratings] + if candidate.safety_ratings + else [] + ) message = _parse_response_candidate( candidate, streaming=stream, model_name=response.model_version ) @@ -1146,7 +1090,24 @@ def _response_to_result( try: if candidate.grounding_metadata: - grounding_metadata = proto.Message.to_dict(candidate.grounding_metadata) + grounding_metadata = candidate.grounding_metadata.model_dump() + # Ensure None fields that are expected to be lists become empty lists + # to prevent errors in downstream processing + if ( + "grounding_supports" in grounding_metadata + and grounding_metadata["grounding_supports"] is None + ): + grounding_metadata["grounding_supports"] = [] + if ( + "grounding_chunks" in grounding_metadata + and grounding_metadata["grounding_chunks"] is None + ): + grounding_metadata["grounding_chunks"] = [] + if ( + "web_search_queries" in grounding_metadata + and grounding_metadata["web_search_queries"] is None + ): + grounding_metadata["web_search_queries"] = [] generation_info["grounding_metadata"] = grounding_metadata message.response_metadata["grounding_metadata"] = grounding_metadata except AttributeError: @@ -1192,24 +1153,53 @@ def _response_to_result( return ChatResult(generations=generations, llm_output=llm_output) -def _is_event_loop_running() -> bool: - try: - asyncio.get_running_loop() - return True - except RuntimeError: - return False - - class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel): r"""Google GenAI chat model integration. - Instantiation: - To use, you must have either: + Setup: + This integration supports both the **Gemini Developer API** and **Vertex AI**. + + **For Gemini Developer API** (simplest): + + 1. Set the `GOOGLE_API_KEY` environment variable (recommended), or + 2. Pass your API key using the `api_key` parameter + + ```python + from langchain_google_genai import ChatGoogleGenerativeAI + + model = ChatGoogleGenerativeAI(model="gemini-3-pro-preview", api_key="...") + ``` + + **For Vertex AI with API key** (NEW - simpler than service accounts!): + + ```bash + export GEMINI_API_KEY='your-api-key' + export GOOGLE_GENAI_USE_VERTEXAI=true + export GOOGLE_CLOUD_PROJECT='your-project-id' + ``` + + ```python + model = ChatGoogleGenerativeAI(model="gemini-2.5-flash") + # Or explicitly: + model = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + api_key="...", + project="your-project-id", + vertexai=True, + ) + ``` - 1. The `GOOGLE_API_KEY` environment variable set with your API key, or - 2. Pass your API key using the `google_api_key` kwarg to the - `ChatGoogleGenerativeAI` constructor. 
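Grounding metadata is normalized so its list-valued keys are never `None`, which keeps downstream access simple. A sketch combining it with the standard `usage_metadata`; the search-tool binding mirrors the docstring example below:

```python
from google.genai.types import Tool as GoogleTool

from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

msg = llm.invoke(
    "Who won the most recent FIFA World Cup?",
    tools=[GoogleTool(google_search={})],
)

print(msg.usage_metadata)  # input/output/total tokens, plus thought and cache details
grounding = msg.response_metadata.get("grounding_metadata", {})
print(grounding.get("web_search_queries", []))
```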
+ **For Vertex AI with credentials**: + ```python + model = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + project="your-project-id", + # Uses Application Default Credentials (ADC) + ) + ``` + + Instantiation: ```python from langchain_google_genai import ChatGoogleGenerativeAI @@ -1426,6 +1416,17 @@ class GetPopulation(BaseModel): ] ``` + Search: + ```python + from google.genai.types import Tool as GoogleTool + + llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash") + resp = llm.invoke( + "When is the next total solar eclipse in US?", + tools=[GoogleTool(google_search={})], + ) + ``` + Structured output: See [the docs](https://docs.langchain.com/oss/python/integrations/chat/google_generative_ai#structured-output) for more info. @@ -1617,6 +1618,14 @@ class Joke(BaseModel): See [the docs](https://docs.langchain.com/oss/python/integrations/chat/google_generative_ai#audio-generation) for more info. + !!! note + + Audio generation models (TTS) are currently in preview on Vertex AI + and may require allowlist access. If you receive an `INVALID_ARGUMENT` + error when using TTS models with `vertexai=True`, your project may need to + be allowlisted. See this post on the [Google AI forum](https://discuss.ai.google.dev/t/request-allowlist-access-for-audio-output-in-gemini-2-5-pro-flash-tts-vertex-ai/108067) + for more details. + File upload: You can also upload files to Google's servers and reference them by URI. @@ -1651,9 +1660,19 @@ class Joke(BaseModel): See [the docs](https://docs.langchain.com/oss/python/integrations/chat/google_generative_ai#thinking-support) for more info. - For thinking models, you have the option to adjust the number of internal - thinking tokens used (`thinking_budget`) or to disable thinking altogether. - Note that not all models allow disabling thinking. + Gemini 3+ models use `thinking_level` (`'low'` or `'high'`) to control + reasoning depth. If not specified, defaults to `'high'`. + + ```python + model = ChatGoogleGenerativeAI( + model="gemini-3-pro-preview", + thinking_level="low", # For faster, lower-latency responses + ) + ``` + + Gemini 2.5 models use `thinking_budget` (an integer token count) to control + reasoning. Set to `0` to disable thinking (where supported), or `-1` for + dynamic thinking. See the [Gemini API docs](https://ai.google.dev/gemini-api/docs/thinking) for more details on thinking models. @@ -1669,6 +1688,18 @@ class Joke(BaseModel): ai_msg = model.invoke("How many 'r's are in the word 'strawberry'?") ``` + Thought signatures: + Gemini 3+ models return *thought signatures*—encrypted representations of + the model's internal reasoning. For multi-turn conversations involving tool + calls, you must pass the full `AIMessage` back to the model so that these + signatures are preserved. This happens automatically when you append the + `AIMessage` to your message list. + + See the [LangChain docs](https://docs.langchain.com/oss/python/integrations/chat/google_generative_ai#thought-signatures) for more info as well as a code example. + + See the [Gemini API docs](https://ai.google.dev/gemini-api/docs/thinking) + for more details on thought signatures. + Google search: See [the docs](https://docs.langchain.com/oss/python/integrations/chat/google_generative_ai#google-search) for more info. @@ -1904,32 +1935,22 @@ class Joke(BaseModel): ``` """ # noqa: E501 - thinking_level: Literal["low", "high"] | None = Field( + client: Client | None = Field( default=None, + exclude=True, # Excluded from serialization ) - """Indicates the thinking level. 
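Putting the two reasoning controls side by side (the model identifiers are current preview/GA names and may change):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Gemini 3+: discrete levels; defaults to "high" when unspecified
fast = ChatGoogleGenerativeAI(model="gemini-3-pro-preview", thinking_level="low")

# Gemini 2.5: token budget; 0 disables thinking (where supported), -1 is dynamic
budgeted = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    thinking_budget=1024,
    include_thoughts=True,  # surface reasoning blocks in the response content
)

print(budgeted.invoke("How many 'r's are in 'strawberry'?").content)
```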
- - Supported values: - * `'low'`: Minimizes latency and cost. - * `'high'`: Maximizes reasoning depth. - - !!! note "Replaces `thinking_budget`" - - `thinking_budget` is deprecated for Gemini 3+ models. If both parameters are - provided, `thinking_level` takes precedence. - - If left unspecified, the model's default thinking level is used. For Gemini 3+, - this defaults to `'high'`. - """ - - client: Any = Field(default=None, exclude=True) - - async_client_running: Any = Field(default=None, exclude=True) default_metadata: Sequence[tuple[str, str]] | None = Field( - default=None, alias="default_metadata_input" + default=None, + alias="default_metadata_input", ) + model_kwargs: dict[str, Any] = Field(default_factory=dict) + """Holds any unexpected initialization parameters.""" + + streaming: bool | None = None + """Whether to stream responses from the model.""" + convert_system_message_to_human: bool = False """Whether to merge any leading `SystemMessage` into the following `HumanMessage`. @@ -1937,6 +1958,9 @@ class Joke(BaseModel): error. """ + stop: list[str] | None = None + """Stop sequences for the model.""" + response_mime_type: str | None = None """Output response MIME type of the generated candidate text. @@ -1972,6 +1996,24 @@ class Joke(BaseModel): for more details. """ + thinking_level: Literal["low", "high"] | None = Field( + default=None, + ) + """Indicates the thinking level. + + Supported values: + * `'low'`: Minimizes latency and cost. + * `'high'`: Maximizes reasoning depth. + + !!! note "Replaces `thinking_budget`" + + `thinking_budget` is deprecated for Gemini 3+ models. If both parameters are + provided, `thinking_level` takes precedence. + + If left unspecified, the model's default thinking level is used. For Gemini 3+, + this defaults to `'high'`. + """ + cached_content: str | None = None """The name of the cached content used as context to serve the prediction. @@ -1982,15 +2024,6 @@ class Joke(BaseModel): `cachedContents/{cachedContent}`. """ - stop: list[str] | None = None - """Stop sequences for the model.""" - - streaming: bool | None = None - """Whether to stream responses from the model.""" - - model_kwargs: dict[str, Any] = Field(default_factory=dict) - """Holds any unexpected initialization parameters.""" - def __init__(self, **kwargs: Any) -> None: """Needed for arg validation.""" # Get all valid field names, including aliases @@ -2017,10 +2050,6 @@ def __init__(self, **kwargs: Any) -> None: populate_by_name=True, ) - @property - def lc_secrets(self) -> dict[str, str]: - return {"google_api_key": "GOOGLE_API_KEY"} - @property def _llm_type(self) -> str: return "chat-google-generative-ai" @@ -2029,14 +2058,11 @@ def _llm_type(self) -> str: def _supports_code_execution(self) -> bool: """Whether the model supports code execution. - See the [Gemini models docs](https://ai.google.dev/gemini-api/docs/models) for a - full list. + See [Gemini models](https://ai.google.dev/gemini-api/docs/models) for a list. 
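Code execution is gated by `_supports_code_execution` (Gemini 2 and 3 families). A sketch of enabling the built-in tool explicitly; the same `ToolCodeExecution` construction is what the `invoke` handling builds internally:

```python
from google.genai.types import Tool as GoogleTool
from google.genai.types import ToolCodeExecution

from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

msg = llm.invoke(
    "Use code execution to compute the sum of the first 50 primes.",
    tools=[GoogleTool(code_execution=ToolCodeExecution())],
)
print(msg.content)
```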
""" - return ( - "gemini-1.5-pro" in self.model - or "gemini-1.5-flash" in self.model - or "gemini-2" in self.model - ) + # TODO: Refactor to use `capabilities` property when supported upstream + # (or done via augmentation) + return "gemini-2" in self.model or "gemini-3" in self.model @classmethod def is_lc_serializable(cls) -> bool: @@ -2045,13 +2071,23 @@ def is_lc_serializable(cls) -> bool: @model_validator(mode="before") @classmethod def build_extra(cls, values: dict[str, Any]) -> Any: - """Build extra kwargs from additional params that were passed in.""" + """Build extra kwargs from additional params that were passed in. + + (In other words, handle additional params that aren't explicitly defined as + model fields. Used to pass extra config to underlying APIs without defining them + all here.) + """ all_required_field_names = get_pydantic_field_names(cls) return _build_model_kwargs(values, all_required_field_names) @model_validator(mode="after") def validate_environment(self) -> Self: - """Validates params and passes them to `google-generativeai` package.""" + """Validates params and builds client. + + We override `temperature` to `1.0` for Gemini 3+ models if not explicitly set. + This is to prevent infinite loops and degraded performance that can occur with + `temperature < 1.0` on these models. + """ if self.temperature is not None and not 0 <= self.temperature <= 2.0: msg = "temperature must be in the range [0.0, 2.0]" raise ValueError(msg) @@ -2069,80 +2105,100 @@ def validate_environment(self) -> Self: msg = "top_k must be positive" raise ValueError(msg) - if not any(self.model.startswith(prefix) for prefix in ("models/",)): - self.model = f"models/{self.model}" - additional_headers = self.additional_headers or {} self.default_metadata = tuple(additional_headers.items()) - client_info = get_client_info(f"ChatGoogleGenerativeAI:{self.model}") + + _, user_agent = get_user_agent("ChatGoogleGenerativeAI") + headers = {"User-Agent": user_agent, **additional_headers} + google_api_key = None if not self.credentials: if isinstance(self.google_api_key, SecretStr): google_api_key = self.google_api_key.get_secret_value() else: google_api_key = self.google_api_key - transport: str | None = self.transport - - # Merge base_url into client_options if provided - client_options = self.client_options or {} - if self.base_url and "api_endpoint" not in client_options: - client_options = {**client_options, "api_endpoint": self.base_url} - - self.client = genaix.build_generative_service( - credentials=self.credentials, - api_key=google_api_key, - client_info=client_info, - client_options=client_options, - transport=transport, - ) - self.async_client_running = None - return self - @property - def async_client(self) -> v1betaGenerativeServiceAsyncClient: - google_api_key = None - if not self.credentials: - if isinstance(self.google_api_key, SecretStr): - google_api_key = self.google_api_key.get_secret_value() + base_url = self.base_url + if isinstance(self.base_url, dict): + # Handle case where base_url is provided as a dict + # (Backwards compatibility for deprecated client_options field) + if keys := list(self.base_url.keys()): + if "api_endpoint" in keys and len(keys) == 1: + base_url = self.base_url["api_endpoint"] + elif "api_endpoint" in keys and len(keys) > 1: + msg = ( + "When providing base_url as a dict, it can only contain the " + "api_endpoint key. 
Extra keys found: " + f"{[k for k in keys if k != 'api_endpoint']}" + ) + raise ValueError(msg) + else: + msg = ( + "When providing base_url as a dict, it must only contain the " + "api_endpoint key." + ) + raise ValueError(msg) else: - google_api_key = self.google_api_key - # NOTE: genaix.build_generative_async_service requires - # a running event loop, which causes an error - # when initialized inside a ThreadPoolExecutor. - # this check ensures that async client is only initialized - # within an asyncio event loop to avoid the error - if not self.async_client_running and _is_event_loop_running(): - # async clients don't support "rest" transport - # https://github.com/googleapis/gapic-generator-python/issues/1962 - - # However, when using custom endpoints, we can try to keep REST transport - transport = self.transport - client_options = self.client_options or {} - - # Check for custom endpoint - has_custom_endpoint = self.base_url or ( - self.client_options - and "api_endpoint" in self.client_options - and self.client_options["api_endpoint"] - != "https://generativelanguage.googleapis.com" - ) + msg = ( + "base_url must be a string or a dict containing the " + "api_endpoint key." + ) + raise ValueError(msg) - # Only change to grpc_asyncio if no custom endpoint is specified - if transport == "rest" and not has_custom_endpoint: - transport = "grpc_asyncio" + http_options = HttpOptions( + base_url=cast("str", base_url), + headers=headers, + client_args=self.client_args, + async_client_args=self.client_args, + ) - # Merge base_url into client_options if provided - if self.base_url and "api_endpoint" not in client_options: - client_options = {**client_options, "api_endpoint": self.base_url} + if self._use_vertexai: # type: ignore[attr-defined] + # Vertex AI backend - supports both API key and credentials + # Note: The google-genai SDK requires API keys to be passed via environment + # variables when using Vertex AI, not via the api_key parameter. + # If an API key is provided programmatically, we set it in the environment + # temporarily for the Client initialization. + + # Normalize model name for Vertex AI - strip 'models/' prefix + # Vertex AI expects model names without the prefix + # (e.g., "gemini-2.5-flash") while Google AI accepts both formats + if self.model.startswith("models/"): + object.__setattr__(self, "model", self.model.replace("models/", "", 1)) + + api_key_env_set = False + + if ( + google_api_key + and not os.getenv("GOOGLE_API_KEY") + and not os.getenv("GEMINI_API_KEY") + ): + # Set the API key in environment for Client initialization + os.environ["GOOGLE_API_KEY"] = google_api_key + api_key_env_set = True - self.async_client_running = genaix.build_generative_async_service( - credentials=self.credentials, - api_key=google_api_key, - client_info=get_client_info(f"ChatGoogleGenerativeAI:{self.model}"), - client_options=client_options, - transport=transport, - ) - return self.async_client_running + try: + self.client = Client( + vertexai=True, + project=self.project, + location=self.location, + credentials=self.credentials, + http_options=http_options, + ) + finally: + # Clean up the temporary environment variable if we set it + if api_key_env_set: + os.environ.pop("GOOGLE_API_KEY", None) + else: + # Gemini Developer API - requires API key + if not google_api_key: + msg = ( + "API key required for Gemini Developer API. Provide api_key " + "parameter or set GOOGLE_API_KEY/GEMINI_API_KEY environment " + "variable." 
+ ) + raise ValueError(msg) + self.client = Client(api_key=google_api_key, http_options=http_options) + return self @model_validator(mode="after") def _set_model_profile(self) -> Self: @@ -2152,6 +2208,64 @@ def _set_model_profile(self) -> Self: self.profile = _get_default_model_profile(model_id) return self + def __del__(self) -> None: + """Clean up the client on deletion.""" + if not hasattr(self, "client") or self.client is None: + return + + try: + # Close the sync client + self.client.close() + + # Attempt to close the async client + # Note: The SDK's close() doesn't close the async client automatically + if hasattr(self.client, "aio") and self.client.aio is not None: + try: + # Check if there's a running event loop + loop = asyncio.get_running_loop() + if not loop.is_closed(): + # Schedule the close + # Wrap in ensure_future to avoid "coroutine never awaited" + task = asyncio.ensure_future( + self.client.aio.aclose(), loop=loop + ) + # Add a done callback to suppress any exceptions + task.add_done_callback( + lambda t: t.exception() if not t.cancelled() else None + ) + except RuntimeError: + # No running loop - create a new one for cleanup + try: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete(self.client.aio.aclose()) + finally: + loop.close() + asyncio.set_event_loop(None) + except Exception: + # Suppress errors during shutdown + pass + except Exception: + # Suppress all errors during cleanup + pass + + @property + def async_client(self) -> Any: + """Async client for Google GenAI operations.. + + Returns: + The async client interface that exposes async versions of all client + methods. + + Raises: + ValueError: If the client has not been initialized. + """ + if self.client is None: + msg = "Client not initialized. Initialize the model first." + raise ValueError(msg) + return self.client.aio + @property def _identifying_params(self) -> dict[str, Any]: """Get the identifying parameters.""" @@ -2186,12 +2300,12 @@ def invoke( if code_execution is not None: if not self._supports_code_execution: msg = ( - "Code execution is only supported on Gemini 1.5, 2.0, and 2.5 " - f"models. Current model: {self.model}" + "Code execution is only supported on Gemini 2.0, and 2.5 models. 
" + f"Current model: {self.model}" ) raise ValueError(msg) if "tools" not in kwargs: - code_execution_tool = GoogleTool(code_execution=CodeExecution()) + code_execution_tool = GoogleTool(code_execution=ToolCodeExecution()) kwargs["tools"] = [code_execution_tool] else: @@ -2223,73 +2337,123 @@ def _get_ls_params( ls_params["ls_stop"] = ls_stop return ls_params + def _supports_thinking(self) -> bool: + """Check if the current model supports thinking capabilities.""" + return self.profile.get("reasoning_output", False) if self.profile else False + def _prepare_params( self, stop: list[str] | None, generation_config: dict[str, Any] | None = None, **kwargs: Any, ) -> GenerationConfig: - # Extract thinking parameters with kwargs override - thinking_budget = kwargs.get("thinking_budget", self.thinking_budget) + """Prepare generation parameters with config logic.""" + gen_config = self._build_base_generation_config(stop, **kwargs) + if generation_config: + gen_config = self._merge_generation_config(gen_config, generation_config) + + # Handle response-specific kwargs (MIME type and structured output) + gen_config = self._add_response_parameters(gen_config, **kwargs) + + return GenerationConfig.model_validate(gen_config) + + def _build_base_generation_config( + self, stop: list[str] | None, **kwargs: Any + ) -> dict[str, Any]: + """Build the base generation configuration from instance attributes.""" + config: dict[str, Any] = { + "candidate_count": self.n, + "temperature": kwargs.get("temperature", self.temperature), + "stop_sequences": stop, + "max_output_tokens": kwargs.get( + "max_output_tokens", self.max_output_tokens + ), + "top_k": kwargs.get("top_k", self.top_k), + "top_p": kwargs.get("top_p", self.top_p), + "response_modalities": kwargs.get( + "response_modalities", self.response_modalities + ), + } + thinking_config = self._build_thinking_config(**kwargs) + if thinking_config is not None: + config["thinking_config"] = thinking_config + return {k: v for k, v in config.items() if v is not None} + + def _build_thinking_config(self, **kwargs: Any) -> dict[str, Any] | None: + """Build thinking configuration if supported by the model.""" thinking_level = kwargs.get("thinking_level", self.thinking_level) + thinking_budget = kwargs.get("thinking_budget", self.thinking_budget) + include_thoughts = kwargs.get("include_thoughts", self.include_thoughts) - if thinking_level is not None and thinking_budget is not None: - msg = ( - "Both 'thinking_level' and 'thinking_budget' were specified. " - "'thinking_level' is not yet supported by the current API version, " - "so 'thinking_budget' will be used instead." 
- ) - warnings.warn(msg, UserWarning, stacklevel=2) - - gen_config = { - k: v - for k, v in { - "candidate_count": self.n, - "temperature": self.temperature, - "stop_sequences": stop, - "max_output_tokens": kwargs.get( - "max_output_tokens", self.max_output_tokens - ), - "top_k": self.top_k, - "top_p": self.top_p, - "response_modalities": self.response_modalities, - "thinking_config": ( - ( - ( - {"thinking_budget": thinking_budget} - if thinking_budget is not None - else {} - ) - | ( - {"include_thoughts": self.include_thoughts} - if self.include_thoughts is not None - else {} - ) - | ( - {"thinking_level": thinking_level} - if thinking_level is not None - else {} - ) - ) - if thinking_budget is not None - or self.include_thoughts is not None - or thinking_level is not None - else None - ), - }.items() - if v is not None - } - if generation_config: - gen_config = {**gen_config, **generation_config} + has_thinking_params = ( + thinking_level is not None + or thinking_budget is not None + or include_thoughts is not None + ) + if not has_thinking_params: + return None + if not self._supports_thinking(): + return None + + config: dict[str, Any] = {} + + # thinking_level takes precedence over thinking_budget for Gemini 3+ models + if thinking_level is not None: + if thinking_budget is not None: + warnings.warn( + "Both 'thinking_level' and 'thinking_budget' were provided. " + "'thinking_level' takes precedence for Gemini 3+ models; " + "'thinking_budget' will be ignored.", + UserWarning, + stacklevel=2, + ) + config["thinking_level"] = thinking_level + elif thinking_budget is not None: + config["thinking_budget"] = thinking_budget + + if include_thoughts is not None: + config["include_thoughts"] = include_thoughts + + return config + + def _merge_generation_config( + self, base_config: dict[str, Any], generation_config: dict[str, Any] + ) -> dict[str, Any]: + """Merge user-provided generation config with base config.""" + processed_config = dict(generation_config) + # Convert string response_modalities to Modality enums if needed + if "response_modalities" in processed_config: + modalities = processed_config["response_modalities"] + if ( + isinstance(modalities, list) + and modalities + and isinstance(modalities[0], str) + ): + from langchain_google_genai import Modality + try: + processed_config["response_modalities"] = [ + getattr(Modality, modality) for modality in modalities + ] + except AttributeError as e: + msg = f"Invalid response modality: {e}" + raise ValueError(msg) from e + return {**base_config, **processed_config} + + def _add_response_parameters( + self, gen_config: dict[str, Any], **kwargs: Any + ) -> dict[str, Any]: + """Add response-specific parameters to generation config. + + Includes `response_mime_type`, `response_schema`, and `response_json_schema`. 
+ """ + # Handle response mime type response_mime_type = kwargs.get("response_mime_type", self.response_mime_type) if response_mime_type is not None: gen_config["response_mime_type"] = response_mime_type response_schema = kwargs.get("response_schema", self.response_schema) - - # In case passed in as a direct kwarg - response_json_schema = kwargs.get("response_json_schema") + response_json_schema = kwargs.get("response_json_schema") # If passed as kwarg # Handle both response_schema and response_json_schema # (Regardless, we use `response_json_schema` in the request) @@ -2298,31 +2462,261 @@ def _prepare_params( if response_json_schema is not None else response_schema ) + if schema_to_use: + self._validate_and_add_response_schema( + gen_config=gen_config, + response_schema=schema_to_use, + response_mime_type=response_mime_type, + ) - if schema_to_use is not None: - if response_mime_type != "application/json": - param_name = ( - "response_json_schema" - if response_json_schema is not None - else "response_schema" + return gen_config + + def _validate_and_add_response_schema( + self, + gen_config: dict[str, Any], + response_schema: dict[str, Any], + response_mime_type: str | None, + ) -> None: + """Validate and add response schema to generation config.""" + if response_mime_type != "application/json": + error_message = ( + "JSON schema structured output is only supported when " + "response_mime_type is set to 'application/json'" + ) + if response_mime_type == "text/x.enum": + error_message += ( + ". Instead of 'text/x.enum', define enums using your JSON schema." ) - error_message = ( - f"'{param_name}' is only supported when " - f"response_mime_type is set to 'application/json'" + raise ValueError(error_message) + + gen_config["response_json_schema"] = response_schema + + def _prepare_request( + self, + messages: list[BaseMessage], + *, + stop: list[str] | None = None, + tools: Sequence[_ToolDict | GoogleTool] | None = None, + functions: Sequence[_FunctionDeclarationType] | None = None, + safety_settings: SafetySettingDict | None = None, + tool_config: dict | ToolConfig | None = None, + tool_choice: _ToolChoiceType | bool | None = None, + generation_config: dict[str, Any] | None = None, + cached_content: str | None = None, + **kwargs: Any, + ) -> dict[str, Any]: + """Prepare the request configuration for the API call.""" + # Validate tool configuration + if tool_choice and tool_config: + msg = ( + "Must specify at most one of tool_choice and tool_config, received " + f"both:\n\n{tool_choice=}\n\n{tool_config=}" + ) + raise ValueError(msg) + + # Process tools and functions + formatted_tools = self._format_tools(tools, functions) + + # Remove any messages with empty content + filtered_messages = self._filter_messages(messages) + + # Parse chat history into Gemini Content + system_instruction, history = _parse_chat_history( + filtered_messages, + convert_system_message_to_human=self.convert_system_message_to_human, + model=self.model, + ) + + # Process tool configuration + formatted_tool_config = self._process_tool_config( + tool_choice, tool_config, formatted_tools + ) + + # Process safety settings + formatted_safety_settings = self._format_safety_settings(safety_settings) + + timeout = kwargs.pop("timeout", None) + if timeout is not None: + timeout = int(timeout * 1000) + elif self.timeout is not None: + timeout = int(self.timeout * 1000) + + max_retries = kwargs.pop("max_retries", None) + if max_retries is None: + max_retries = self.max_retries + + # Get generation parameters + # (consumes 
thinking kwargs into params.thinking_config) + params: GenerationConfig = self._prepare_params( + stop, generation_config=generation_config, **kwargs + ) + + _consumed_kwargs = { + "thinking_budget", + "thinking_level", + "include_thoughts", + "response_modalities", + } + # Filter out kwargs already consumed by _prepare_params. + # These are handled via params and aren't direct fields + # on GenerateContentConfig or would cause duplicate argument errors. + remaining_kwargs = { + k: v for k, v in kwargs.items() if k not in _consumed_kwargs + } + + # Build request configuration + request = self._build_request_config( + formatted_tools, + formatted_tool_config, + formatted_safety_settings, + params, + cached_content, + system_instruction, + stop, + timeout=timeout, + max_retries=max_retries, + **remaining_kwargs, + ) + + # Return config and additional params needed for API call + return {"model": self.model, "contents": history, "config": request} + + def _format_tools( + self, + tools: Sequence[_ToolDict | GoogleTool] | None = None, + functions: Sequence[_FunctionDeclarationType] | None = None, + ) -> list | None: + """Format tools and functions for the API.""" + code_execution_tool = GoogleTool(code_execution=ToolCodeExecution()) + if tools == [code_execution_tool]: + return list(tools) + if tools: + return [convert_to_genai_function_declarations(tools)] + if functions: + return [convert_to_genai_function_declarations(functions)] + return None + + def _filter_messages(self, messages: list[BaseMessage]) -> list[BaseMessage]: + """Filter out messages with empty content.""" + filtered_messages = [] + for message in messages: + if isinstance(message, HumanMessage) and not message.content: + warnings.warn( + "HumanMessage with empty content was removed to prevent API error" ) - if response_mime_type == "text/x.enum": - error_message += ( - ". Instead of 'text/x.enum', define enums using JSON schema." - ) - raise ValueError(error_message) + else: + filtered_messages.append(message) + return filtered_messages + + def _process_tool_config( + self, + tool_choice: _ToolChoiceType | bool | None, + tool_config: dict | ToolConfig | None, + formatted_tools: list | None, + ) -> ToolConfig | None: + """Process tool configuration and choice.""" + if tool_choice: + if not formatted_tools: + msg = ( + f"Received {tool_choice=} but no {formatted_tools=}. " + "'tool_choice' can only be specified if 'tools' is specified." 
+ ) + raise ValueError(msg) + all_names = self._extract_tool_names(formatted_tools) + return _tool_choice_to_tool_config(tool_choice, all_names) + if tool_config: + if isinstance(tool_config, dict): + return ToolConfig.model_validate(tool_config) + return tool_config + return None + + def _extract_tool_names(self, formatted_tools: list) -> list[str]: + """Extract tool names from formatted tools.""" + all_names: list[str] = [] + for t in formatted_tools: + if hasattr(t, "function_declarations"): + t_with_declarations = cast("Any", t) + all_names.extend( + f.name for f in t_with_declarations.function_declarations + ) + elif isinstance(t, GoogleTool) and hasattr(t, "code_execution"): + continue + else: + msg = f"Tool {t} doesn't have function_declarations attribute" + raise TypeError(msg) + return all_names + + def _format_safety_settings( + self, safety_settings: SafetySettingDict | None + ) -> list[SafetySetting]: + """Format safety settings for the API.""" + if not safety_settings: + return [] + if isinstance(safety_settings, dict): + # Handle dictionary format: {HarmCategory: HarmBlockThreshold} + return [ + SafetySetting(category=category, threshold=threshold) + for category, threshold in safety_settings.items() + ] + if isinstance(safety_settings, list): + # Handle list format: [SafetySetting, ...] + return safety_settings - gen_config["response_json_schema"] = schema_to_use + # Handle single SafetySetting object + return [safety_settings] - media_resolution = kwargs.get("media_resolution", self.media_resolution) - if media_resolution is not None: - gen_config["media_resolution"] = media_resolution + def _build_request_config( + self, + formatted_tools: list | None, + formatted_tool_config: ToolConfig | None, + formatted_safety_settings: list[SafetySetting], + params: GenerationConfig, + cached_content: str | None, + system_instruction: Content | None, + stop: list[str] | None, + timeout: int | None = None, + max_retries: int | None = None, + **kwargs: Any, + ) -> GenerateContentConfig: + """Build the final request configuration.""" + # Convert response modalities + response_modalities = ( + [m.value for m in params.response_modalities] + if params.response_modalities + else None + ) + # Create thinking config if supported + thinking_config = None + if params.thinking_config is not None and self._supports_thinking(): + thinking_config = ThinkingConfig( + include_thoughts=params.thinking_config.include_thoughts, + thinking_budget=params.thinking_config.thinking_budget, + thinking_level=params.thinking_config.thinking_level, + ) - return GenerationConfig(**gen_config) + retry_options = None + if max_retries is not None: + retry_options = HttpRetryOptions(attempts=max_retries) + + http_options = None + if timeout is not None or retry_options is not None: + http_options = HttpOptions( + timeout=timeout, + retry_options=retry_options, + ) + + return GenerateContentConfig( + tools=list(formatted_tools) if formatted_tools else None, + tool_config=formatted_tool_config, + safety_settings=formatted_safety_settings, + response_modalities=response_modalities if response_modalities else None, + thinking_config=thinking_config, + cached_content=cached_content, + system_instruction=system_instruction, + stop_sequences=stop, + http_options=http_options, + **kwargs, + ) def _generate( self, @@ -2333,12 +2727,16 @@ def _generate( tools: Sequence[_ToolDict | GoogleTool] | None = None, functions: Sequence[_FunctionDeclarationType] | None = None, safety_settings: SafetySettingDict | None = None, - 
tool_config: dict | _ToolConfigDict | None = None, + tool_config: dict | ToolConfig | None = None, generation_config: dict[str, Any] | None = None, cached_content: str | None = None, tool_choice: _ToolChoiceType | bool | None = None, **kwargs: Any, ) -> ChatResult: + if self.client is None: + msg = "Client not initialized." + raise ValueError(msg) + request = self._prepare_request( messages, stop=stop, @@ -2351,16 +2749,13 @@ def _generate( tool_choice=tool_choice, **kwargs, ) - if self.timeout is not None and "timeout" not in kwargs: - kwargs["timeout"] = self.timeout - if "max_retries" not in kwargs: - kwargs["max_retries"] = self.max_retries - response: GenerateContentResponse = _chat_with_retry( - request=request, - **kwargs, - generation_method=self.client.generate_content, - metadata=self.default_metadata, - ) + try: + response: GenerateContentResponse = self.client.models.generate_content( + **request, + ) + except ClientError as e: + _handle_client_error(e, request) + return _response_to_result(response) async def _agenerate( @@ -2372,24 +2767,15 @@ async def _agenerate( tools: Sequence[_ToolDict | GoogleTool] | None = None, functions: Sequence[_FunctionDeclarationType] | None = None, safety_settings: SafetySettingDict | None = None, - tool_config: dict | _ToolConfigDict | None = None, + tool_config: dict | ToolConfig | None = None, generation_config: dict[str, Any] | None = None, cached_content: str | None = None, tool_choice: _ToolChoiceType | bool | None = None, **kwargs: Any, ) -> ChatResult: - if not self.async_client: - updated_kwargs = { - **kwargs, - "tools": tools, - "functions": functions, - "safety_settings": safety_settings, - "tool_config": tool_config, - "generation_config": generation_config, - } - return await super()._agenerate( - messages, stop, run_manager, **updated_kwargs - ) + if self.client is None: + msg = "Client not initialized." + raise ValueError(msg) request = self._prepare_request( messages, @@ -2403,16 +2789,15 @@ async def _agenerate( tool_choice=tool_choice, **kwargs, ) - if self.timeout is not None and "timeout" not in kwargs: - kwargs["timeout"] = self.timeout - if "max_retries" not in kwargs: - kwargs["max_retries"] = self.max_retries - response: GenerateContentResponse = await _achat_with_retry( - request=request, - **kwargs, - generation_method=self.async_client.generate_content, - metadata=self.default_metadata, - ) + try: + response: GenerateContentResponse = ( + await self.client.aio.models.generate_content( + **request, + ) + ) + except ClientError as e: + _handle_client_error(e, request) + return _response_to_result(response) def _stream( @@ -2424,12 +2809,16 @@ def _stream( tools: Sequence[_ToolDict | GoogleTool] | None = None, functions: Sequence[_FunctionDeclarationType] | None = None, safety_settings: SafetySettingDict | None = None, - tool_config: dict | _ToolConfigDict | None = None, + tool_config: dict | ToolConfig | None = None, generation_config: dict[str, Any] | None = None, cached_content: str | None = None, tool_choice: _ToolChoiceType | bool | None = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: + if self.client is None: + msg = "Client not initialized." 
+ raise ValueError(msg) + request = self._prepare_request( messages, stop=stop, @@ -2442,28 +2831,27 @@ def _stream( tool_choice=tool_choice, **kwargs, ) - if self.timeout is not None and "timeout" not in kwargs: - kwargs["timeout"] = self.timeout - if "max_retries" not in kwargs: - kwargs["max_retries"] = self.max_retries - response: GenerateContentResponse = _chat_with_retry( - request=request, - generation_method=self.client.stream_generate_content, - **kwargs, - metadata=self.default_metadata, - ) + try: + response: Iterator[GenerateContentResponse] = ( + self.client.models.generate_content_stream( + **request, + ) + ) + except ClientError as e: + _handle_client_error(e, request) - prev_usage_metadata: UsageMetadata | None = None # cumulative usage + prev_usage_metadata: UsageMetadata | None = None # Cumulative usage index = -1 index_type = "" for chunk in response: - _chat_result = _response_to_result( - chunk, stream=True, prev_usage=prev_usage_metadata - ) - gen = cast("ChatGenerationChunk", _chat_result.generations[0]) - message = cast("AIMessageChunk", gen.message) + if chunk: + _chat_result = _response_to_result( + chunk, stream=True, prev_usage=prev_usage_metadata + ) + gen = cast("ChatGenerationChunk", _chat_result.generations[0]) + message = cast("AIMessageChunk", gen.message) - # populate index if missing + # Populate index if missing if isinstance(message.content, list): for block in message.content: if isinstance(block, dict) and "type" in block: @@ -2492,189 +2880,64 @@ async def _astream( tools: Sequence[_ToolDict | GoogleTool] | None = None, functions: Sequence[_FunctionDeclarationType] | None = None, safety_settings: SafetySettingDict | None = None, - tool_config: dict | _ToolConfigDict | None = None, + tool_config: dict | ToolConfig | None = None, generation_config: dict[str, Any] | None = None, cached_content: str | None = None, tool_choice: _ToolChoiceType | bool | None = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: - if not self.async_client: - updated_kwargs = { - **kwargs, - "tools": tools, - "functions": functions, - "safety_settings": safety_settings, - "tool_config": tool_config, - "generation_config": generation_config, - } - async for value in super()._astream( - messages, stop, run_manager, **updated_kwargs - ): - yield value - else: - request = self._prepare_request( - messages, - stop=stop, - tools=tools, - functions=functions, - safety_settings=safety_settings, - tool_config=tool_config, - generation_config=generation_config, - cached_content=cached_content or self.cached_content, - tool_choice=tool_choice, - **kwargs, - ) - if self.timeout is not None and "timeout" not in kwargs: - kwargs["timeout"] = self.timeout - if "max_retries" not in kwargs: - kwargs["max_retries"] = self.max_retries - prev_usage_metadata: UsageMetadata | None = None # cumulative usage - - index = -1 - index_type = "" - async for chunk in await _achat_with_retry( - request=request, - generation_method=self.async_client.stream_generate_content, - **kwargs, - metadata=self.default_metadata, - ): - _chat_result = _response_to_result( - chunk, stream=True, prev_usage=prev_usage_metadata - ) - gen = cast("ChatGenerationChunk", _chat_result.generations[0]) - message = cast("AIMessageChunk", gen.message) - - # populate index if missing - if isinstance(message.content, list): - for block in message.content: - if isinstance(block, dict) and "type" in block: - if block["type"] != index_type: - index_type = block["type"] - index = index + 1 - if "index" not in block: - 
block["index"] = index - - prev_usage_metadata = ( - message.usage_metadata - if prev_usage_metadata is None - else add_usage(prev_usage_metadata, message.usage_metadata) - ) - - if run_manager: - await run_manager.on_llm_new_token(gen.text, chunk=gen) - yield gen - - def _prepare_request( - self, - messages: list[BaseMessage], - *, - stop: list[str] | None = None, - tools: Sequence[_ToolDict | GoogleTool] | None = None, - functions: Sequence[_FunctionDeclarationType] | None = None, - safety_settings: SafetySettingDict | None = None, - tool_config: dict | _ToolConfigDict | None = None, - tool_choice: _ToolChoiceType | bool | None = None, - generation_config: dict[str, Any] | None = None, - cached_content: str | None = None, - **kwargs: Any, - ) -> GenerateContentRequest: - if tool_choice and tool_config: - msg = ( - "Must specify at most one of tool_choice and tool_config, received " - f"both:\n\n{tool_choice=}\n\n{tool_config=}" - ) + if self.client is None: + msg = "Client not initialized." raise ValueError(msg) - formatted_tools = None - code_execution_tool = GoogleTool(code_execution=CodeExecution()) - if tools == [code_execution_tool]: - formatted_tools = tools - elif tools: - formatted_tools = [convert_to_genai_function_declarations(tools)] - elif functions: - formatted_tools = [convert_to_genai_function_declarations(functions)] - - filtered_messages = [] - for message in messages: - if isinstance(message, HumanMessage) and not message.content: - warnings.warn( - "HumanMessage with empty content was removed to prevent API error" - ) - else: - filtered_messages.append(message) - messages = filtered_messages - - if self.convert_system_message_to_human: - system_instruction, history = _parse_chat_history( - messages, - convert_system_message_to_human=self.convert_system_message_to_human, - model=self.model, - ) - else: - system_instruction, history = _parse_chat_history( - messages, - model=self.model, + request = self._prepare_request( + messages, + stop=stop, + tools=tools, + functions=functions, + safety_settings=safety_settings, + tool_config=tool_config, + generation_config=generation_config, + cached_content=cached_content or self.cached_content, + tool_choice=tool_choice, + **kwargs, + ) + prev_usage_metadata: UsageMetadata | None = None # Cumulative usage + index = -1 + index_type = "" + try: + stream = await self.client.aio.models.generate_content_stream( + **request, ) + except ClientError as e: + _handle_client_error(e, request) - # Validate that we have at least one content message for the API - if not history: - msg = ( - "No content messages found. The Gemini API requires at least one " - "non-system message (HumanMessage, AIMessage, etc.) in addition to " - "any SystemMessage. Please include additional messages in your input." + async for chunk in stream: + _chat_result = _response_to_result( + chunk, stream=True, prev_usage=prev_usage_metadata ) - raise ValueError(msg) - - if tool_choice: - if not formatted_tools: - msg = ( - f"Received {tool_choice=} but no {tools=}. 'tool_choice' can only " - f"be specified if 'tools' is specified." 
- ) - raise ValueError(msg) - all_names: list[str] = [] - for t in formatted_tools: - if hasattr(t, "function_declarations"): - t_with_declarations = cast("Any", t) - all_names.extend( - f.name for f in t_with_declarations.function_declarations - ) - elif isinstance(t, GoogleTool) and hasattr(t, "code_execution"): - continue - else: - msg = f"Tool {t} doesn't have function_declarations attribute" - raise TypeError(msg) + gen = cast("ChatGenerationChunk", _chat_result.generations[0]) + message = cast("AIMessageChunk", gen.message) - tool_config = _tool_choice_to_tool_config(tool_choice, all_names) + # populate index if missing + if isinstance(message.content, list): + for block in message.content: + if isinstance(block, dict) and "type" in block: + if block["type"] != index_type: + index_type = block["type"] + index = index + 1 + if "index" not in block: + block["index"] = index - formatted_tool_config = None - if tool_config: - formatted_tool_config = ToolConfig( - function_calling_config=tool_config["function_calling_config"] + prev_usage_metadata = ( + message.usage_metadata + if prev_usage_metadata is None + else add_usage(prev_usage_metadata, message.usage_metadata) ) - formatted_safety_settings = [] - if safety_settings: - formatted_safety_settings = [ - SafetySetting(category=c, threshold=t) - for c, t in safety_settings.items() - ] - request = GenerateContentRequest( - model=self.model, - contents=history, # google.ai.generativelanguage_v1beta.types.Content - tools=formatted_tools, - tool_config=formatted_tool_config, - safety_settings=formatted_safety_settings, - generation_config=self._prepare_params( - stop, - generation_config=generation_config, - **kwargs, - ), - cached_content=cached_content, - ) - if system_instruction: - request.system_instruction = system_instruction - return request + if run_manager: + await run_manager.on_llm_new_token(gen.text, chunk=gen) + yield gen def get_num_tokens(self, text: str) -> int: """Get the number of tokens present in the text. Uses the model's tokenizer. @@ -2695,10 +2958,14 @@ def get_num_tokens(self, text: str) -> int: # -> 4 ``` """ - result = self.client.count_tokens( + if self.client is None: + msg = "Client not initialized." + raise ValueError(msg) + + result = self.client.models.count_tokens( model=self.model, contents=[Content(parts=[Part(text=text)])] ) - return result.total_tokens + return result.total_tokens if result and result.total_tokens is not None else 0 def with_structured_output( self, @@ -2780,7 +3047,7 @@ def bind_tools( tools: Sequence[ dict[str, Any] | type | Callable[..., Any] | BaseTool | GoogleTool ], - tool_config: dict | _ToolConfigDict | None = None, + tool_config: dict | ToolConfig | None = None, *, tool_choice: _ToolChoiceType | bool | None = None, **kwargs: Any, @@ -2807,7 +3074,7 @@ def bind_tools( ) raise ValueError(msg) try: - formatted_tools: list = [convert_to_openai_tool(tool) for tool in tools] + formatted_tools: list = [convert_to_openai_tool(tool) for tool in tools] # type: ignore[arg-type] except Exception: formatted_tools = [ tool_to_dict(convert_to_genai_function_declarations(tools)) @@ -2827,11 +3094,7 @@ def _supports_tool_choice(self) -> bool: See the [Gemini models docs](https://ai.google.dev/gemini-api/docs/models) for a full list. Gemini calls this "function calling". 
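A short sketch of what forcing a specific tool looks like from user code on a model where this returns `True` (the model and tool names are illustrative):

```python
from pydantic import BaseModel, Field

from langchain_google_genai import ChatGoogleGenerativeAI


class Person(BaseModel):
    name: str = Field(description="The person's name.")


llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
llm_forced = llm.bind_tools([Person], tool_choice="Person")
# Expect a tool call named "Person" rather than a plain-text reply.
llm_forced.invoke("Chester just walked in.")
```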
""" - return ( - "gemini-1.5-pro" in self.model - or "gemini-1.5-flash" in self.model - or "gemini-2" in self.model - ) + return self.profile.get("tool_choice", True) if self.profile else True def _get_tool_name( diff --git a/libs/genai/langchain_google_genai/llms.py b/libs/genai/langchain_google_genai/llms.py index 6b46202df..b76402260 100644 --- a/libs/genai/langchain_google_genai/llms.py +++ b/libs/genai/langchain_google_genai/llms.py @@ -78,9 +78,7 @@ def validate_environment(self) -> Self: max_tokens=self.max_output_tokens, timeout=self.timeout, model=self.model, - client_options=self.client_options, base_url=self.base_url, - transport=self.transport, additional_headers=self.additional_headers, safety_settings=self.safety_settings, ) diff --git a/libs/genai/langchain_google_genai/utils.py b/libs/genai/langchain_google_genai/utils.py new file mode 100644 index 000000000..67ca53814 --- /dev/null +++ b/libs/genai/langchain_google_genai/utils.py @@ -0,0 +1,193 @@ +"""Utility functions for LangChain Google GenAI.""" + +from __future__ import annotations + +from collections.abc import Callable +from typing import Any + +from google.genai import types +from langchain_core.messages import BaseMessage +from langchain_core.tools import BaseTool +from pydantic import BaseModel + +from langchain_google_genai._function_utils import ( + _ToolChoiceType, + convert_to_genai_function_declarations, +) +from langchain_google_genai.chat_models import ( + ChatGoogleGenerativeAI, + _parse_chat_history, +) + + +def create_context_cache( + model: ChatGoogleGenerativeAI, + messages: list[BaseMessage], + *, + ttl: str | None = None, + expire_time: str | None = None, + tools: list[BaseTool | type[BaseModel] | dict | Callable] | None = None, + tool_choice: _ToolChoiceType | bool | None = None, +) -> str: + """Creates a context cache for the specified model and content. + + Context caching allows you to store and reuse content (e.g., PDFs, images) for + faster processing. This is useful when you have large amounts of context that + you want to reuse across multiple requests. + + !!! warning "Important Constraint" + When using cached content, you **cannot** specify `system_instruction`, + `tools`, or `tool_config` in subsequent API requests. These must be part + of the cached content. Do not call `.bind_tools()` when using a model with + cached content that already includes tools. + + Args: + model: `ChatGoogleGenerativeAI` model instance. + + Must be a model that supports context caching. + messages: List of `BaseMessage` objects to cache. + + Can include system messages, human messages, and multimodal content + (images, PDFs, etc.). + ttl: Time-to-live for the cache in seconds (e.g., `'300'` for 5 minutes). + + At most one of `ttl` or `expire_time` can be specified. + expire_time: Absolute expiration time (ISO 8601 format). + + At most one of `ttl` or `expire_time` can be specified. + tools: Optional list of tools to bind to the cached context. Can be: + - `BaseTool` instances + - Pydantic models (converted to JSON schema) + - Dict representations of tools + - Callable functions + tool_choice: Optional tool choice configuration. + + Returns: + Cache name (string identifier) that can be passed to `cached_content` + parameter in subsequent API calls. + + Raises: + ValueError: If the model client is not initialized or if the model is not + specified. 
+ + Example: + ```python + from langchain_core.messages import HumanMessage, SystemMessage + from langchain_google_genai import ChatGoogleGenerativeAI, create_context_cache + + model = ChatGoogleGenerativeAI(model="gemini-2.5-flash") + + # Example 1: Cache with text content + cache = create_context_cache( + model, + messages=[ + SystemMessage(content="You are an expert researcher."), + HumanMessage(content="Large document content here..."), + ], + ttl="3600s", # 1 hour + ) + + # Example 2: Cache with uploaded files (Gemini API) + # Note: gs:// URIs are NOT supported with Gemini API. + # Files must be uploaded first using client.files.upload() + file = model.client.files.upload(file="document.pdf") + cache = create_context_cache( + model, + messages=[ + SystemMessage(content="You are an expert researcher."), + HumanMessage( + content=[ + { + "type": "media", + "file_uri": file.uri, # Use the uploaded file's URI + "mime_type": "application/pdf", + } + ] + ), + ], + ttl="3600s", + ) + + # Use the cache in subsequent requests + response = model.invoke( + "Summarize the document.", + cached_content=cache, + ) + + # Example 3: Cache with tools (correct usage) + from langchain_core.tools import tool + + + @tool + def search_database(query: str) -> str: + '''Search the database.''' + return f"Results for: {query}" + + + # Create cache WITH tools + cache_with_tools = create_context_cache( + model, + messages=[ + SystemMessage(content="You are a helpful assistant."), + HumanMessage(content="Large context here..."), + ], + tools=[search_database], + ttl="3600s", + ) + + # When using the cache, do NOT bind tools again + # The tools are already in the cache + model_with_cache = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + cached_content=cache_with_tools, + ) + # DON'T do this: .bind_tools([search_database]) + + response = model_with_cache.invoke("Search for X") + ``` + """ + if model.client is None: + msg = "Model client must be initialized to create cached content." + raise ValueError(msg) + + if not model.model: + msg = "Model name must be specified to create cached content." + raise ValueError(msg) + + # Parse messages into system instruction and content + system_instruction, contents = _parse_chat_history(messages, model=model.model) + + # Convert tools to Tool object if provided + tool = None + if tools: + tool = convert_to_genai_function_declarations(tools) + + # Build the cache config + cache_config_kwargs: dict[str, Any] = {} + + if system_instruction: + cache_config_kwargs["system_instruction"] = system_instruction + + if contents: + cache_config_kwargs["contents"] = contents + + if ttl: + cache_config_kwargs["ttl"] = ttl + + if expire_time: + cache_config_kwargs["expire_time"] = expire_time + + if tool: + cache_config_kwargs["tools"] = [tool] + + # Create the cache + cache = model.client.caches.create( + model=model.model, + config=types.CreateCachedContentConfig(**cache_config_kwargs), + ) + + if cache.name is None: + msg = "Cache name was not set after creation." 
+ raise ValueError(msg) + + return cache.name diff --git a/libs/genai/pyproject.toml b/libs/genai/pyproject.toml index 19ff2f503..54530efd6 100644 --- a/libs/genai/pyproject.toml +++ b/libs/genai/pyproject.toml @@ -14,6 +14,7 @@ requires-python = ">=3.10.0,<4.0.0" dependencies = [ "langchain-core>=1.1.0,<2.0.0", "google-ai-generativelanguage>=0.9.0,<1.0.0", + "google-genai>=1.53.0,<2.0.0", "pydantic>=2.0.0,<3.0.0", "filetype>=1.2.0,<2.0.0", ] diff --git a/libs/genai/tests/conftest.py b/libs/genai/tests/conftest.py index 04374bf56..e692ebe85 100644 --- a/libs/genai/tests/conftest.py +++ b/libs/genai/tests/conftest.py @@ -1,5 +1,7 @@ """Tests configuration to be executed before tests execution.""" +import os + import pytest _RELEASE_FLAG = "release" @@ -48,3 +50,44 @@ def pytest_collection_modifyitems( if keywords and not any(config.getoption(f"--{kw}") for kw in keywords): skip = pytest.mark.skip(reason=f"need --{keywords[0]} option to run") item.add_marker(skip) + + +def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: + """Dynamically parametrize backend based on environment variable. + + Args: + metafunc: The pytest metafunc object. + """ + if "backend_config" in metafunc.fixturenames: + vertexai_setting = os.environ.get("TEST_VERTEXAI", "").lower() + + if vertexai_setting == "only": + # Test only Vertex AI + backends = ["vertex_ai"] + elif vertexai_setting in ("1", "true", "yes"): + # Test both backends + backends = ["google_ai", "vertex_ai"] + else: + # Default: test only Google AI + backends = ["google_ai"] + + metafunc.parametrize("backend_config", backends, indirect=True) + + +@pytest.fixture +def backend_config(request: pytest.FixtureRequest) -> dict: + """Provide backend configuration. + + Args: + request: The pytest fixture request object. + + Returns: + Backend configuration dictionary to pass to ChatGoogleGenerativeAI. + """ + if request.param == "vertex_ai": + project = os.environ.get("GOOGLE_CLOUD_PROJECT") + if not project: + pytest.skip("Vertex AI requires GOOGLE_CLOUD_PROJECT env var") + return {"vertexai": True, "project": project, "api_key": None} + # Google AI backend (default) + return {} diff --git a/libs/genai/tests/integration_tests/test_builtin_tools.py b/libs/genai/tests/integration_tests/test_builtin_tools.py deleted file mode 100644 index 8af94336d..000000000 --- a/libs/genai/tests/integration_tests/test_builtin_tools.py +++ /dev/null @@ -1,21 +0,0 @@ -from langchain_core.messages import AIMessage - -from langchain_google_genai import ChatGoogleGenerativeAI - -_MODEL = "gemini-2.5-flash" - - -def test_url_context_tool() -> None: - model = ChatGoogleGenerativeAI(model=_MODEL) - model_with_search = model.bind_tools([{"url_context": {}}]) - - input = "What is this page's contents about? 
https://docs.langchain.com" - response = model_with_search.invoke(input) - assert isinstance(response, AIMessage) - - assert ( - response.response_metadata["grounding_metadata"]["grounding_chunks"][0]["web"][ - "uri" - ] - is not None - ) diff --git a/libs/genai/tests/integration_tests/test_chat_models.py b/libs/genai/tests/integration_tests/test_chat_models.py index 8adcaf262..f3c099af5 100644 --- a/libs/genai/tests/integration_tests/test_chat_models.py +++ b/libs/genai/tests/integration_tests/test_chat_models.py @@ -1,12 +1,26 @@ """Test `ChatGoogleGenerativeAI`.""" import asyncio +import base64 +import io import json +import math +import os from collections.abc import Generator, Sequence from typing import Literal, cast from unittest.mock import patch +import httpx import pytest +import requests +from google.genai.types import ( + FunctionCallingConfig, + FunctionCallingConfigMode, + ToolConfig, +) +from google.genai.types import ( + Tool as GoogleTool, +) from langchain_core.messages import ( AIMessage, AIMessageChunk, @@ -27,6 +41,7 @@ HarmCategory, MediaResolution, Modality, + create_context_cache, ) _MODEL = "gemini-2.5-flash" @@ -38,6 +53,19 @@ _B64_string = """iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAABhGlDQ1BJQ0MgUHJvZmlsZQAAeJx9kT1Iw0AcxV8/xCIVQTuIKGSoTi2IijhqFYpQIdQKrTqYXPoFTRqSFBdHwbXg4Mdi1cHFWVcHV0EQ/ABxdXFSdJES/5cUWsR4cNyPd/ced+8Af6PCVDM4DqiaZaSTCSGbWxW6XxHECPoRQ0hipj4niil4jq97+Ph6F+dZ3uf+HL1K3mSATyCeZbphEW8QT29aOud94ggrSQrxOXHMoAsSP3JddvmNc9FhP8+MGJn0PHGEWCh2sNzBrGSoxFPEUUXVKN+fdVnhvMVZrdRY6578heG8trLMdZrDSGIRSxAhQEYNZVRgIU6rRoqJNO0nPPxDjl8kl0yuMhg5FlCFCsnxg//B727NwuSEmxROAF0vtv0xCnTvAs26bX8f23bzBAg8A1da219tADOfpNfbWvQI6NsGLq7bmrwHXO4Ag0+6ZEiOFKDpLxSA9zP6phwwcAv0rLm9tfZx+gBkqKvUDXBwCIwVKXvd492hzt7+PdPq7wdzbXKn5swsVgAAA8lJREFUeJx90dtPHHUUB/Dz+81vZhb2wrDI3soUKBSRcisF21iqqCRNY01NTE0k8aHpi0k18VJfjOFvUF9M44MmGrHFQqSQiKSmFloL5c4CXW6Fhb0vO3ufvczMzweiBGI9+eW8ffI95/yQqqrwv4UxBgCfJ9w/2NfSVB+Nyn6/r+vdLo7H6FkYY6yoABR2PJujj34MSo/d/nHeVLYbydmIp/bEO0fEy/+NMcbTU4/j4Vs6Lr0ccKeYuUKWS4ABVCVHmRdszbfvTgfjR8kz5Jjs+9RREl9Zy2lbVK9wU3/kWLJLCXnqza1bfVe7b9jLbIeTMcYu13Jg/aMiPrCwVFcgtDiMhnxwJ/zXVDwSdVCVMRV7nqzl2i9e/fKrw8mqSp84e2sFj3Oj8/SrF/MaicmyYhAaXu58NPAbeAeyzY0NLecmh2+ODN3BewYBAkAY43giI3kebrnsRmvV9z2D4ciOa3EBAf31Tp9sMgdxMTFm6j74/Ogb70VCYQKAAIDCXkOAIC6pkYBWdwwnpHEdf6L9dJtJKPh95DZhzFKMEWRAGL927XpWTmMA+s8DAOBYAoR483l/iHZ/8bXoODl8b9UfyH72SXepzbyRJNvjFGHKMlhvMBze+cH9+4lEuOOlU2X1tVkFTU7Om03q080NDGXV1cflRpHwaaoiiiildB8jhDLZ7HDfz2Yidba6Vn2L4fhzFrNRKy5OZ2QOZ1U5W8VtqlVH/iUHcM933zZYWS7Wtj66zZr65bzGJQt0glHgudi9XVzEl4vKw2kUPhO020oPYI1qYc+2Xc0bRXFwTLY0VXa2VibD/lBaIXm1UChN5JSRUcQQ1Tk/47Cf3x8bY7y17Y17PVYTG1UkLPBFcqik7Zoa9JcLYoHBqHhXNgd6gS1k9EJ1TQ2l9EDy1saErmQ2kGpwGC2MLOtCM8nZEV1K0tKJtEksSm26J/rHg2zzmabKisq939nHzqUH7efzd4f/nPGW6NP8ybNFrOsWQhpoCuuhnJ4hAnPhFam01K4oQMjBg/mzBjVhuvw2O++KKT+BIVxJKzQECBDLF2qu2WTMmCovtDQ1f8iyoGkUADBCCGPsdnvTW2OtFm01VeB06msvdWlpPZU0wJRG85ns84umU3k+VyxeEcWqvYUBAGsUrbvme4be99HFeisP/pwUOIZaOqQX31ISgrKmZhLHtXNXuJq68orrr5/9mBCglCLAGGPyy81votEbcjlKLrC9E8mhH3wdHRdcyyvjidSlxjftPJpD+o25JYvRHGFoZDdks1mBQhxJu9uxvwEiXuHnHbLd1AAAAABJRU5ErkJggg==""" # noqa: E501 +@tool +def get_weather(location: str) -> str: + """Get the weather for a location. + + Args: + location: The city/location to get weather for. + + Returns: + Weather information for the location. + """ + return "It's sunny and 72°F." + + def get_wav_type_from_bytes(file_bytes: bytes) -> bool: """Determine if the given bytes represent a WAV file by inspecting the header. 
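One common way to do this (a sketch; the actual helper below may differ in detail) is to check the RIFF container magic at offset 0 and the `WAVE` form type at offset 8:

```python
def _looks_like_wav(file_bytes: bytes) -> bool:
    # Canonical WAV files start with b"RIFF" and declare the b"WAVE" form type.
    return (
        len(file_bytes) >= 12
        and file_bytes[:4] == b"RIFF"
        and file_bytes[8:12] == b"WAVE"
    )
```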
@@ -113,74 +141,90 @@ def _check_tool_call_args(tool_call_args: dict) -> None: @pytest.mark.parametrize("is_async", [False, True]) @pytest.mark.parametrize("with_tags", [False, True]) -async def test_chat_google_genai_batch(is_async: bool, with_tags: bool) -> None: +async def test_chat_google_genai_batch( + is_async: bool, with_tags: bool, backend_config: dict +) -> None: """Test batch tokens.""" - llm = ChatGoogleGenerativeAI(model=_MODEL) + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) messages: Sequence[str] = [ "This is a test. Say 'foo'", "This is a test, say 'bar'", ] config: RunnableConfig | None = {"tags": ["foo"]} if with_tags else None - if is_async: - result = await llm.abatch(cast("list", messages), config=config) - else: - result = llm.batch(cast("list", messages), config=config) - - for token in result: - if isinstance(token.content, list): - text_content = "".join( - block.get("text", "") - for block in token.content - if isinstance(block, dict) and block.get("type") == "text" - ) - assert len(text_content) > 0 + try: + if is_async: + result = await llm.abatch(cast("list", messages), config=config) else: - assert isinstance(token.content, str) + result = llm.batch(cast("list", messages), config=config) + + for token in result: + if isinstance(token.content, list): + text_content = "".join( + block.get("text", "") + for block in token.content + if isinstance(block, dict) and block.get("type") == "text" + ) + assert len(text_content) > 0 + else: + assert isinstance(token.content, str) + finally: + # Explicitly close the client to avoid resource warnings + if llm.client: + llm.client.close() + if llm.client.aio: + await llm.client.aio.aclose() @pytest.mark.parametrize("is_async", [False, True]) -async def test_chat_google_genai_invoke(is_async: bool) -> None: +async def test_chat_google_genai_invoke(is_async: bool, backend_config: dict) -> None: """Test invoke tokens.""" - llm = ChatGoogleGenerativeAI(model=_MODEL) + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) - if is_async: - result = await llm.ainvoke( - "This is a test. Say 'foo'", - config={"tags": ["foo"]}, - generation_config={"top_k": 2, "top_p": 1, "temperature": 0.7}, - ) - else: - result = llm.invoke( - "This is a test. Say 'foo'", - config={"tags": ["foo"]}, - generation_config={"top_k": 2, "top_p": 1, "temperature": 0.7}, - ) + try: + if is_async: + result = await llm.ainvoke( + "This is a test. Say 'foo'", + config={"tags": ["foo"]}, + generation_config={"top_k": 2, "top_p": 1, "temperature": 0.7}, + ) + else: + result = llm.invoke( + "This is a test. 
Say 'foo'", + config={"tags": ["foo"]}, + generation_config={"top_k": 2, "top_p": 1, "temperature": 0.7}, + ) - assert isinstance(result, AIMessage) - if isinstance(result.content, list): - text_content = "".join( - block.get("text", "") - for block in result.content - if isinstance(block, dict) and block.get("type") == "text" - ) - assert len(text_content) > 0 - assert not text_content.startswith(" ") - else: - assert isinstance(result.content, str) - assert not result.content.startswith(" ") - _check_usage_metadata(result) + assert isinstance(result, AIMessage) + if isinstance(result.content, list): + text_content = "".join( + block.get("text", "") + for block in result.content + if isinstance(block, dict) and block.get("type") == "text" + ) + assert len(text_content) > 0 + assert not text_content.startswith(" ") + else: + assert isinstance(result.content, str) + assert not result.content.startswith(" ") + _check_usage_metadata(result) + finally: + # Explicitly close the client to avoid resource warnings + if llm.client: + llm.client.close() + if llm.client.aio: + await llm.client.aio.aclose() @pytest.mark.flaky(retries=3, delay=1) -def test_chat_google_genai_invoke_with_image() -> None: +def test_chat_google_genai_invoke_with_image(backend_config: dict) -> None: """Test generating an image and then text. Using `generation_config` to specify response modalities. Up to `9` retries possible due to inner loop and Pytest retries. """ - llm = ChatGoogleGenerativeAI(model=_IMAGE_OUTPUT_MODEL) + llm = ChatGoogleGenerativeAI(model=_IMAGE_OUTPUT_MODEL, **backend_config) for _ in range(3): # We break as soon as we get an image back, as it's not guaranteed @@ -225,10 +269,18 @@ def test_chat_google_genai_invoke_with_image() -> None: _ = llm.invoke(["What's this?", {"role": "assistant", "content": content_blocks}]) -def test_chat_google_genai_invoke_with_audio() -> None: +def test_chat_google_genai_invoke_with_audio(backend_config: dict) -> None: """Test generating audio.""" + # Skip on Vertex AI - having some issues possibly upstream + # TODO: look later + # https://discuss.ai.google.dev/t/request-allowlist-access-for-audio-output-in-gemini-2-5-pro-flash-tts-vertex-ai/108067 + if backend_config.get("vertexai"): + pytest.skip("Gemini TTS on Vertex AI requires allowlist access") + llm = ChatGoogleGenerativeAI( - model=_AUDIO_OUTPUT_MODEL, response_modalities=[Modality.AUDIO] + model=_AUDIO_OUTPUT_MODEL, + response_modalities=[Modality.AUDIO], + **backend_config, ) result = llm.invoke( @@ -260,14 +312,14 @@ def test_chat_google_genai_invoke_with_audio() -> None: ], ) def test_chat_google_genai_invoke_thinking( - thinking_budget: int | None, test_description: str + thinking_budget: int | None, test_description: str, backend_config: dict ) -> None: """Test invoke a thinking model with different thinking budget configurations.""" llm_kwargs: dict[str, str | int] = {"model": _THINKING_MODEL} if thinking_budget is not None: llm_kwargs["thinking_budget"] = thinking_budget - llm = ChatGoogleGenerativeAI(**llm_kwargs) + llm = ChatGoogleGenerativeAI(**llm_kwargs, **backend_config) result = llm.invoke( "How many O's are in Google? 
Please tell me how you double checked the result", @@ -327,11 +379,14 @@ def _check_thinking_output(content: list, output_version: str) -> None: @pytest.mark.flaky(retries=3, delay=1) @pytest.mark.parametrize("output_version", ["v0", "v1"]) def test_chat_google_genai_invoke_thinking_include_thoughts( - output_version: str, + output_version: str, backend_config: dict ) -> None: """Test invoke thinking model with `include_thoughts`.""" llm = ChatGoogleGenerativeAI( - model=_THINKING_MODEL, include_thoughts=True, output_version=output_version + model=_THINKING_MODEL, + include_thoughts=True, + output_version=output_version, + **backend_config, ) input_message = { @@ -384,7 +439,7 @@ def test_chat_google_genai_invoke_thinking_include_thoughts( @pytest.mark.flaky(retries=5, delay=1) @pytest.mark.parametrize("output_version", ["v0", "v1"]) def test_chat_google_genai_invoke_thinking_with_tools( - output_version: str, + output_version: str, backend_config: dict ) -> None: """Test thinking with function calling to get thought signatures. @@ -411,7 +466,10 @@ def analyze_weather(location: str, date: str) -> dict: } llm = ChatGoogleGenerativeAI( - model=_THINKING_MODEL, include_thoughts=True, output_version=output_version + model=_THINKING_MODEL, + include_thoughts=True, + output_version=output_version, + **backend_config, ) llm_with_tools = llm.bind_tools([analyze_weather]) @@ -516,8 +574,9 @@ def analyze_weather(location: str, date: str) -> dict: @pytest.mark.flaky(retries=3, delay=1) -def test_thought_signature_round_trip() -> None: +def test_thought_signature_round_trip(backend_config: dict) -> None: """Test thought signatures are properly preserved in round-trip conversations.""" + # TODO: could paramaterize over output_version too @tool def simple_tool(query: str) -> str: @@ -525,7 +584,10 @@ def simple_tool(query: str) -> str: return f"Response to: {query}" llm = ChatGoogleGenerativeAI( - model=_THINKING_MODEL, include_thoughts=True, output_version="v1" + model=_THINKING_MODEL, + include_thoughts=True, + output_version="v1", + **backend_config, ) llm_with_tools = llm.bind_tools([simple_tool]) @@ -575,9 +637,12 @@ def simple_tool(query: str) -> str: assert second_result.content is not None -def test_chat_google_genai_invoke_thinking_disabled() -> None: +def test_chat_google_genai_invoke_thinking_disabled(backend_config: dict) -> None: """Test invoking a thinking model with zero `thinking_budget`.""" - llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", thinking_budget=0) + # Note certain models may not allow `thinking_budget=0` + llm = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", thinking_budget=0, **backend_config + ) result = llm.invoke( "How many O's are in Google? Please tell me how you double checked the result", @@ -593,9 +658,11 @@ def test_chat_google_genai_invoke_thinking_disabled() -> None: @pytest.mark.flaky(retries=3, delay=1) -def test_chat_google_genai_invoke_no_image_generation_without_modalities() -> None: +def test_chat_google_genai_invoke_no_image_generation_without_modalities( + backend_config: dict, +) -> None: """Test invoke tokens with image without response modalities.""" - llm = ChatGoogleGenerativeAI(model=_MODEL) + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) result = llm.invoke( "Generate an image of a cat. 
Then, say meow!", @@ -646,6 +713,7 @@ def test_chat_google_genai_multimodal( test_case: str, messages: list[BaseMessage], use_streaming: bool, + backend_config: dict, ) -> None: """Test multimodal functionality with various message types and streaming support. @@ -656,7 +724,7 @@ def test_chat_google_genai_multimodal( use_streaming: Whether to test streaming functionality. """ del test_case # Parameters used for test organization - llm = ChatGoogleGenerativeAI(model=_VISION_MODEL) + llm = ChatGoogleGenerativeAI(model=_VISION_MODEL, **backend_config) if use_streaming: # Test streaming @@ -692,6 +760,91 @@ def test_chat_google_genai_multimodal( assert len(response.content.strip()) > 0 +def test_multimodal_pdf_input_url(backend_config: dict) -> None: + """Test multimodal PDF input from a public URL. + + Note: This test uses the `image_url` format which downloads the content. + For gs:// URIs: + - Google AI backend: Must upload via client.files.upload() and use `media` format + - Vertex AI backend: Can use `media` format with `file_uri` directly + This test uses a public HTTP URL which works with both backends. + """ + llm = ChatGoogleGenerativeAI(model=_VISION_MODEL, **backend_config) + pdf_url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf" + + message = HumanMessage( + content=[ + { + "type": "text", + "text": "Describe the provided document.", + }, + { + "type": "image_url", + "image_url": {"url": pdf_url}, + }, + ] + ) + + response = llm.invoke([message]) + assert isinstance(response, AIMessage) + if isinstance(response.content, list): + text_content = "".join( + block.get("text", "") + for block in response.content + if isinstance(block, dict) and block.get("type") == "text" + ) + assert len(text_content.strip()) > 0 + else: + assert isinstance(response.content, str) + assert len(response.content.strip()) > 0 + + +def test_multimodal_pdf_input_base64(backend_config: dict) -> None: + """Test multimodal PDF input from base64 encoded data. + + Verifies that PDFs can be passed as base64 encoded data URIs + (`data:application/pdf;base64,...`) using the `image_url` format. + + `ImageBytesLoader` handles decoding and sends the PDF as inline bytes to + the API, which works for both Google AI and Vertex AI backends. 
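For files that are already hosted rather than inlined, the `media` content block is the alternative; a rough sketch (the URI and mime type are illustrative):

```python
from langchain_core.messages import HumanMessage

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the provided document."},
        {
            "type": "media",
            # On Vertex AI a gs:// URI works directly; with the Google AI
            # backend, use the URI returned by `client.files.upload(...)`.
            "file_uri": "gs://my-bucket/dummy.pdf",
            "mime_type": "application/pdf",
        },
    ]
)
```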
+ """ + llm = ChatGoogleGenerativeAI(model=_VISION_MODEL, **backend_config) + pdf_url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf" + + # Download and encode PDF as base64 + response_data = requests.get(pdf_url, allow_redirects=True) + with io.BytesIO() as stream: + stream.write(response_data.content) + pdf_base64 = base64.b64encode(stream.getbuffer()).decode("utf-8") + pdf_data_uri = f"data:application/pdf;base64,{pdf_base64}" + + message = HumanMessage( + content=[ + { + "type": "text", + "text": "Describe the provided document.", + }, + { + "type": "image_url", + "image_url": {"url": pdf_data_uri}, + }, + ] + ) + + response = llm.invoke([message]) + assert isinstance(response, AIMessage) + if isinstance(response.content, list): + text_content = "".join( + block.get("text", "") + for block in response.content + if isinstance(block, dict) and block.get("type") == "text" + ) + assert len(text_content.strip()) > 0 + else: + assert isinstance(response.content, str) + assert len(response.content.strip()) > 0 + + @pytest.mark.parametrize( "message", [ @@ -709,9 +862,11 @@ def test_chat_google_genai_multimodal( ), ], ) -def test_chat_google_genai_invoke_media_resolution(message: BaseMessage) -> None: +def test_chat_google_genai_invoke_media_resolution( + message: BaseMessage, backend_config: dict +) -> None: """Test invoke vision model with `media_resolution` set to low and without.""" - llm = ChatGoogleGenerativeAI(model=_VISION_MODEL) + llm = ChatGoogleGenerativeAI(model=_VISION_MODEL, **backend_config) result = llm.invoke([message]) result_low_res = llm.invoke( [message], media_resolution=MediaResolution.MEDIA_RESOLUTION_LOW @@ -728,8 +883,8 @@ def test_chat_google_genai_invoke_media_resolution(message: BaseMessage) -> None ) -def test_chat_google_genai_single_call_with_history() -> None: - model = ChatGoogleGenerativeAI(model=_MODEL) +def test_chat_google_genai_single_call_with_history(backend_config: dict) -> None: + model = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) text_question1, text_answer1 = "How much is 2+2?", "4" text_question2 = "How much is 3+3?" message1 = HumanMessage(content=text_question1) @@ -753,16 +908,14 @@ def test_chat_google_genai_single_call_with_history() -> None: [_MODEL, _PRO_MODEL], ) def test_chat_google_genai_system_message( - model_name: str, + model_name: str, backend_config: dict ) -> None: """Test system message handling. Tests that system messages are properly converted to system instructions for different models. """ - model = ChatGoogleGenerativeAI( - model=model_name, - ) + model = ChatGoogleGenerativeAI(model=model_name, **backend_config) text_question1, text_answer1 = "How much is 2+2?", "4" text_question2 = "How much is 3+3?" 
system_message = SystemMessage(content="You're supposed to answer math questions.") @@ -782,21 +935,231 @@ def test_chat_google_genai_system_message( assert isinstance(response.content, str) -def test_generativeai_get_num_tokens_gemini() -> None: +def test_generativeai_get_num_tokens_gemini(backend_config: dict) -> None: """Test model tokenizer.""" - llm = ChatGoogleGenerativeAI(temperature=0, model=_MODEL) + llm = ChatGoogleGenerativeAI(temperature=0, model=_MODEL, **backend_config) output = llm.get_num_tokens("How are you?") assert output == 4 +def test_get_num_tokens_from_messages(backend_config: dict) -> None: + """Test token counting from messages.""" + model = ChatGoogleGenerativeAI(temperature=0.0, model=_MODEL, **backend_config) + message = HumanMessage(content="Hello") + token = model.get_num_tokens_from_messages(messages=[message]) + assert isinstance(token, int) + assert token == 3 + + +def test_single_call_previous_blocked_response(backend_config: dict) -> None: + """Test handling of blocked responses in conversation history. + + If a previous call was blocked, the AIMessage will have empty content. + Empty content should be ignored. + """ + model = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) + text_question = "How much is 3+3?" + # Previous blocked response included in history + blocked_message = AIMessage( + content="", + response_metadata={ + "is_blocked": True, + "safety_ratings": [ + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability_label": "MEDIUM", + "probability_score": 0.33039191365242004, + "blocked": True, + "severity": "HARM_SEVERITY_MEDIUM", + "severity_score": 0.2782268822193146, + }, + ], + "finish_reason": "SAFETY", + }, + ) + user_message = HumanMessage(content=text_question) + response = model.invoke([blocked_message, user_message]) + assert isinstance(response, AIMessage) + if isinstance(response.content, list): + text_content = "".join( + block.get("text", "") + for block in response.content + if isinstance(block, dict) and block.get("type") == "text" + ) + assert len(text_content) > 0 + else: + assert isinstance(response.content, str) + + +def test_json_mode_typeddict(backend_config: dict) -> None: + """Test structured output with `TypedDict` using `json_mode` method.""" + from typing_extensions import TypedDict + + class MyModel(TypedDict): + name: str + age: int + + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) + model = llm.with_structured_output(MyModel, method="json_mode") # type: ignore[arg-type] + message = HumanMessage(content="My name is Erick and I am 28 years old") + + response = model.invoke([message]) + assert isinstance(response, dict) + assert response == {"name": "Erick", "age": 28} + + # Test stream + for chunk in model.stream([message]): + assert isinstance(chunk, dict) + assert all(key in ["name", "age"] for key in chunk) + assert chunk == {"name": "Erick", "age": 28} + + +def test_nested_bind_tools(backend_config: dict) -> None: + """Test nested Pydantic models in tool calling with `tool_choice`.""" + from pydantic import Field + + class Person(BaseModel): + name: str = Field(description="The name.") + hair_color: str | None = Field( + default=None, description="Hair color, only if provided." 
+ ) + + class People(BaseModel): + data: list[Person] = Field(description="The people.") + + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) + llm_with_tools = llm.bind_tools([People], tool_choice="People") + + response = llm_with_tools.invoke("Chester, no hair color provided.") + assert isinstance(response, AIMessage) + assert response.tool_calls[0]["name"] == "People" + + +def test_timeout_non_streaming(backend_config: dict) -> None: + """Test timeout parameter in non-streaming mode.""" + model = ChatGoogleGenerativeAI( + model=_MODEL, + timeout=0.001, # 1ms - too short to complete + **backend_config, + ) + with pytest.raises((httpx.ReadTimeout, httpx.TimeoutException)): + model.invoke([HumanMessage(content="Hello")]) + + +def test_timeout_streaming(backend_config: dict) -> None: + """Test timeout parameter in streaming mode.""" + model = ChatGoogleGenerativeAI( + model=_MODEL, + timeout=0.001, # 1ms - too short to complete + streaming=True, + **backend_config, + ) + with pytest.raises((httpx.ReadTimeout, httpx.TimeoutException)): + model.invoke([HumanMessage(content="Hello")]) + + +def test_response_metadata_avg_logprobs(backend_config: dict) -> None: + """Test that `avg_logprobs` are present in response metadata.""" + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) + response = llm.invoke("Hello!") + probs = response.response_metadata.get("avg_logprobs") + if probs is not None: + assert isinstance(probs, float) + + +@pytest.mark.xfail(reason="logprobs are subject to daily quotas") +def test_logprobs(backend_config: dict) -> None: + """Test logprobs parameter with different configurations.""" + # Test with integer logprobs (top K) + llm = ChatGoogleGenerativeAI(model=_MODEL, logprobs=2, **backend_config) + msg = llm.invoke("hey") + tokenprobs = msg.response_metadata.get("logprobs_result") + assert tokenprobs is None or isinstance(tokenprobs, list) + if tokenprobs: + stack = tokenprobs[:] + while stack: + token = stack.pop() + assert isinstance(token, dict) + assert "token" in token + assert "logprob" in token + assert isinstance(token.get("token"), str) + assert isinstance(token.get("logprob"), float) + if "top_logprobs" in token and token.get("top_logprobs") is not None: + assert isinstance(token.get("top_logprobs"), list) + stack.extend(token.get("top_logprobs", [])) + + # Test with logprobs=True + llm2 = ChatGoogleGenerativeAI(model=_MODEL, logprobs=True, **backend_config) + msg2 = llm2.invoke("how are you") + assert msg2.response_metadata["logprobs_result"] + + # Test with logprobs=False + llm3 = ChatGoogleGenerativeAI(model=_MODEL, logprobs=False, **backend_config) + msg3 = llm3.invoke("howdy") + assert msg3.response_metadata.get("logprobs_result") is None + + +@pytest.mark.xfail(reason="logprobs are subject to daily quotas") +def test_logprobs_with_json_schema(backend_config: dict) -> None: + """Test logprobs with JSON schema structured output. + + Ensures logprobs are populated when using JSON schema responses. + This exercises the logprobs path with `response_mime_type='application/json'` + and `response_schema` set. + + The test verifies: + 1. Zero logprobs (`prob=1.0`, 100% certainty) are included, not filtered + 2. 
All logprob values are valid (non-positive, non-`NaN`) + """ + output_schema = { + "title": "Test Schema", + "type": "object", + "properties": { + "fieldA": {"type": "string"}, + "fieldB": {"type": "number"}, + }, + "required": ["fieldA", "fieldB"], + } + + llm = ChatGoogleGenerativeAI( + model=_MODEL, + response_mime_type="application/json", + response_schema=output_schema, + logprobs=True, + **backend_config, + ) + + msg = llm.invoke("Return a JSON object with fieldA='test' and fieldB=42") + tokenprobs = msg.response_metadata.get("logprobs_result") + # We don't assert exact content to avoid flakiness, but if present it must + # be a well-formed list of token/logprob dicts, including zero logprobs. + assert tokenprobs is None or isinstance(tokenprobs, list) + if tokenprobs: + logprob_values = [] + for token in tokenprobs: + assert isinstance(token, dict) + assert "token" in token + assert "logprob" in token + assert isinstance(token.get("token"), str) + assert isinstance(token.get("logprob"), (float, int)) + logprob_values.append(token["logprob"]) + + # Verify all logprobs are valid: non-positive (zero allowed) and not NaN + for lp in logprob_values: + assert lp <= 0.0, f"Logprob should be non-positive, got {lp}" + assert not (isinstance(lp, float) and math.isnan(lp)), ( + f"Logprob should not be NaN, got {lp}" + ) + + @pytest.mark.parametrize("use_streaming", [False, True]) -def test_safety_settings_gemini(use_streaming: bool) -> None: +def test_safety_settings_gemini(use_streaming: bool, backend_config: dict) -> None: """Test safety settings with both `invoke` and `stream` methods.""" safety_settings: dict[HarmCategory, HarmBlockThreshold] = { - HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE # type: ignore[dict-item] + HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE } # Test with safety filters on bind - llm = ChatGoogleGenerativeAI(temperature=0, model=_MODEL).bind( + llm = ChatGoogleGenerativeAI(temperature=0, model=_MODEL, **backend_config).bind( safety_settings=safety_settings ) @@ -815,7 +1178,7 @@ def test_safety_settings_gemini(use_streaming: bool) -> None: assert len(output.content) > 0 -def test_chat_function_calling_with_multiple_parts() -> None: +def test_chat_function_calling_with_multiple_parts(backend_config: dict) -> None: @tool def search( question: str, @@ -829,9 +1192,11 @@ def search( tools = [search] safety: dict[HarmCategory, HarmBlockThreshold] = { - HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH # type: ignore[dict-item] + HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH } - llm = ChatGoogleGenerativeAI(model=_PRO_MODEL, safety_settings=safety) + llm = ChatGoogleGenerativeAI( + model=_PRO_MODEL, safety_settings=safety, **backend_config + ) llm_with_search = llm.bind( functions=tools, ) @@ -891,7 +1256,7 @@ def search( assert "brown" in content_str.lower() -def test_chat_vertexai_gemini_function_calling() -> None: +def test_chat_vertexai_gemini_function_calling(backend_config: dict) -> None: """Test function calling with Gemini models. Safety settings included but not tested. 
@@ -903,15 +1268,15 @@ class MyModel(BaseModel): likes: list[str] safety: dict[HarmCategory, HarmBlockThreshold] = { - HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH # type: ignore[dict-item] + HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH } # Test .bind_tools with BaseModel message = HumanMessage( content="My name is Erick and I am 27 years old. I like apple and banana." ) - model = ChatGoogleGenerativeAI(model=_MODEL, safety_settings=safety).bind_tools( - [MyModel] - ) + model = ChatGoogleGenerativeAI( + model=_MODEL, safety_settings=safety, **backend_config + ).bind_tools([MyModel]) response = model.invoke([message]) _check_tool_calls(response, "MyModel") @@ -919,9 +1284,9 @@ class MyModel(BaseModel): def my_model(name: str, age: int, likes: list[str]) -> None: """Invoke this with names and age and likes.""" - model = ChatGoogleGenerativeAI(model=_MODEL, safety_settings=safety).bind_tools( - [my_model] - ) + model = ChatGoogleGenerativeAI( + model=_MODEL, safety_settings=safety, **backend_config + ).bind_tools([my_model]) response = model.invoke([message]) _check_tool_calls(response, "my_model") @@ -930,9 +1295,9 @@ def my_model(name: str, age: int, likes: list[str]) -> None: def my_tool(name: str, age: int, likes: list[str]) -> None: """Invoke this with names and age and likes.""" - model = ChatGoogleGenerativeAI(model=_MODEL, safety_settings=safety).bind_tools( - [my_tool] - ) + model = ChatGoogleGenerativeAI( + model=_MODEL, safety_settings=safety, **backend_config + ).bind_tools([my_tool]) response = model.invoke([message]) _check_tool_calls(response, "my_tool") @@ -973,15 +1338,18 @@ def my_tool(name: str, age: int, likes: list[str]) -> None: def test_chat_google_genai_with_structured_output( model_name: str, method: Literal["function_calling", "json_mode", "json_schema"] | None, + backend_config: dict, ) -> None: class MyModel(BaseModel): name: str age: int safety: dict[HarmCategory, HarmBlockThreshold] = { - HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH # type: ignore[dict-item] + HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH } - llm = ChatGoogleGenerativeAI(model=model_name, safety_settings=safety) + llm = ChatGoogleGenerativeAI( + model=model_name, safety_settings=safety, **backend_config + ) model = llm.with_structured_output(MyModel, method=method) message = HumanMessage(content="My name is Erick and I am 27 years old") @@ -1049,7 +1417,9 @@ class MyModel(BaseModel): assert response == expected, f"Expected {expected}, got {response}" -def test_chat_google_genai_with_structured_output_nested_model() -> None: +def test_chat_google_genai_with_structured_output_nested_model( + backend_config: dict, +) -> None: """Deeply nested model test for structured output.""" class Argument(BaseModel): @@ -1063,9 +1433,9 @@ class Response(BaseModel): response: str reasons: list[Reason] - model = ChatGoogleGenerativeAI(model=_MODEL).with_structured_output( - Response, method="json_schema" - ) + model = ChatGoogleGenerativeAI( + model=_MODEL, **backend_config + ).with_structured_output(Response, method="json_schema") response = model.invoke("Why is Real Madrid better than Barcelona?") @@ -1083,11 +1453,40 @@ class Response(BaseModel): assert isinstance(argument.description, str) +def test_thinking_params_preserved_with_structured_output(backend_config: dict) -> None: + """Test that `thinking_budget=0` and `include_thoughts=False` are preserved. 
+ + Verifies that thinking configuration persists through `with_structured_output()`. + """ + + class SimpleModel(BaseModel): + answer: str + + # Initialize with thinking disabled + # Only certain models support disabling thinking + llm = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + thinking_budget=0, + include_thoughts=False, + **backend_config, + ) + + # Apply structured output - params should be preserved + structured_llm = llm.with_structured_output(SimpleModel, method="json_schema") + + result = structured_llm.invoke("What is 2+2? Just give a brief answer.") + + assert isinstance(result, SimpleModel) + assert isinstance(result.answer, str) + + @pytest.mark.parametrize("is_async", [False, True]) @pytest.mark.parametrize("use_streaming", [False, True]) -def test_model_methods_without_eventloop(is_async: bool, use_streaming: bool) -> None: +def test_model_methods_without_eventloop( + is_async: bool, use_streaming: bool, backend_config: dict +) -> None: """Test `invoke` and `stream` (sync & async) without event loop.""" - model = ChatGoogleGenerativeAI(model=_MODEL) + model = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) if use_streaming: if is_async: @@ -1134,9 +1533,9 @@ def _check_web_search_output(message: AIMessage, output_version: str) -> None: @pytest.mark.parametrize("output_version", ["v0", "v1"]) -def test_search_builtin(output_version: str) -> None: +def test_search_builtin(output_version: str, backend_config: dict) -> None: llm = ChatGoogleGenerativeAI( - model=_MODEL, output_version=output_version + model=_MODEL, output_version=output_version, **backend_config ).bind_tools([{"google_search": {}}]) input_message = { "role": "user", @@ -1160,9 +1559,13 @@ def test_search_builtin(output_version: str) -> None: @pytest.mark.parametrize("use_streaming", [False, True]) -def test_search_builtin_with_citations(use_streaming: bool) -> None: +def test_search_builtin_with_citations( + use_streaming: bool, backend_config: dict +) -> None: """Test that citations are properly extracted from `grounding_metadata`.""" - llm = ChatGoogleGenerativeAI(model=_MODEL).bind_tools([{"google_search": {}}]) + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config).bind_tools( + [{"google_search": {}}] + ) input_message = { "role": "user", "content": "Who won the 2024 UEFA Euro championship? Use search/citations.", @@ -1278,6 +1681,115 @@ def test_search_builtin_with_citations(use_streaming: bool) -> None: assert isinstance(google_metadata, dict) +@pytest.mark.flaky(retries=3, delay=1) +@pytest.mark.parametrize("use_streaming", [False, True]) +def test_structured_output_with_google_search( + use_streaming: bool, backend_config: dict +) -> None: + """Test structured outputs combined with Google Search tool. + + Tests that the model can: + 1. Use Google Search to find information + 2. Return a response that conforms to a structured schema + 3. Include grounding metadata from the search + """ + + class MatchResult(BaseModel): + winner: str + final_match_score: str + scorers: list[str] + + llm = ChatGoogleGenerativeAI(model="gemini-3-pro-preview", **backend_config) + + # Bind tools and configure for structured output + llm_with_search = llm.bind( + tools=[{"google_search": {}}, {"url_context": {}}], + response_mime_type="application/json", + response_schema=MatchResult.model_json_schema(), + ) + + if use_streaming: + # Test streaming + chunks: list[BaseMessageChunk] = [] + for chunk in llm_with_search.stream( + "Search for all details for the latest Euro championship final match." 
+ ): + assert isinstance(chunk, AIMessageChunk) + chunks.append(chunk) + + assert len(chunks) > 0 + + # Reconstruct full message + response = chunks[0] + for chunk in chunks[1:]: # type: ignore[assignment] + response = response + chunk + + response = cast("AIMessageChunk", response) + assert isinstance(response, AIMessageChunk) + else: + # Test invoke + response = llm_with_search.invoke( # type: ignore[assignment] + "Search for all details for the latest Euro championship final match." + ) + assert isinstance(response, AIMessage) + + # Extract JSON from response content + assert isinstance(response.content, list) + text_content = "".join( + block.get("text", "") + for block in response.content + if isinstance(block, dict) and block.get("type") == "text" + ) + + # Verify structured output can be parsed + result = MatchResult.model_validate_json(text_content) + assert isinstance(result, MatchResult) + assert isinstance(result.winner, str) + assert len(result.winner) > 0 + assert isinstance(result.final_match_score, str) + assert len(result.final_match_score) > 0 + assert isinstance(result.scorers, list) + + # Verify grounding metadata is present (indicating search was used) + assert "grounding_metadata" in response.response_metadata + grounding = response.response_metadata["grounding_metadata"] + assert "grounding_chunks" in grounding or "grounding_supports" in grounding + + # Verify usage metadata + # TODO: Investigate streaming usage metadata accumulation issue + # When streaming, total_tokens doesn't match input_tokens + output_tokens + # This appears to be a chunk accumulation bug where total_tokens is summed + # across chunks but input/output tokens only keep final values + if not use_streaming: + _check_usage_metadata(response) + + +def test_search_with_googletool(backend_config: dict) -> None: + """Test using `GoogleTool` with Google Search.""" + llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash", **backend_config) + resp = llm.invoke( + "When is the next total solar eclipse in US?", + tools=[GoogleTool(google_search={})], + ) + assert "grounding_metadata" in resp.response_metadata + + +def test_url_context_tool(backend_config: dict) -> None: + model = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) + model_with_search = model.bind_tools([{"url_context": {}}]) + + input = "What is this page's contents about? 
https://docs.langchain.com" + response = model_with_search.invoke(input) + assert isinstance(response, AIMessage) + + assert ( + response.response_metadata["grounding_metadata"]["grounding_chunks"][0]["web"][ + "uri" + ] + is not None + ) + + def _check_code_execution_output(message: AIMessage, output_version: str) -> None: if output_version == "v0": blocks = [block for block in message.content if isinstance(block, dict)] @@ -1307,9 +1819,9 @@ def _check_code_execution_output(message: AIMessage, output_version: str) -> Non @pytest.mark.filterwarnings("ignore::UserWarning") @pytest.mark.parametrize("output_version", ["v0", "v1"]) -def test_code_execution_builtin(output_version: str) -> None: +def test_code_execution_builtin(output_version: str, backend_config: dict) -> None: llm = ChatGoogleGenerativeAI( - model=_MODEL, output_version=output_version + model=_MODEL, output_version=output_version, **backend_config ).bind_tools([{"code_execution": {}}]) input_message = { "role": "user", @@ -1333,13 +1845,14 @@ def test_code_execution_builtin(output_version: str) -> None: _check_code_execution_output(response, output_version) -def test_chat_google_genai_invoke_with_generation_params() -> None: +def test_chat_google_genai_invoke_with_generation_params(backend_config: dict) -> None: """Test that generation parameters passed to invoke() are respected. Verifies that `max_output_tokens` (max_tokens) and `thinking_budget` parameters passed directly to invoke() method override model defaults. """ - llm = ChatGoogleGenerativeAI(model=_MODEL) + # Use gemini-2.5-flash because it supports thinking_budget=0 + llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", **backend_config) # Test with max_output_tokens constraint result_constrained = llm.invoke( @@ -1368,9 +1881,11 @@ def test_chat_google_genai_invoke_with_generation_params() -> None: @pytest.mark.parametrize("max_tokens", [10, 20, 50]) -def test_chat_google_genai_invoke_respects_max_tokens(max_tokens: int) -> None: - """Test that different max_output_tokens values are respected.""" - llm = ChatGoogleGenerativeAI(model=_MODEL) +def test_chat_google_genai_invoke_respects_max_tokens( + max_tokens: int, backend_config: dict +) -> None: + """Test that different `max_output_tokens` values are respected.""" + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) result = llm.invoke( "Write a detailed essay about artificial intelligence.", @@ -1384,3 +1899,869 @@ def test_chat_google_genai_invoke_respects_max_tokens(max_tokens: int) -> None: assert output_tokens <= max_tokens, ( f"Expected output_tokens <= {max_tokens}, got {output_tokens}" ) + + +@pytest.mark.parametrize("output_version", ["v0", "v1"]) +def test_agent_loop(output_version: Literal["v0", "v1"], backend_config: dict) -> None: + """Test agent loop with tool calling (non-streaming). + + Ensures that the model can: + 1. Make a tool call in response to a user query + 2. Accept the tool result + 3. 
Generate a final response incorporating the tool result + """ + + llm = ChatGoogleGenerativeAI( + model=_MODEL, output_version=output_version, **backend_config + ) + llm_with_tools = llm.bind_tools([get_weather]) + input_message = HumanMessage("What is the weather in San Francisco, CA?") + + # First call - should make a tool call + tool_call_message = llm_with_tools.invoke([input_message]) + assert isinstance(tool_call_message, AIMessage) + tool_calls = tool_call_message.tool_calls + assert len(tool_calls) == 1 + tool_call = tool_calls[0] + assert tool_call["name"] == "get_weather" + assert "location" in tool_call["args"] + + # Execute the tool + tool_message = get_weather.invoke(tool_call) + assert isinstance(tool_message, ToolMessage) + + # Second call - should incorporate tool result + response = llm_with_tools.invoke( + [ + input_message, + tool_call_message, + tool_message, + ] + ) + assert isinstance(response, AIMessage) + # Response should mention weather information + if isinstance(response.content, list): + text_content = "".join( + block.get("text", "") + for block in response.content + if isinstance(block, dict) and block.get("type") == "text" + ) + assert len(text_content) > 0 + else: + # v0 output for 2.5 models and lower + assert isinstance(response.content, str) + assert len(response.content) > 0 + + +@pytest.mark.parametrize("output_version", ["v0", "v1"]) +def test_agent_loop_streaming( + output_version: Literal["v0", "v1"], backend_config: dict +) -> None: + """Test agent loop with tool calling (streaming).""" + + llm = ChatGoogleGenerativeAI( + model=_MODEL, output_version=output_version, **backend_config + ) + llm_with_tools = llm.bind_tools([get_weather]) + input_message = HumanMessage("What is the weather in San Francisco, CA?") + + # First call - stream tool call chunks + chunks: list[BaseMessageChunk] = [] + for chunk in llm_with_tools.stream([input_message]): + assert isinstance(chunk, AIMessageChunk) + chunks.append(chunk) + + assert len(chunks) > 0 + + # Reconstruct the full message + tool_call_message = chunks[0] + for chunk in chunks[1:]: # type: ignore[assignment] + tool_call_message = tool_call_message + chunk + + tool_call_message = cast("AIMessageChunk", tool_call_message) + assert isinstance(tool_call_message, AIMessageChunk) + tool_calls = tool_call_message.tool_calls + assert len(tool_calls) == 1 + tool_call = tool_calls[0] + assert tool_call["name"] == "get_weather" + + # Execute the tool + tool_message = get_weather.invoke(tool_call) + assert isinstance(tool_message, ToolMessage) + + # Second call - stream final response + response_chunks: list[BaseMessageChunk] = [] + for chunk in llm_with_tools.stream( + [ + input_message, + tool_call_message, + tool_message, + ] + ): + assert isinstance(chunk, AIMessageChunk) + response_chunks.append(chunk) + + assert len(response_chunks) > 0 + # Reconstruct full response + response = response_chunks[0] + for chunk in response_chunks[1:]: # type: ignore[assignment] + response = response + chunk + + response = cast("AIMessageChunk", response) + assert isinstance(response, AIMessageChunk) + assert len(response.text) > 0 + + +@pytest.mark.parametrize("use_stream_method", [False, True]) +@pytest.mark.parametrize("is_async", [False, True]) +async def test_basic_streaming( + use_stream_method: bool, is_async: bool, backend_config: dict +) -> None: + """Test basic streaming functionality. 
+ + Args: + use_stream_method: If `True`, use `stream()`, otherwise set `streaming=True` + is_async: Test async vs sync streaming + """ + if use_stream_method: + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) + else: + llm = ChatGoogleGenerativeAI(model=_MODEL, streaming=True, **backend_config) + + message = HumanMessage("Count from 1 to 5") + + try: + chunks: list[BaseMessageChunk] = [] + if is_async: + if use_stream_method: + async for chunk in llm.astream([message]): + assert isinstance(chunk, AIMessageChunk) + chunks.append(chunk) + else: + result = await llm.ainvoke([message]) + # When streaming=True, ainvoke still returns full message + assert isinstance(result, AIMessage) + return + elif use_stream_method: + for chunk in llm.stream([message]): + assert isinstance(chunk, AIMessageChunk) + chunks.append(chunk) + else: + result = llm.invoke([message]) + # When streaming=True, invoke still returns full message + assert isinstance(result, AIMessage) + return + + # Verify we got chunks + assert len(chunks) > 0 + + # Verify we can reconstruct the message + full_message = chunks[0] + for chunk in chunks[1:]: # type: ignore[assignment] + full_message = full_message + chunk + + full_message = cast("AIMessageChunk", full_message) + assert isinstance(full_message, AIMessageChunk) + if isinstance(full_message.content, list): + text_content = "".join( + block.get("text", "") + for block in full_message.content + if isinstance(block, dict) and block.get("type") == "text" + ) + assert len(text_content) > 0 + else: + assert isinstance(full_message.content, str) + assert len(full_message.content) > 0 + + # Verify usage metadata is present in the final message + _check_usage_metadata(cast("AIMessage", full_message)) + finally: + # Explicitly close the client to avoid resource warnings + if llm.client: + llm.client.close() + if llm.client.aio: + await llm.client.aio.aclose() + + +@pytest.mark.parametrize("output_version", ["v0", "v1"]) +def test_gemini_3_pro_streaming_with_thinking( + output_version: Literal["v0", "v1"], backend_config: dict +) -> None: + """Test `gemini-3-pro-preview` streaming with thinking capabilities. + + `gemini-3-pro-preview` uses `thinking_level` instead of `thinking_budget`. + """ + llm = ChatGoogleGenerativeAI( + model="gemini-3-pro-preview", + thinking_level="high", + include_thoughts=True, + output_version=output_version, + **backend_config, + ) + + input_message = HumanMessage( + "How many r's are in the word 'strawberry'? Think through this carefully." 
+ ) + + chunks: list[BaseMessageChunk] = [] + for chunk in llm.stream([input_message]): + assert isinstance(chunk, AIMessageChunk) + chunks.append(chunk) + + assert len(chunks) > 0 + + # Reconstruct full message + full_message = chunks[0] + for chunk in chunks[1:]: # type: ignore[assignment] + full_message = full_message + chunk + + full_message = cast("AIMessageChunk", full_message) + assert isinstance(full_message, AIMessageChunk) + assert isinstance(full_message.content, list), ( + f"Expected list content, got {type(full_message.content)}" + ) + + # Check for thinking/reasoning blocks based on output_version + if output_version == "v0": + # v0 uses "thinking" blocks + thought_blocks = [ + block + for block in full_message.content + if isinstance(block, dict) and block.get("type") == "thinking" + ] + assert thought_blocks, ( + "Expected thinking blocks when include_thoughts=True for gemini-3 with v0" + ) + else: + # v1 uses "reasoning" blocks + reasoning_blocks = [ + block + for block in full_message.content + if isinstance(block, dict) and block.get("type") == "reasoning" + ] + assert reasoning_blocks, ( + "Expected reasoning blocks when include_thoughts=True for gemini-3 with v1" + ) + + # Verify usage metadata includes reasoning tokens + _check_usage_metadata(cast("AIMessage", full_message)) + assert full_message.usage_metadata is not None + if ( + "output_token_details" in full_message.usage_metadata + and "reasoning" in full_message.usage_metadata["output_token_details"] + ): + assert full_message.usage_metadata["output_token_details"]["reasoning"] > 0 + + +@pytest.mark.parametrize("output_version", ["v0", "v1"]) +def test_gemini_3_pro_agent_loop_streaming( + output_version: Literal["v0", "v1"], backend_config: dict +) -> None: + """Test `gemini-3-pro-preview` agent loop with streaming and thinking.""" + + @tool + def calculate_sum(a: int, b: int) -> int: + """Calculate the sum of two numbers. + + Args: + a: First number. + b: Second number. + + Returns: + The sum of a and b. + """ + return a + b + + llm = ChatGoogleGenerativeAI( + model="gemini-3-pro-preview", + thinking_level="high", + include_thoughts=True, + output_version=output_version, + **backend_config, + ) + llm_with_tools = llm.bind_tools([calculate_sum]) + + input_message = HumanMessage("What is 123 + 456? 
Use the calculator tool.") + + # First call - stream tool call + chunks: list[BaseMessageChunk] = [] + for chunk in llm_with_tools.stream([input_message]): + assert isinstance(chunk, AIMessageChunk) + chunks.append(chunk) + + assert len(chunks) > 0 + + # Reconstruct message + tool_call_message = chunks[0] + for chunk in chunks[1:]: # type: ignore[assignment] + tool_call_message = tool_call_message + chunk + + tool_call_message = cast("AIMessageChunk", tool_call_message) + assert isinstance(tool_call_message, AIMessageChunk) + tool_calls = tool_call_message.tool_calls + assert len(tool_calls) == 1 + tool_call = tool_calls[0] + assert tool_call["name"] == "calculate_sum" + + # Execute tool + tool_message = calculate_sum.invoke(tool_call) + assert isinstance(tool_message, ToolMessage) + + # Second call - stream final response with reasoning + response_chunks: list[BaseMessageChunk] = [] + for chunk in llm_with_tools.stream( + [ + input_message, + tool_call_message, + tool_message, + ] + ): + assert isinstance(chunk, AIMessageChunk) + response_chunks.append(chunk) + + assert len(response_chunks) > 0 + + # Reconstruct response + response = response_chunks[0] + for chunk in response_chunks[1:]: # type: ignore[assignment] + response = response + chunk + + response = cast("AIMessageChunk", response) + assert isinstance(response, AIMessageChunk) + assert isinstance(response.content, list) + + # Verify response has text blocks + text_blocks = [ + block + for block in response.content + if isinstance(block, dict) and block.get("type") == "text" + ] + assert len(text_blocks) > 0 + + +@pytest.mark.parametrize("model_name", [_MODEL, "gemini-3-pro-preview"]) +@pytest.mark.parametrize("output_version", ["v0", "v1"]) +def test_streaming_with_multiple_tool_calls( + model_name: str, output_version: Literal["v0", "v1"], backend_config: dict +) -> None: + """Test streaming with multiple tool calls in a single response.""" + + @tool + def get_temperature(city: str) -> str: + """Get temperature for a city. + + Args: + city: The city name. + + Returns: + Temperature information. + """ + return f"Temperature in {city}: 72°F" + + @tool + def get_humidity(city: str) -> str: + """Get humidity for a city. + + Args: + city: The city name. + + Returns: + Humidity information. + """ + return f"Humidity in {city}: 65%" + + llm = ChatGoogleGenerativeAI( + model=model_name, + streaming=True, + output_version=output_version, + **backend_config, + ) + llm_with_tools = llm.bind_tools([get_temperature, get_humidity]) + + input_message = HumanMessage( + "Get both temperature and humidity for San Francisco. Use both tools." 
+    )
+
+    # Stream tool calls
+    chunks: list[BaseMessageChunk] = []
+    for chunk in llm_with_tools.stream([input_message]):
+        assert isinstance(chunk, AIMessageChunk)
+        chunks.append(chunk)
+
+    assert len(chunks) > 0
+
+    # Reconstruct message
+    tool_call_message = chunks[0]
+    for chunk in chunks[1:]:  # type: ignore[assignment]
+        tool_call_message = tool_call_message + chunk
+
+    tool_call_message = cast("AIMessageChunk", tool_call_message)
+    assert isinstance(tool_call_message, AIMessageChunk)
+    tool_calls = tool_call_message.tool_calls
+
+    # Model may make 1 or 2 tool calls depending on its decision
+    assert len(tool_calls) >= 1
+    assert len(tool_calls) <= 2
+
+    # Execute tools
+    tool_messages = []
+    for tool_call in tool_calls:
+        if tool_call["name"] == "get_temperature":
+            tool_message = get_temperature.invoke(tool_call)
+        elif tool_call["name"] == "get_humidity":
+            tool_message = get_humidity.invoke(tool_call)
+        else:
+            continue
+        tool_messages.append(tool_message)
+
+    # Stream final response
+    response_chunks: list[BaseMessageChunk] = []
+    for chunk in llm_with_tools.stream(
+        [input_message, tool_call_message, *tool_messages]
+    ):
+        assert isinstance(chunk, AIMessageChunk)
+        response_chunks.append(chunk)
+
+    assert len(response_chunks) > 0
+
+
+@pytest.mark.extended
+def test_context_caching(backend_config: dict) -> None:
+    """Test context caching with large text content.
+
+    Note: For the Google AI backend, `gs://` URIs require `client.files.upload()`
+    first and then the `media` format. The Vertex AI backend supports `gs://` URIs
+    directly with the `media` format. For simplicity, this test uses large text
+    content instead.
+    """
+    # Create a large context document (needs to be large enough to trigger caching)
+    large_document = (
+        "RESEARCH PAPER ON ARTIFICIAL INTELLIGENCE\n\n"
+        "Abstract: This paper discusses the fundamentals of artificial intelligence "
+        "and machine learning.\n\n"
+    )
+    # Add substantial content to meet caching minimum token requirements
+    for i in range(200):
+        large_document += (
+            f"Section {i}: This section contains detailed information about "
+            f"various aspects of AI including neural networks, deep learning, "
+            f"natural language processing, computer vision, and reinforcement "
+            f"learning. The field of artificial intelligence has evolved "
+            f"significantly over the past decades, with breakthroughs in pattern "
+            f"recognition, data processing, and autonomous systems. "
+        )
+
+    system_instruction = (
+        "You are an expert researcher. You always stick to the facts in the sources "
+        "provided, and never make up new facts.\n\n"
+        "If asked about it, the secret number is 747.\n\n"
+        "Now analyze the research paper provided and answer questions about it."
+    )
+
+    model = ChatGoogleGenerativeAI(model=_MODEL, **backend_config)
+
+    cached_content = create_context_cache(
+        model,
+        messages=[
+            SystemMessage(content=system_instruction),
+            HumanMessage(content=large_document),
+        ],
+        ttl="300s",  # 5 minutes
+    )
+
+    # Using cached_content in constructor
+    chat = ChatGoogleGenerativeAI(
+        model=_MODEL, cached_content=cached_content, **backend_config
+    )
+
+    response = chat.invoke("What is the secret number?")
+
+    assert isinstance(response, AIMessage)
+    assert isinstance(response.content, str)
+    assert "747" in response.content
+
+    # Verify cache was used (should have cache_read tokens in usage metadata)
+    if response.usage_metadata:
+        # Cache read tokens should be present when using cached content
+        cache_read_check = (
+            "cache_read_input" in response.usage_metadata
+            or cast("int", response.usage_metadata.get("cache_read_input", 0)) >= 0
+        )
+        assert cache_read_check
+
+    # Using cached content in request
+    chat = ChatGoogleGenerativeAI(model=_MODEL, **backend_config)
+    response = chat.invoke("What is the secret number?", cached_content=cached_content)
+
+    assert isinstance(response, AIMessage)
+    assert isinstance(response.content, str)
+    assert "747" in response.content
+
+
+@pytest.mark.extended
+def test_context_caching_tools(backend_config: dict) -> None:
+    """Test context caching with tools and large text content.
+
+    Note: For the Google AI backend, `gs://` URIs require `client.files.upload()`
+    first and then the `media` format. The Vertex AI backend supports `gs://` URIs
+    directly with the `media` format. For simplicity, this test uses large text
+    content instead.
+    """
+
+    @tool
+    def get_secret_number() -> int:
+        """Gets secret number."""
+        return 747
+
+    tools = [get_secret_number]
+
+    # Create a large context document
+    large_document = (
+        "RESEARCH PAPER ON ARTIFICIAL INTELLIGENCE\n\n"
+        "Abstract: This paper discusses various AI topics.\n\n"
+    )
+    for i in range(200):
+        large_document += (
+            f"Section {i}: Detailed AI research content about neural networks, "
+            f"machine learning algorithms, and computational intelligence. "
+        )
+
+    system_instruction = (
+        "You are an expert researcher. You always stick to the facts in the sources "
+        "provided, and never make up new facts.\n\n"
+        "You have a get_secret_number function available. Use this tool if someone "
+        "asks for the secret number.\n\n"
+        "Now analyze the research paper and answer questions."
+    )
+
+    model = ChatGoogleGenerativeAI(model=_MODEL, **backend_config)
+
+    cached_content = create_context_cache(
+        model,
+        messages=[
+            SystemMessage(content=system_instruction),
+            HumanMessage(content=large_document),
+        ],
+        tools=cast("list", tools),
+        ttl="300s",  # 5 minutes
+    )
+
+    # Note: When using cached content with tools, do NOT bind tools again
+    # The tools are already part of the cache and will be used automatically
+    chat = ChatGoogleGenerativeAI(
+        model=_MODEL, cached_content=cached_content, **backend_config
+    )
+
+    # Invoke the model - it should call the tool from the cached content
+    response = chat.invoke("What is the secret number?")
+
+    assert isinstance(response, AIMessage)
+    # The model should call the get_secret_number tool that was cached
+    assert len(response.tool_calls) > 0
+    assert response.tool_calls[0]["name"] == "get_secret_number"
+
+
+def test_audio_timestamp(backend_config: dict) -> None:
+    """Test audio transcription with timestamps.
+
+    This test verifies that audio files can be transcribed with timestamps using
+    the `audio_timestamp` generation config parameter. 
Similar to PDF support, we + use the `image_url` format with an HTTP URL to download audio and send as + inline bytes, which works for both Google AI and Vertex AI backends. + + The `audio_timestamp` parameter enables timestamp generation in the format + `[HH:MM:SS]` within the transcribed output. + """ + llm = ChatGoogleGenerativeAI(model=_MODEL, **backend_config) + audio_url = ( + "https://www.learningcontainer.com/wp-content/uploads/2020/02/Kalimba.mp3" + ) + + message = HumanMessage( + content=[ + { + "type": "text", + "text": "Transcribe this audio with timestamps.", + }, + { + "type": "image_url", # image_url format works for audio too + "image_url": {"url": audio_url}, + }, + ] + ) + + response = llm.invoke([message], generation_config={"audio_timestamp": True}) + + assert isinstance(response, AIMessage) + + # Extract text content (handle both string and list formats) + if isinstance(response.content, list): + text_content = "".join( + block.get("text", "") + for block in response.content + if isinstance(block, dict) and block.get("type") == "text" + ) + else: + text_content = response.content + + assert len(text_content.strip()) > 0 + + # Check that the response contains timestamp markers + # Timestamps appear in formats like **[00:00]**, **0:00**, or [HH:MM:SS] + # Check for common timestamp patterns + has_timestamps = ( + "**[" in text_content # Format: **[00:00]** + or "**0:" in text_content # Format: **0:00** + or "[00:" in text_content # Format: [00:00:00] + ) + assert has_timestamps, ( + f"No timestamp markers found in response: {text_content[:200]}" + ) + + +def test_langgraph_example(backend_config: dict) -> None: + """Test integration with LangGraph-style multi-turn tool calling.""" + llm = ChatGoogleGenerativeAI( + model=_MODEL, max_output_tokens=8192, temperature=0.2, **backend_config + ) + + add_declaration = { + "name": "add", + "description": "Adds a and b.", + "parameters": { + "properties": { + "a": {"description": "first int", "type": "integer"}, + "b": {"description": "second int", "type": "integer"}, + }, + "required": ["a", "b"], + "type": "object", + }, + } + + multiply_declaration = { + "name": "multiply", + "description": "Multiply a and b.", + "parameters": { + "properties": { + "a": {"description": "first int", "type": "integer"}, + "b": {"description": "second int", "type": "integer"}, + }, + "required": ["a", "b"], + "type": "object", + }, + } + + messages = [ + SystemMessage( + content=( + "You are a helpful assistant tasked with performing " + "arithmetic on a set of inputs." + ) + ), + HumanMessage(content="Multiply 2 and 3"), + HumanMessage(content="No, actually multiply 3 and 3!"), + ] + step1 = llm.invoke( + messages, + tools=[{"function_declarations": [add_declaration, multiply_declaration]}], + ) + step2 = llm.invoke( + [ + *messages, + step1, + ToolMessage(content="9", tool_call_id=step1.tool_calls[0]["id"]), + ], + tools=[{"function_declarations": [add_declaration, multiply_declaration]}], + ) + assert isinstance(step2, AIMessage) + + +@pytest.mark.flaky(retries=3, delay=1) +def test_streaming_function_call_arguments() -> None: + """Test streaming function calling with `stream_function_call_arguments=True`. + + Note: This feature is only available on Vertex AI with Gemini 3 Pro (Preview). + This test verifies that function call arguments are streamed incrementally + rather than being delivered in a single chunk. 
+ """ + + @tool + def search_database( + query: str, + filters: dict[str, str], + max_results: int = 10, + ) -> str: + """Search a database with a query and filters. + + Args: + query: The search query string. + filters: Dictionary of field:value filters to apply. + max_results: Maximum number of results to return. + + Returns: + Search results as a string. + """ + return f"Found {max_results} results for '{query}' with filters {filters}" + + # Use Vertex AI as stream_function_call_arguments is only supported there + # Pass api_key=None to force use of application default credentials + project = os.getenv("GOOGLE_CLOUD_PROJECT") or os.getenv("GCP_PROJECT") + if not project: + pytest.skip("Vertex AI tests require GOOGLE_CLOUD_PROJECT env var to be set") + + # Use Gemini 3 Pro as these features are only available there + # Note: This test explicitly requires Vertex AI, so we hardcode those parameters + llm = ChatGoogleGenerativeAI( + model="gemini-3-pro-preview", + vertexai=True, + project=project, + api_key=None, # Force use of application default credentials + ) + + # Configure tool_config with streaming function call arguments + tool_config = ToolConfig( + function_calling_config=FunctionCallingConfig( + mode=FunctionCallingConfigMode.AUTO, + stream_function_call_arguments=True, + ) + ) + + llm_with_tools = llm.bind_tools([search_database], tool_config=tool_config) + + input_message = HumanMessage( + content=( + "Search the database for 'machine learning' with filters " + "category='AI' and year='2024', return 20 results" + ) + ) + + # Stream the response + chunks: list[AIMessageChunk] = [] + for chunk in llm_with_tools.stream([input_message]): + assert isinstance(chunk, AIMessageChunk) + chunks.append(chunk) + + assert len(chunks) > 0, "No chunks received from streaming" + + # Reconstruct the full message + full_message = chunks[0] + for chunk in chunks[1:]: + full_message = full_message + chunk + + # Verify that the tool call was made + assert isinstance(full_message, AIMessageChunk) + tool_calls = full_message.tool_calls + + # With stream_function_call_arguments=True, we should see incremental updates + # Check that we have multiple chunks with tool_call_chunks + chunks_with_tool_calls = [ + c for c in chunks if c.tool_call_chunks and len(c.tool_call_chunks) > 0 + ] + + # Verify that streaming is actually happening + assert len(chunks_with_tool_calls) > 0, "No chunks contained tool call data" + assert len(chunks_with_tool_calls) > 1, ( + f"Expected multiple chunks with tool call data to verify streaming, " + f"got {len(chunks_with_tool_calls)}" + ) + + # Verify the tool name appears in at least one tool call + # Note: Due to chunk aggregation behavior, args may be in separate chunks + tool_names = [tc.get("name") for tc in tool_calls if tc.get("name")] + assert "search_database" in tool_names, ( + f"Expected 'search_database' in tool names, got {tool_names}" + ) + + # The presence of multiple tool calls indicates incremental streaming + assert len(tool_calls) > 1, ( + f"Expected multiple tool calls indicating argument streaming, " + f"got {len(tool_calls)}" + ) + + +@pytest.mark.flaky(retries=3, delay=1) +def test_multimodal_function_response() -> None: + """Test multimodal function responses with image/file data. + + Note: This feature is only available on Vertex AI with Gemini 3 Pro (Preview). + This test verifies that function responses can include image/file data + and that the model can process multimodal function responses. 
+ """ + + @tool + def get_product_image(product_id: str) -> str: + """Get the product image for a given product ID. + + Args: + product_id: The unique identifier for the product. + + Returns: + Information about retrieving the product image. + """ + return f"Image data for product {product_id}" + + # Use Vertex AI as this feature is only available there + # Pass api_key=None to force use of application default credentials + + project = os.getenv("GOOGLE_CLOUD_PROJECT") or os.getenv("GCP_PROJECT") + if not project: + pytest.skip("Vertex AI tests require GOOGLE_CLOUD_PROJECT env var to be set") + + # Use Gemini 3 Pro as these features are only available there + llm = ChatGoogleGenerativeAI( + model="gemini-3-pro-preview", + vertexai=True, + project=project, + api_key=None, # Force use of application default credentials + ) + llm_with_tools = llm.bind_tools([get_product_image]) + + input_message = HumanMessage( + content="Show me the product image for product ID 'laptop-2024'" + ) + + # First call - model should request the tool + tool_call_message = llm_with_tools.invoke([input_message]) + assert isinstance(tool_call_message, AIMessage) + tool_calls = tool_call_message.tool_calls + assert len(tool_calls) == 1 + tool_call = tool_calls[0] + assert tool_call["name"] == "get_product_image" + assert "product_id" in tool_call["args"] + + # Create a multimodal function response with an image + # Using a Google Cloud Storage URI + tool_response = ToolMessage( + content=json.dumps( + { + "type": "function_response_file_data", + "file_uri": "gs://cloud-samples-data/generative-ai/image/scones.jpg", + "mime_type": "image/jpeg", + "display_name": "Product Image: laptop-2024", + } + ), + tool_call_id=tool_call["id"], + ) + + # Second call - model should incorporate the image response + response = llm_with_tools.invoke( + [ + input_message, + tool_call_message, + tool_response, + ] + ) + + assert isinstance(response, AIMessage) + + # The model should acknowledge receiving data + if isinstance(response.content, list): + text_content = "".join( + block.get("text", "") + for block in response.content + if isinstance(block, dict) and block.get("type") == "text" + ) + assert len(text_content) > 0, "Expected text response from model" + else: + assert isinstance(response.content, str) + assert len(response.content) > 0, "Expected text response from model" + + # Test successful - multimodal function response is working diff --git a/libs/genai/tests/integration_tests/test_llms.py b/libs/genai/tests/integration_tests/test_llms.py index 9b72f9493..6ed4bc9a9 100644 --- a/libs/genai/tests/integration_tests/test_llms.py +++ b/libs/genai/tests/integration_tests/test_llms.py @@ -51,8 +51,15 @@ def test_google_generativeai_generate(model_name: str) -> None: ) async def test_google_generativeai_agenerate(model_name: str) -> None: llm = GoogleGenerativeAI(temperature=0, model=model_name) - output = await llm.agenerate(["Please say foo:"]) - assert isinstance(output, LLMResult) + try: + output = await llm.agenerate(["Please say foo:"]) + assert isinstance(output, LLMResult) + finally: + # Explicitly close the client to avoid resource warnings + if llm.client and hasattr(llm.client, "client") and llm.client.client: + llm.client.client.close() + if llm.client.client.aio: + await llm.client.client.aio.aclose() @pytest.mark.parametrize( @@ -88,7 +95,7 @@ def test_safety_settings_gemini(model_name: str) -> None: # safety filters safety_settings: dict[HarmCategory, HarmBlockThreshold] = { - 
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE, # type: ignore[dict-item] + HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE, } # test with safety filters directly to generate diff --git a/libs/genai/tests/integration_tests/test_standard.py b/libs/genai/tests/integration_tests/test_standard.py index b74bec28e..404998d0d 100644 --- a/libs/genai/tests/integration_tests/test_standard.py +++ b/libs/genai/tests/integration_tests/test_standard.py @@ -6,7 +6,6 @@ import pytest from langchain_core.language_models import BaseChatModel from langchain_core.rate_limiters import InMemoryRateLimiter -from langchain_core.tools import BaseTool from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_google_genai import ChatGoogleGenerativeAI @@ -14,7 +13,7 @@ rate_limiter = InMemoryRateLimiter(requests_per_second=0.25) _FLASH_MODEL = "gemini-2.5-flash" -_PRO_MODEL = "gemini-2.5-pro" +_PRO_MODEL = "gemini-3-pro-preview" def _has_multimodal_secrets() -> bool: @@ -25,102 +24,174 @@ def _has_multimodal_secrets() -> bool: return bool(os.environ.get("LANGCHAIN_TESTS_USER_AGENT")) -class TestGeminiFlashStandard(ChatModelIntegrationTests): - @property - def chat_model_class(self) -> type[BaseChatModel]: - return ChatGoogleGenerativeAI - - @property - def chat_model_params(self) -> dict: - return { - "model": _FLASH_MODEL, - "rate_limiter": rate_limiter, - } - - @property - def supports_image_inputs(self) -> bool: - return True - - @property - def supports_image_urls(self) -> bool: - return True - - @property - def supports_image_tool_message(self) -> bool: - return True - - @property - def supports_pdf_inputs(self) -> bool: - return True - - @property - def supports_audio_inputs(self) -> bool: - return True - - @pytest.mark.xfail( - not _has_multimodal_secrets(), - reason=( - "Multimodal tests require integration secrets (user agent to fetch " - "external resources)" - # (Won't run locally without LANGCHAIN_TESTS_USER_AGENT) - ), - run=False, - ) - def test_audio_inputs(self, model: BaseChatModel) -> None: - """Skip audio tests in PR context - requires external resource fetching.""" - super().test_audio_inputs(model) - - @pytest.mark.xfail( - not _has_multimodal_secrets(), - reason=( - "Multimodal tests require integration secrets (user agent to fetch " - "external resources)" - # (Won't run locally without LANGCHAIN_TESTS_USER_AGENT) - ), - run=False, - ) - def test_pdf_inputs(self, model: BaseChatModel) -> None: - """Skip PDF tests in PR context - requires external resource fetching.""" - super().test_pdf_inputs(model) - - -class TestGeminiProStandard(ChatModelIntegrationTests): - @property - def chat_model_class(self) -> type[BaseChatModel]: - return ChatGoogleGenerativeAI - - @property - def chat_model_params(self) -> dict: - return { - "model": _PRO_MODEL, - "rate_limiter": rate_limiter, - } - - @pytest.mark.xfail(reason="Not yet supported") - def test_tool_message_histories_list_content( - self, model: BaseChatModel, my_adder_tool: BaseTool - ) -> None: - super().test_tool_message_histories_list_content(model, my_adder_tool) - - @pytest.mark.xfail( - reason="Investigate: prompt_token_count inconsistent in final chunk." +def _get_backend_configs() -> list[tuple[str, dict]]: + """Get list of backend configurations based on TEST_VERTEXAI env var. + + Returns: + List of (backend_name, backend_config) tuples. 
+ """ + vertexai_setting = os.environ.get("TEST_VERTEXAI", "").lower() + + configs: list = [] + + # Add Google AI config + if vertexai_setting != "only": + configs.append(("GoogleAI", {})) + + # Add Vertex AI config + if vertexai_setting in ("1", "true", "yes", "only"): + project = os.environ.get("GOOGLE_CLOUD_PROJECT") + # Always add config, tests will skip if project is missing + configs.append( + ( + "VertexAI", + {"vertexai": True, "project": project, "api_key": None} + if project + else {"vertexai": True, "project": None, "api_key": None}, + ) + ) + + return configs + + +# Dynamically create test classes for each backend +for backend_name, backend_config in _get_backend_configs(): + + class _TestGeminiFlashStandardBase(ChatModelIntegrationTests): + @property + def chat_model_class(self) -> type[BaseChatModel]: + return ChatGoogleGenerativeAI + + @property + def chat_model_params(self) -> dict: + # Skip if Vertex AI is requested but GOOGLE_CLOUD_PROJECT is not set + if backend_config.get("vertexai") and not backend_config.get("project"): + pytest.skip( + "Vertex AI tests require GOOGLE_CLOUD_PROJECT env var to be set" + ) + return { + "model": _FLASH_MODEL, + "rate_limiter": rate_limiter, + **backend_config, + } + + @property + def supports_image_inputs(self) -> bool: + return True + + @property + def supports_image_urls(self) -> bool: + return True + + @property + def supports_image_tool_message(self) -> bool: + return True + + @property + def supports_pdf_inputs(self) -> bool: + return True + + @property + def supports_audio_inputs(self) -> bool: + return True + + @property + def supports_json_mode(self) -> bool: + return True + + @pytest.mark.xfail( + not _has_multimodal_secrets(), + reason=( + "Multimodal tests require integration secrets (user agent to fetch " + "external resources)" + ), + run=False, + ) + def test_audio_inputs(self, model: BaseChatModel) -> None: + """Skip audio tests in PR context - requires external resource fetching.""" + super().test_audio_inputs(model) + + @pytest.mark.xfail( + not _has_multimodal_secrets(), + reason=( + "Multimodal tests require integration secrets (user agent to fetch " + "external resources)" + ), + run=False, + ) + def test_pdf_inputs(self, model: BaseChatModel) -> None: + """Skip PDF tests in PR context - requires external resource fetching.""" + super().test_pdf_inputs(model) + + class _TestGeminiProStandardBase(ChatModelIntegrationTests): + @property + def chat_model_class(self) -> type[BaseChatModel]: + return ChatGoogleGenerativeAI + + @property + def chat_model_params(self) -> dict: + # Skip if Vertex AI is requested but GOOGLE_CLOUD_PROJECT is not set + if backend_config.get("vertexai") and not backend_config.get("project"): + pytest.skip( + "Vertex AI tests require GOOGLE_CLOUD_PROJECT env var to be set" + ) + return { + "model": _PRO_MODEL, + "rate_limiter": rate_limiter, + **backend_config, + } + + @property + def supports_image_inputs(self) -> bool: + return True + + @property + def supports_image_urls(self) -> bool: + return True + + @property + def supports_pdf_inputs(self) -> bool: + return True + + @property + def supports_audio_inputs(self) -> bool: + return True + + @property + def supports_json_mode(self) -> bool: + return True + + @property + def supports_image_tool_message(self) -> bool: + return True + + @property + def supports_pdf_tool_message(self) -> bool: + return True + + @property + def supported_usage_metadata_details( + self, + ) -> dict[ + Literal["invoke", "stream"], + list[ + Literal[ + "audio_input", + 
"audio_output", + "reasoning_output", + "cache_read_input", + "cache_creation_input", + ] + ], + ]: + return {"invoke": [], "stream": []} + + # Create backend-specific test classes + flash_class_name = f"TestGeminiFlashStandard{backend_name}" + pro_class_name = f"TestGeminiProStandard{backend_name}" + + # Add classes to module globals so pytest can discover them + globals()[flash_class_name] = type( + flash_class_name, (_TestGeminiFlashStandardBase,), {} ) - def test_usage_metadata_streaming(self, model: BaseChatModel) -> None: - super().test_usage_metadata_streaming(model) - - @property - def supported_usage_metadata_details( - self, - ) -> dict[ - Literal["invoke", "stream"], - list[ - Literal[ - "audio_input", - "audio_output", - "reasoning_output", - "cache_read_input", - "cache_creation_input", - ] - ], - ]: - return {"invoke": [], "stream": []} + globals()[pro_class_name] = type(pro_class_name, (_TestGeminiProStandardBase,), {}) diff --git a/libs/genai/tests/unit_tests/__snapshots__/test_standard.ambr b/libs/genai/tests/unit_tests/__snapshots__/test_standard.ambr index 723e55a8d..332e77ec6 100644 --- a/libs/genai/tests/unit_tests/__snapshots__/test_standard.ambr +++ b/libs/genai/tests/unit_tests/__snapshots__/test_standard.ambr @@ -18,7 +18,7 @@ }), 'max_output_tokens': 100, 'max_retries': 2, - 'model': 'models/gemini-2.5-flash', + 'model': 'gemini-2.5-flash', 'n': 1, 'stop': list([ ]), diff --git a/libs/genai/tests/unit_tests/test_chat_models.py b/libs/genai/tests/unit_tests/test_chat_models.py index 46945d946..a48b7a7e4 100644 --- a/libs/genai/tests/unit_tests/test_chat_models.py +++ b/libs/genai/tests/unit_tests/test_chat_models.py @@ -1,24 +1,33 @@ """Test chat model integration.""" -import asyncio import base64 import json +import os import warnings from collections.abc import Iterator from concurrent.futures import ThreadPoolExecutor from typing import Any, Literal, cast from unittest.mock import ANY, AsyncMock, Mock, patch -import google.ai.generativelanguage as glm import pytest -from google.ai.generativelanguage_v1beta.types import ( +from google.genai.errors import ClientError +from google.genai.types import ( + Blob, Candidate, Content, + FinishReason, FunctionCall, + FunctionResponse, GenerateContentResponse, + GenerateContentResponseUsageMetadata, + Language, Part, + ThinkingLevel, ) -from google.api_core.exceptions import ResourceExhausted +from google.genai.types import ( + Outcome as CodeExecutionResultOutcome, +) +from google.protobuf.struct_pb2 import Struct from langchain_core.load import dumps, loads from langchain_core.messages import ( AIMessage, @@ -43,9 +52,10 @@ ) from langchain_google_genai.chat_models import ( ChatGoogleGenerativeAI, - _chat_with_retry, + ChatGoogleGenerativeAIError, _convert_to_parts, _convert_tool_message_to_parts, + _get_ai_message_tool_messages_parts, _is_gemini_3_or_later, _is_gemini_25_model, _parse_chat_history, @@ -57,6 +67,8 @@ FAKE_API_KEY = "fake-api-key" +SMALL_VIEWABLE_BASE64_IMAGE = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAIAQMAAAD+wSzIAAAABlBMVEX///+/v7+jQ3Y5AAAADklEQVQI12P4AIX8EAgALgAD/aNpbtEAAAAASUVORK5CYII=" # noqa: E501 + def test_integration_initialization() -> None: """Test chat model initialization.""" @@ -109,7 +121,7 @@ def test_integration_initialization() -> None: "HARM_CATEGORY_DANGEROUS_CONTENT": "BLOCK_LOW_AND_ABOVE" }, # Invalid arg ) - assert llm.model == f"models/{MODEL_NAME}" + assert llm.model == f"{MODEL_NAME}" mock_warning.assert_called_once() call_args = mock_warning.call_args[0][0] assert 
"Unexpected argument 'safety_setting'" in call_args @@ -119,7 +131,7 @@ def test_integration_initialization() -> None: def test_safety_settings_initialization() -> None: """Test chat model initialization with `safety_settings` parameter.""" safety_settings: dict[HarmCategory, HarmBlockThreshold] = { - HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE # type: ignore[dict-item] + HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE } # Test initialization with safety_settings @@ -133,7 +145,7 @@ def test_safety_settings_initialization() -> None: # Verify the safety_settings are stored correctly assert llm.safety_settings == safety_settings assert llm.temperature == 0.7 - assert llm.model == f"models/{MODEL_NAME}" + assert llm.model == f"{MODEL_NAME}" def test_initialization_inside_threadpool() -> None: @@ -147,50 +159,6 @@ def test_initialization_inside_threadpool() -> None: ).result() -def test_client_transport() -> None: - """Test client transport configuration.""" - model = ChatGoogleGenerativeAI(model=MODEL_NAME, google_api_key=FAKE_API_KEY) - assert model.client.transport.kind == "grpc" - - model = ChatGoogleGenerativeAI( - model=MODEL_NAME, google_api_key="fake-key", transport="rest" - ) - assert model.client.transport.kind == "rest" - - async def check_async_client() -> None: - model = ChatGoogleGenerativeAI(model=MODEL_NAME, google_api_key=FAKE_API_KEY) - assert model.async_client.transport.kind == "grpc_asyncio" - - # Test auto conversion of transport to "grpc_asyncio" from "rest" - model = ChatGoogleGenerativeAI( - model=MODEL_NAME, google_api_key=FAKE_API_KEY, transport="rest" - ) - assert model.async_client.transport.kind == "grpc_asyncio" - - asyncio.run(check_async_client()) - - -def test_initalization_without_async() -> None: - chat = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=SecretStr(FAKE_API_KEY), - ) - assert chat.async_client is None - - -def test_initialization_with_async() -> None: - async def initialize_chat_with_async_client() -> ChatGoogleGenerativeAI: - model = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=SecretStr(FAKE_API_KEY), - ) - _ = model.async_client - return model - - chat = asyncio.run(initialize_chat_with_async_client()) - assert chat.async_client is not None - - def test_api_key_is_string() -> None: chat = ChatGoogleGenerativeAI( model=MODEL_NAME, @@ -213,15 +181,24 @@ def test_api_key_masked_when_passed_via_constructor( def test_profile() -> None: - model = ChatGoogleGenerativeAI(model="gemini-1.5-flash") + model = ChatGoogleGenerativeAI( + model="gemini-1.5-flash", + google_api_key=SecretStr(FAKE_API_KEY), + ) assert model.profile assert not model.profile["reasoning_output"] - model = ChatGoogleGenerativeAI(model="gemini-2.5-flash") + model = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + google_api_key=SecretStr(FAKE_API_KEY), + ) assert model.profile assert model.profile["reasoning_output"] - model = ChatGoogleGenerativeAI(model="foo") + model = ChatGoogleGenerativeAI( + model="foo", + google_api_key=SecretStr(FAKE_API_KEY), + ) assert model.profile == {} @@ -293,106 +270,90 @@ def test_parse_history() -> None: ] system_instruction, history = _parse_chat_history(messages) assert len(history) == 8 - assert history[0] == glm.Content(role="user", parts=[glm.Part(text=text_question1)]) - assert history[1] == glm.Content( + assert history[0] == Content(role="user", parts=[Part(text=text_question1)]) + assert history[1] == Content( role="model", parts=[ - glm.Part( - 
function_call=glm.FunctionCall( - { - "name": "calculator", - "args": function_call_1["args"], - } + Part( + function_call=FunctionCall( + name="calculator", + args=function_call_1["args"], ) ) ], ) - assert history[2] == glm.Content( + assert history[2] == Content( role="user", parts=[ - glm.Part( - function_response=glm.FunctionResponse( - { - "name": "calculator", - "response": {"result": 4}, - } + Part( + function_response=FunctionResponse( + name="calculator", + response={"result": 4}, ) ) ], ) - assert history[3] == glm.Content( + assert history[3] == Content( role="model", parts=[ - glm.Part( - function_call=glm.FunctionCall( - { - "name": "calculator", - "args": json.loads(function_call_2["arguments"]), - } + Part( + function_call=FunctionCall( + name="calculator", + args=json.loads(function_call_2["arguments"]), ) ) ], ) - assert history[4] == glm.Content( + assert history[4] == Content( role="user", parts=[ - glm.Part( - function_response=glm.FunctionResponse( - { - "name": "calculator", - "response": {"result": 4}, - } + Part( + function_response=FunctionResponse( + name="calculator", + response={"result": 4}, ) ) ], ) - assert history[5] == glm.Content( + assert history[5] == Content( role="model", parts=[ - glm.Part( - function_call=glm.FunctionCall( - { - "name": "calculator", - "args": function_call_3["args"], - } + Part( + function_call=FunctionCall( + name="calculator", + args=function_call_3["args"], ) ), - glm.Part( - function_call=glm.FunctionCall( - { - "name": "calculator", - "args": function_call_4["args"], - } + Part( + function_call=FunctionCall( + name="calculator", + args=function_call_4["args"], ) ), ], ) - assert history[6] == glm.Content( + assert history[6] == Content( role="user", parts=[ - glm.Part( - function_response=glm.FunctionResponse( - { - "name": "calculator", - "response": {"result": 4}, - } + Part( + function_response=FunctionResponse( + name="calculator", + response={"result": 4}, ) ), - glm.Part( - function_response=glm.FunctionResponse( - { - "name": "calculator", - "response": {"result": 6}, - } + Part( + function_response=FunctionResponse( + name="calculator", + response={"result": 6}, ) ), ], ) - assert history[7] == glm.Content(role="model", parts=[glm.Part(text=text_answer1)]) + assert history[7] == Content(role="model", parts=[Part(text=text_answer1)]) if convert_system_message_to_human: assert system_instruction is None else: - assert system_instruction == glm.Content(parts=[glm.Part(text=system_input)]) + assert system_instruction == Content(parts=[Part(text=system_input)]) @pytest.mark.parametrize("content", ['["a"]', '{"a":"b"}', "function output"]) @@ -406,26 +367,27 @@ def test_parse_function_history(content: str | list[str | dict]) -> None: ) def test_additional_headers_support(headers: dict[str, str] | None) -> None: mock_client = Mock() + mock_models = Mock() mock_generate_content = Mock() mock_generate_content.return_value = GenerateContentResponse( - candidates=[Candidate(content=Content(parts=[Part(text="test response")]))] + candidates=[Candidate(content=Content(parts=[Part(text="test response")]))], + usage_metadata=GenerateContentResponseUsageMetadata( + prompt_token_count=10, + candidates_token_count=5, + total_token_count=15, + ), ) - mock_client.return_value.generate_content = mock_generate_content + mock_models.generate_content = mock_generate_content + mock_client.return_value.models = mock_models api_endpoint = "http://127.0.0.1:8000/ai" param_api_key = FAKE_API_KEY param_secret_api_key = SecretStr(param_api_key) - 
param_client_options = {"api_endpoint": api_endpoint} - param_transport = "rest" - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient", - mock_client, - ): + with patch("langchain_google_genai.chat_models.Client", mock_client): chat = ChatGoogleGenerativeAI( model=MODEL_NAME, google_api_key=param_secret_api_key, - client_options=param_client_options, - transport=param_transport, + base_url=api_endpoint, additional_headers=headers, ) @@ -442,442 +404,154 @@ def test_additional_headers_support(headers: dict[str, str] | None) -> None: assert response.content == "test response" mock_client.assert_called_once_with( - transport=param_transport, - client_options=ANY, - client_info=ANY, + api_key=param_api_key, + http_options=ANY, ) - call_client_options = mock_client.call_args_list[0].kwargs["client_options"] - assert call_client_options.api_key == param_api_key - assert call_client_options.api_endpoint == api_endpoint - call_client_info = mock_client.call_args_list[0].kwargs["client_info"] - assert "langchain-google-genai" in call_client_info.user_agent - assert "ChatGoogleGenerativeAI" in call_client_info.user_agent - + call_http_options = mock_client.call_args_list[0].kwargs["http_options"] + assert call_http_options.base_url == api_endpoint -def test_base_url_support() -> None: - """Test that `base_url` is properly merged into `client_options`.""" - mock_client = Mock() - mock_generate_content = Mock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[Candidate(content=Content(parts=[Part(text="test response")]))] - ) - mock_client.return_value.generate_content = mock_generate_content - base_url = "https://example.com" - param_api_key = FAKE_API_KEY - param_secret_api_key = SecretStr(param_api_key) - param_transport = "rest" + # Verify user-agent header is set + assert "User-Agent" in call_http_options.headers + assert "langchain-google-genai" in call_http_options.headers["User-Agent"] + assert "ChatGoogleGenerativeAI" in call_http_options.headers["User-Agent"] - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient", - mock_client, - ): - chat = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - base_url=base_url, - transport=param_transport, - ) + # Verify user-provided headers are included + if headers: + for key, value in headers.items(): + assert call_http_options.headers[key] == value - response = chat.invoke("test") - assert response.content == "test response" - mock_client.assert_called_once_with( - transport=param_transport, - client_options=ANY, - client_info=ANY, - ) - call_client_options = mock_client.call_args_list[0].kwargs["client_options"] - assert call_client_options.api_key == param_api_key - assert call_client_options.api_endpoint == base_url - call_client_info = mock_client.call_args_list[0].kwargs["client_info"] - assert "langchain-google-genai" in call_client_info.user_agent - assert "ChatGoogleGenerativeAI" in call_client_info.user_agent - - -async def test_async_base_url_support() -> None: - """Test that `base_url` is properly merged into `client_options` for async.""" - mock_async_client = Mock() - mock_generate_content = AsyncMock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[ - Candidate(content=Content(parts=[Part(text="async test response")])) - ] +def test_base_url_set_in_constructor() -> None: + chat = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + 
base_url="http://localhost:8000", ) - mock_async_client.return_value.generate_content = mock_generate_content - base_url = "https://async-example.com" - param_api_key = FAKE_API_KEY - param_secret_api_key = SecretStr(param_api_key) - - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceAsyncClient", - mock_async_client, - ): - chat = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - base_url=base_url, - transport="rest", # Should keep "rest" when custom endpoint is used - ) - - response = await chat.ainvoke("async test") - assert response.content == "async test response" - - mock_async_client.assert_called_once_with( - transport="rest", # Should keep "rest" when custom endpoint is specified - client_options=ANY, - client_info=ANY, - ) - call_client_options = mock_async_client.call_args_list[0].kwargs[ - "client_options" - ] - assert call_client_options.api_key == param_api_key - assert call_client_options.api_endpoint == base_url + assert chat.base_url == "http://localhost:8000" -def test_api_endpoint_via_client_options() -> None: - """Test that `api_endpoint` via `client_options` is used in API calls.""" - mock_client = Mock() - mock_generate_content = Mock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[Candidate(content=Content(parts=[Part(text="test response")]))] - ) - mock_client.return_value.generate_content = mock_generate_content - api_endpoint = "https://custom-endpoint.com" - param_api_key = FAKE_API_KEY - param_secret_api_key = SecretStr(param_api_key) - param_transport = "rest" - - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient", - mock_client, - ): - chat = ChatGoogleGenerativeAI( +def test_base_url_passed_to_client() -> None: + with patch("langchain_google_genai.chat_models.Client") as mock_client: + ChatGoogleGenerativeAI( model=MODEL_NAME, - google_api_key=param_secret_api_key, - client_options={"api_endpoint": api_endpoint}, - transport=param_transport, + google_api_key=SecretStr(FAKE_API_KEY), + base_url="http://localhost:8000", + ) + mock_client.assert_called_once_with( + api_key=FAKE_API_KEY, + http_options=ANY, ) + call_http_options = mock_client.call_args_list[0].kwargs["http_options"] + assert call_http_options.base_url == "http://localhost:8000" + assert "langchain-google-genai" in call_http_options.headers["User-Agent"] - response = chat.invoke("test") - assert response.content == "test response" - mock_client.assert_called_once_with( - transport=param_transport, - client_options=ANY, - client_info=ANY, +def test_async_client_property() -> None: + """Test that async_client property exposes `client.aio`.""" + chat = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), ) - call_client_options = mock_client.call_args_list[0].kwargs["client_options"] - assert call_client_options.api_key == param_api_key - assert call_client_options.api_endpoint == api_endpoint - call_client_info = mock_client.call_args_list[0].kwargs["client_info"] - assert "langchain-google-genai" in call_client_info.user_agent - assert "ChatGoogleGenerativeAI" in call_client_info.user_agent + # Verify client is initialized + client = chat.client + assert client is not None + # Verify async_client returns client.aio + assert chat.async_client is client.aio + # Verify async_client has the expected async methods + assert hasattr(chat.async_client, "models") -async def test_async_api_endpoint_via_client_options() -> None: - """Test that 
`api_endpoint` via `client_options` is used in async API calls.""" - mock_async_client = Mock() - mock_generate_content = AsyncMock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[ - Candidate( - content=Content(parts=[Part(text="async custom endpoint response")]) - ) - ] +def test_async_client_raises_when_client_not_initialized() -> None: + """Test that async_client raises `ValueError` if client is `None`.""" + chat = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), ) - mock_async_client.return_value.generate_content = mock_generate_content - api_endpoint = "https://async-custom-endpoint.com" - param_api_key = FAKE_API_KEY - param_secret_api_key = SecretStr(param_api_key) - - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceAsyncClient", - mock_async_client, - ): - chat = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - client_options={"api_endpoint": api_endpoint}, - transport="grpc_asyncio", - ) - - response = await chat.ainvoke("async custom endpoint test") - assert response.content == "async custom endpoint response" - - mock_async_client.assert_called_once_with( - transport="grpc_asyncio", - client_options=ANY, - client_info=ANY, - ) - call_client_options = mock_async_client.call_args_list[0].kwargs[ - "client_options" - ] - assert call_client_options.api_key == param_api_key - # For gRPC async transport, URL is formatted to hostname:port - assert call_client_options.api_endpoint == "async-custom-endpoint.com:443" + # Force client to None to test error handling + chat.client = None + with pytest.raises(ValueError, match="Client not initialized"): + _ = chat.async_client -def test_base_url_preserves_existing_client_options() -> None: - """Test that `base_url` doesn't override existing `api_endpoint` in - `client_options`.""" - mock_client = Mock() +def test_api_endpoint_via_client_options() -> None: + """Test that `api_endpoint` via `client_options` is used in API calls.""" mock_generate_content = Mock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[Candidate(content=Content(parts=[Part(text="test response")]))] - ) - mock_client.return_value.generate_content = mock_generate_content - base_url = "https://base-url.com" - api_endpoint = "https://client-options-endpoint.com" + api_endpoint = "https://custom-endpoint.com" param_api_key = FAKE_API_KEY param_secret_api_key = SecretStr(param_api_key) - param_transport = "rest" - - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient", - mock_client, - ): - chat = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - base_url=base_url, - client_options={"api_endpoint": api_endpoint}, - transport=param_transport, - ) - response = chat.invoke("test") - assert response.content == "test response" + with patch("langchain_google_genai.chat_models.Client") as mock_client_class: + mock_client_instance = Mock() + mock_client_class.return_value = mock_client_instance - mock_client.assert_called_once_with( - transport=param_transport, - client_options=ANY, - client_info=ANY, - ) - call_client_options = mock_client.call_args_list[0].kwargs["client_options"] - assert call_client_options.api_key == param_api_key - # client_options.api_endpoint should take precedence over base_url - assert call_client_options.api_endpoint == api_endpoint - call_client_info = mock_client.call_args_list[0].kwargs["client_info"] - assert 
"langchain-google-genai" in call_client_info.user_agent - assert "ChatGoogleGenerativeAI" in call_client_info.user_agent - - -async def test_async_base_url_preserves_existing_client_options() -> None: - """Test that `base_url` doesn't override existing `api_endpoint` in async client.""" - mock_async_client = Mock() - mock_generate_content = AsyncMock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[ - Candidate( - content=Content(parts=[Part(text="async precedence test response")]) - ) - ] - ) - mock_async_client.return_value.generate_content = mock_generate_content - base_url = "https://async-base-url.com" - api_endpoint = "https://async-client-options-endpoint.com" - param_api_key = FAKE_API_KEY - param_secret_api_key = SecretStr(param_api_key) + mock_generate_content.return_value = GenerateContentResponse( + candidates=[Candidate(content=Content(parts=[Part(text="test response")]))] + ) + mock_client_instance.models.generate_content = mock_generate_content - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceAsyncClient", - mock_async_client, - ): chat = ChatGoogleGenerativeAI( model=MODEL_NAME, google_api_key=param_secret_api_key, - base_url=base_url, client_options={"api_endpoint": api_endpoint}, - transport="grpc_asyncio", - ) - - response = await chat.ainvoke("async precedence test") - assert response.content == "async precedence test response" - - mock_async_client.assert_called_once_with( - transport="grpc_asyncio", - client_options=ANY, - client_info=ANY, ) - call_client_options = mock_async_client.call_args_list[0].kwargs[ - "client_options" - ] - assert call_client_options.api_key == param_api_key - # client_options.api_endpoint should take precedence over base_url - # For gRPC async transport, URL is formatted to hostname:port - expected_endpoint = "async-client-options-endpoint.com:443" - assert call_client_options.api_endpoint == expected_endpoint - - -def test_grpc_base_url_valid_hostname() -> None: - """Test that valid `hostname:port` `base_url` works with gRPC.""" - mock_client = Mock() - mock_generate_content = Mock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[Candidate(content=Content(parts=[Part(text="grpc test response")]))] - ) - mock_client.return_value.generate_content = mock_generate_content - base_url = "example.com:443" - param_api_key = FAKE_API_KEY - param_secret_api_key = SecretStr(param_api_key) - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient", - mock_client, - ): - chat = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - base_url=base_url, - transport="grpc", + response = chat.invoke("test") + assert response.content == "test response" + mock_client_class.assert_called_once_with( + api_key=param_api_key, + http_options=ANY, ) - - response = chat.invoke("grpc test") - assert response.content == "grpc test response" - - mock_client.assert_called_once_with( - transport="grpc", - client_options=ANY, - client_info=ANY, - ) - call_client_options = mock_client.call_args_list[0].kwargs["client_options"] - assert call_client_options.api_endpoint == base_url + call_http_options = mock_client_class.call_args_list[0].kwargs["http_options"] + assert call_http_options.base_url == api_endpoint + assert "langchain-google-genai" in call_http_options.headers["User-Agent"] -async def test_async_grpc_base_url_valid_hostname() -> None: - """Test that valid `hostname:port` `base_url` works with `grpc_asyncio`.""" - 
mock_async_client = Mock() - mock_generate_content = AsyncMock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[ - Candidate(content=Content(parts=[Part(text="async grpc test response")])) - ] - ) - mock_async_client.return_value.generate_content = mock_generate_content - base_url = "async.example.com:443" +async def test_async_api_endpoint_via_client_options() -> None: + """Test that `api_endpoint` via `client_options` is used in async API calls.""" + api_endpoint = "https://async-custom-endpoint.com" param_api_key = FAKE_API_KEY param_secret_api_key = SecretStr(param_api_key) - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceAsyncClient", - mock_async_client, - ): - chat = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - base_url=base_url, - transport="grpc_asyncio", + with patch("langchain_google_genai.chat_models.Client") as mock_client_class: + mock_client_instance = Mock() + mock_client_class.return_value = mock_client_instance + + # Mock the aio.models.generate_content method for async calls + mock_aio = Mock() + mock_client_instance.aio = mock_aio + mock_aio_models = Mock() + mock_aio.models = mock_aio_models + mock_aio_models.generate_content = AsyncMock( + return_value=GenerateContentResponse( + candidates=[ + Candidate( + content=Content( + parts=[Part(text="async custom endpoint response")] + ) + ) + ] + ) ) - response = await chat.ainvoke("async grpc test") - assert response.content == "async grpc test response" - - mock_async_client.assert_called_once_with( - transport="grpc_asyncio", - client_options=ANY, - client_info=ANY, - ) - call_client_options = mock_async_client.call_args_list[0].kwargs["client_options"] - assert call_client_options.api_endpoint == base_url - - -def test_grpc_base_url_formats_https_without_path() -> None: - """Test that `https://` URLs without paths are formatted correctly for gRPC.""" - mock_client = Mock() - mock_generate_content = Mock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[Candidate(content=Content(parts=[Part(text="formatted response")]))] - ) - mock_client.return_value.generate_content = mock_generate_content - base_url = "https://custom.googleapis.com" - param_api_key = FAKE_API_KEY - param_secret_api_key = SecretStr(param_api_key) - - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient", - mock_client, - ): chat = ChatGoogleGenerativeAI( model=MODEL_NAME, google_api_key=param_secret_api_key, - base_url=base_url, - transport="grpc", - ) - - response = chat.invoke("format test") - assert response.content == "formatted response" - - call_client_options = mock_client.call_args_list[0].kwargs["client_options"] - # Should be formatted as hostname:port for gRPC - assert call_client_options.api_endpoint == "custom.googleapis.com:443" - - -def test_grpc_base_url_with_path_raises_error() -> None: - """Test that `base_url` with path raises `ValueError` for gRPC.""" - base_url = "https://webhook.site/path-not-allowed" - param_secret_api_key = SecretStr(FAKE_API_KEY) - - with pytest.raises( - ValueError, match="gRPC transport 'grpc' does not support URL paths" - ): - ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - base_url=base_url, - transport="grpc", - ) - - -def test_grpc_asyncio_base_url_with_path_raises_error() -> None: - """Test that `base_url` with path raises `ValueError` for `grpc_asyncio`.""" - base_url = "example.com/api/v1" - 
param_secret_api_key = SecretStr(FAKE_API_KEY) - - with pytest.raises( - ValueError, match="gRPC transport 'grpc_asyncio' does not support URL paths" - ): - ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - base_url=base_url, - transport="grpc_asyncio", + client_options={"api_endpoint": api_endpoint}, ) - -def test_grpc_base_url_adds_default_port() -> None: - """Test that hostname without port gets default port `443` for gRPC.""" - mock_client = Mock() - mock_generate_content = Mock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[ - Candidate(content=Content(parts=[Part(text="default port response")])) - ] - ) - mock_client.return_value.generate_content = mock_generate_content - base_url = "custom.example.com" - param_api_key = FAKE_API_KEY - param_secret_api_key = SecretStr(param_api_key) - - with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient", - mock_client, - ): - chat = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=param_secret_api_key, - base_url=base_url, - transport="grpc", + response = await chat.ainvoke("async custom endpoint test") + assert response.content == "async custom endpoint response" + mock_client_class.assert_called_once_with( + api_key=param_api_key, + http_options=ANY, ) - - response = chat.invoke("default port test") - assert response.content == "default port response" - - call_client_options = mock_client.call_args_list[0].kwargs["client_options"] - # Should add default port 443 - assert call_client_options.api_endpoint == "custom.example.com:443" + call_http_options = mock_client_class.call_args_list[0].kwargs["http_options"] + assert call_http_options.base_url == api_endpoint + assert "langchain-google-genai" in call_http_options.headers["User-Agent"] def test_default_metadata_field_alias() -> None: - """Test 'default_metadata' and 'default_metadata_input' fields work correctly.""" + """Test `default_metadata` and `default_metadata_input` fields work correctly.""" # Test with default_metadata_input field name (alias) - should accept None without # error # This is the main issue: LangSmith Playground passes None to default_metadata_input @@ -995,7 +669,7 @@ def test_default_metadata_field_alias() -> None: "content": { "parts": [ { - "function_call": glm.FunctionCall( + "function_call": FunctionCall( name="Information", args={"name": "Ben"} ) } @@ -1024,7 +698,7 @@ def test_default_metadata_field_alias() -> None: "content": { "parts": [ { - "function_call": glm.FunctionCall( + "function_call": FunctionCall( name="Information", args={"info": ["A", "B", "C"]}, ) @@ -1054,7 +728,7 @@ def test_default_metadata_field_alias() -> None: "content": { "parts": [ { - "function_call": glm.FunctionCall( + "function_call": FunctionCall( name="Information", args={ "people": [ @@ -1101,7 +775,7 @@ def test_default_metadata_field_alias() -> None: "content": { "parts": [ { - "function_call": glm.FunctionCall( + "function_call": FunctionCall( name="Information", args={"info": [[1, 2, 3], [4, 5, 6]]}, ) @@ -1132,7 +806,7 @@ def test_default_metadata_field_alias() -> None: "parts": [ {"text": "Mike age is 30"}, { - "function_call": glm.FunctionCall( + "function_call": FunctionCall( name="Information", args={"name": "Ben"} ) }, @@ -1161,7 +835,7 @@ def test_default_metadata_field_alias() -> None: "content": { "parts": [ { - "function_call": glm.FunctionCall( + "function_call": FunctionCall( name="Information", args={"name": "Ben"} ) }, @@ -1191,7 +865,7 @@ def 
test_default_metadata_field_alias() -> None: def test_parse_response_candidate(raw_candidate: dict, expected: AIMessage) -> None: with patch("langchain_google_genai.chat_models.uuid.uuid4") as uuid4: uuid4.return_value = "00000000-0000-0000-0000-00000000000" - response_candidate = glm.Candidate(raw_candidate) + response_candidate = Candidate.model_validate(raw_candidate) result = _parse_response_candidate(response_candidate) assert result.content == expected.content assert result.tool_calls == expected.tool_calls @@ -1214,11 +888,11 @@ def test_parse_response_candidate_includes_model_provider() -> None: """Test `_parse_response_candidate` has `model_provider` in `response_metadata`.""" raw_candidate = { "content": {"parts": [{"text": "Hello, world!"}]}, - "finish_reason": 1, + "finish_reason": "STOP", "safety_ratings": [], } - response_candidate = glm.Candidate(raw_candidate) + response_candidate = Candidate.model_validate(raw_candidate) result = _parse_response_candidate(response_candidate) assert hasattr(result, "response_metadata") @@ -1231,6 +905,33 @@ def test_parse_response_candidate_includes_model_provider() -> None: assert result.response_metadata["model_provider"] == "google_genai" +def test_parse_response_candidate_includes_model_name() -> None: + """Test that `_parse_response_candidate` includes `model_name` in + `response_metadata`.""" + raw_candidate = { + "content": {"parts": [{"text": "Hello, world!"}]}, + "finish_reason": "STOP", + "safety_ratings": [], + } + + response_candidate = Candidate.model_validate(raw_candidate) + result = _parse_response_candidate( + response_candidate, model_name="gemini-2.5-flash" + ) + + assert hasattr(result, "response_metadata") + assert result.response_metadata["model_provider"] == "google_genai" + assert result.response_metadata["model_name"] == "gemini-2.5-flash" + + # No name + + result = _parse_response_candidate(response_candidate) + + assert hasattr(result, "response_metadata") + assert result.response_metadata["model_provider"] == "google_genai" + assert "model_name" not in result.response_metadata + + def test_serialize() -> None: llm = ChatGoogleGenerativeAI(model=MODEL_NAME, google_api_key="test-key") serialized = dumps(llm) @@ -1264,10 +965,40 @@ def test__convert_tool_message_to_parts__sets_tool_name( parts = _convert_tool_message_to_parts(tool_message) assert len(parts) == 1 part = parts[0] + assert part.function_response is not None assert part.function_response.name == "tool_name" assert part.function_response.response == {"output": "test_content"} +def test_supports_thinking() -> None: + """Test that `_supports_thinking` correctly identifies model capabilities.""" + llm_image_gen = ChatGoogleGenerativeAI( + model="gemini-2.0-flash-preview-image-generation", + google_api_key=SecretStr(FAKE_API_KEY), + ) + assert not llm_image_gen._supports_thinking() + llm_tts = ChatGoogleGenerativeAI( + model="gemini-2.5-flash-preview-tts", + google_api_key=SecretStr(FAKE_API_KEY), + ) + assert not llm_tts._supports_thinking() + llm_normal = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + google_api_key=SecretStr(FAKE_API_KEY), + ) + assert llm_normal._supports_thinking() + llm_pro = ChatGoogleGenerativeAI( + model="gemini-2.5-pro", + google_api_key=SecretStr(FAKE_API_KEY), + ) + assert llm_pro._supports_thinking() + llm_15 = ChatGoogleGenerativeAI( + model="gemini-1.5-flash", + google_api_key=SecretStr(FAKE_API_KEY), + ) + assert not llm_15._supports_thinking() + + def test_temperature_range_pydantic_validation() -> None: """Test that 
temperature is in the range `[0.0, 2.0]`.""" with pytest.raises(ValidationError): @@ -1299,46 +1030,197 @@ def test_temperature_range_model_validation() -> None: ChatGoogleGenerativeAI(model=MODEL_NAME, temperature=-0.5) -def test_model_kwargs() -> None: +@patch("langchain_google_genai.chat_models.Client") +def test_model_kwargs(mock_client: Mock) -> None: """Test we can transfer unknown params to `model_kwargs`.""" llm = ChatGoogleGenerativeAI( model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), convert_system_message_to_human=True, model_kwargs={"foo": "bar"}, ) - assert llm.model == f"models/{MODEL_NAME}" + assert llm.model == MODEL_NAME assert llm.convert_system_message_to_human is True assert llm.model_kwargs == {"foo": "bar"} - with pytest.warns(match="transferred to model_kwargs"): llm = ChatGoogleGenerativeAI( model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), convert_system_message_to_human=True, foo="bar", ) - assert llm.model == f"models/{MODEL_NAME}" - assert llm.convert_system_message_to_human is True - assert llm.model_kwargs == {"foo": "bar"} + assert llm.model == MODEL_NAME + assert llm.convert_system_message_to_human is True + assert llm.model_kwargs == {"foo": "bar"} + + +@pytest.mark.parametrize( + "is_async,method_name,client_method", + [ + (False, "_generate", "models.generate_content"), # Sync + (True, "_agenerate", "aio.models.generate_content"), # Async + ], +) +@pytest.mark.parametrize( + "instance_timeout,call_timeout,expected_timeout_ms,should_have_timeout", + [ + (5.0, None, 5000, True), # Instance-level timeout (converted to ms) + (5.0, 10.0, 10000, True), # Call-level overrides instance (in ms) + (None, None, None, False), # No timeout anywhere + ], +) +async def test_timeout_parameter_handling( + is_async: bool, + method_name: str, + client_method: str, + instance_timeout: float | None, + call_timeout: float | None, + expected_timeout_ms: int | None, + should_have_timeout: bool, +) -> None: + """Test timeout parameter handling for sync and async methods.""" + with patch( + "langchain_google_genai.chat_models.ChatGoogleGenerativeAI.client", create=True + ): + # Create LLM with optional instance-level timeout + llm_kwargs: dict[str, Any] = { + "model": MODEL_NAME, + "google_api_key": SecretStr(FAKE_API_KEY), + } + if instance_timeout is not None: + llm_kwargs["timeout"] = instance_timeout + llm = ChatGoogleGenerativeAI(**llm_kwargs) -def test_retry_decorator_with_custom_parameters() -> None: - # Mock the generation method - mock_generation_method = Mock() - # TODO: remove ignore once google-auth has types. 
- mock_generation_method.side_effect = ResourceExhausted("Quota exceeded") # type: ignore[no-untyped-call] - - # Call the function with custom retry parameters - with pytest.raises(ResourceExhausted): - _chat_with_retry( - generation_method=mock_generation_method, - max_retries=3, - wait_exponential_multiplier=1.5, - wait_exponential_min=2.0, - wait_exponential_max=30.0, + # Mock the client method + mock_response = GenerateContentResponse( + candidates=[Candidate(content=Content(parts=[Part(text="Test response")]))] + ) + + if is_async: + mock_method = AsyncMock(return_value=mock_response) + else: + mock_method = Mock(return_value=mock_response) + + # Set up the mock on the client + client_parts = client_method.split(".") + mock_client = llm.client + for part in client_parts[:-1]: + mock_client = getattr(mock_client, part) + setattr(mock_client, client_parts[-1], mock_method) + + messages: list[BaseMessage] = [HumanMessage(content="Hello")] + + # Call the appropriate method with optional call-level timeout + method = getattr(llm, method_name) + call_kwargs: dict[str, Any] = {} + if call_timeout is not None: + call_kwargs["timeout"] = call_timeout + + if is_async: + await method(messages, **call_kwargs) + else: + method(messages, **call_kwargs) + + # Verify http_options were set correctly in config + mock_method.assert_called_once() + call_args = mock_method.call_args + config = call_args.kwargs.get("config") or call_args[0][0].kwargs.get("config") + + if should_have_timeout: + assert config.http_options is not None + assert config.http_options.timeout == expected_timeout_ms + else: + assert config.http_options is None or config.http_options.timeout is None + + +@pytest.mark.parametrize( + "instance_timeout,expected_timeout_ms,should_have_timeout", + [ + (5.0, 5000, True), # Instance-level timeout (converted to ms) + (None, None, False), # No timeout + ], +) +def test_timeout_streaming_parameter_handling( + instance_timeout: float | None, + expected_timeout_ms: int | None, + should_have_timeout: bool, +) -> None: + """Test timeout parameter handling for streaming methods.""" + # Create LLM with optional instance-level timeout + llm_kwargs: dict[str, Any] = { + "model": MODEL_NAME, + "google_api_key": SecretStr(FAKE_API_KEY), + } + if instance_timeout is not None: + llm_kwargs["timeout"] = instance_timeout + + llm = ChatGoogleGenerativeAI(**llm_kwargs) + assert llm.client is not None + + # Mock the client method + def mock_stream() -> Iterator[GenerateContentResponse]: + yield GenerateContentResponse( + candidates=[Candidate(content=Content(parts=[Part(text="chunk1")]))] ) - # Verify that the retry mechanism used the custom parameters - assert mock_generation_method.call_count == 3 + with patch.object( + llm.client.models, "generate_content_stream", return_value=mock_stream() + ): + # Call _stream (which should include timeout in config) + messages: list[BaseMessage] = [HumanMessage(content="Hello")] + request = llm._prepare_request(messages) + + # Verify timeout was set correctly in config + config = request["config"] + if should_have_timeout: + assert config.http_options is not None + assert config.http_options.timeout == expected_timeout_ms + else: + assert config.http_options is None or config.http_options.timeout is None + + +@pytest.mark.parametrize( + "instance_max_retries,call_max_retries,expected_max_retries,should_have_max_retries", + [ + (1, None, 1, True), # Instance-level max_retries + (3, 5, 5, True), # Call-level overrides instance + (6, None, 6, True), # Default instance 
value + ], +) +def test_max_retries_parameter_handling( + instance_max_retries: int, + call_max_retries: int | None, + expected_max_retries: int, + should_have_max_retries: bool, +) -> None: + """Test `max_retries` handling for sync and async methods.""" + # Instance-level max_retries + llm_kwargs: dict[str, Any] = { + "model": MODEL_NAME, + "google_api_key": SecretStr(FAKE_API_KEY), + "max_retries": instance_max_retries, + } + + llm = ChatGoogleGenerativeAI(**llm_kwargs) + + messages: list[BaseMessage] = [HumanMessage(content="Hello")] + + # Prepare request with optional call-level max_retries + call_kwargs: dict[str, Any] = {} + if call_max_retries is not None: + call_kwargs["max_retries"] = call_max_retries + + request = llm._prepare_request(messages, **call_kwargs) + + # Verify max_retries was set correctly in http_options.retry_options + config = request["config"] + if should_have_max_retries: + assert config.http_options is not None + assert config.http_options.retry_options is not None + assert config.http_options.retry_options.attempts == expected_max_retries + else: + assert config.http_options is None or config.http_options.retry_options is None @pytest.mark.parametrize( @@ -1375,7 +1257,10 @@ def test_retry_decorator_with_custom_parameters() -> None: }, } ], - "prompt_feedback": {"block_reason": 0, "safety_ratings": []}, + "prompt_feedback": { + "block_reason": "BLOCKED_REASON_UNSPECIFIED", + "safety_ratings": [], + }, "usage_metadata": { "prompt_token_count": 10, "candidates_token_count": 5, @@ -1383,8 +1268,17 @@ def test_retry_decorator_with_custom_parameters() -> None: }, }, { + "google_maps_widget_context_token": None, "grounding_chunks": [ - {"web": {"uri": "https://example.com", "title": "Example Site"}} + { + "maps": None, + "retrieved_context": None, + "web": { + "domain": None, + "uri": "https://example.com", + "title": "Example Site", + }, + } ], "grounding_supports": [ { @@ -1398,6 +1292,10 @@ def test_retry_decorator_with_custom_parameters() -> None: "confidence_scores": [0.95], } ], + "retrieval_metadata": None, + "retrieval_queries": None, + "search_entry_point": None, + "source_flagging_uris": None, "web_search_queries": ["test query"], }, ), @@ -1409,7 +1307,10 @@ def test_retry_decorator_with_custom_parameters() -> None: "content": {"parts": [{"text": "Test response"}]}, } ], - "prompt_feedback": {"block_reason": 0, "safety_ratings": []}, + "prompt_feedback": { + "block_reason": "BLOCKED_REASON_UNSPECIFIED", + "safety_ratings": [], + }, "usage_metadata": { "prompt_token_count": 10, "candidates_token_count": 5, @@ -1423,8 +1324,8 @@ def test_retry_decorator_with_custom_parameters() -> None: def test_response_to_result_grounding_metadata( raw_response: dict, expected_grounding_metadata: dict ) -> None: - """Test that `_response_to_result` includes grounding_metadata in the response.""" - response = GenerateContentResponse(raw_response) + """Test that `_response_to_result` includes `grounding_metadata` in the response.""" + response = GenerateContentResponse.model_validate(raw_response) result = _response_to_result(response, stream=False) assert len(result.generations) == len(raw_response["candidates"]) @@ -1515,7 +1416,10 @@ def test_grounding_metadata_to_citations_conversion() -> None: }, } ], - "prompt_feedback": {"block_reason": 0, "safety_ratings": []}, + "prompt_feedback": { + "block_reason": "BLOCKED_REASON_UNSPECIFIED", + "safety_ratings": [], + }, "usage_metadata": { "prompt_token_count": 10, "candidates_token_count": 20, @@ -1523,7 +1427,7 @@ def 
test_grounding_metadata_to_citations_conversion() -> None: }, } - response = GenerateContentResponse(raw_response) + response = GenerateContentResponse.model_validate(raw_response) result = _response_to_result(response, stream=False) assert len(result.generations) == 1 @@ -1581,7 +1485,10 @@ def test_empty_grounding_metadata_no_citations() -> None: "grounding_metadata": {}, } ], - "prompt_feedback": {"block_reason": 0, "safety_ratings": []}, + "prompt_feedback": { + "block_reason": "BLOCKED_REASON_UNSPECIFIED", + "safety_ratings": [], + }, "usage_metadata": { "prompt_token_count": 5, "candidates_token_count": 8, @@ -1589,7 +1496,7 @@ def test_empty_grounding_metadata_no_citations() -> None: }, } - response = GenerateContentResponse(raw_response) + response = GenerateContentResponse.model_validate(raw_response) result = _response_to_result(response, stream=False) message = result.generations[0].message @@ -1634,7 +1541,10 @@ def test_grounding_metadata_missing_optional_fields() -> None: }, } ], - "prompt_feedback": {"block_reason": 0, "safety_ratings": []}, + "prompt_feedback": { + "block_reason": "BLOCKED_REASON_UNSPECIFIED", + "safety_ratings": [], + }, "usage_metadata": { "prompt_token_count": 5, "candidates_token_count": 3, @@ -1642,7 +1552,7 @@ def test_grounding_metadata_missing_optional_fields() -> None: }, } - response = GenerateContentResponse(raw_response) + response = GenerateContentResponse.model_validate(raw_response) result = _response_to_result(response, stream=False) message = result.generations[0].message @@ -1699,7 +1609,10 @@ def test_grounding_metadata_multiple_parts() -> None: }, } ], - "prompt_feedback": {"block_reason": 0, "safety_ratings": []}, + "prompt_feedback": { + "block_reason": "BLOCKED_REASON_UNSPECIFIED", + "safety_ratings": [], + }, "usage_metadata": { "prompt_token_count": 10, "candidates_token_count": 10, @@ -1707,7 +1620,7 @@ def test_grounding_metadata_multiple_parts() -> None: }, } - response = GenerateContentResponse(raw_response) + response = GenerateContentResponse.model_validate(raw_response) result = _response_to_result(response, stream=False) message = result.generations[0].message @@ -1720,246 +1633,59 @@ def test_grounding_metadata_multiple_parts() -> None: assert grounding["grounding_supports"][0]["segment"]["part_index"] == 1 -@pytest.mark.parametrize( - "is_async,mock_target,method_name", - [ - (False, "_chat_with_retry", "_generate"), # Sync - (True, "_achat_with_retry", "_agenerate"), # Async - ], -) -@pytest.mark.parametrize( - "instance_timeout,call_timeout,expected_timeout,should_have_timeout", - [ - (5.0, None, 5.0, True), # Instance-level timeout - (5.0, 10.0, 10.0, True), # Call-level overrides instance - (None, None, None, False), # No timeout anywhere - ], -) -async def test_timeout_parameter_handling( - is_async: bool, - mock_target: str, - method_name: str, - instance_timeout: float | None, - call_timeout: float | None, - expected_timeout: float | None, - should_have_timeout: bool, -) -> None: - """Test timeout parameter handling for sync and async methods.""" - with patch(f"langchain_google_genai.chat_models.{mock_target}") as mock_retry: - mock_retry.return_value = GenerateContentResponse( - { - "candidates": [ - { - "content": {"parts": [{"text": "Test response"}]}, - "finish_reason": "STOP", - } - ] - } - ) - - # Create LLM with optional instance-level timeout - llm_kwargs = { - "model": "gemini-2.5-flash", - "google_api_key": SecretStr(FAKE_API_KEY), - } - if instance_timeout is not None: - llm_kwargs["timeout"] = 
instance_timeout - - llm = ChatGoogleGenerativeAI(**llm_kwargs) - messages: list[BaseMessage] = [HumanMessage(content="Hello")] - - # Call the appropriate method with optional call-level timeout - method = getattr(llm, method_name) - call_kwargs = {} - if call_timeout is not None: - call_kwargs["timeout"] = call_timeout - - if is_async: - await method(messages, **call_kwargs) - else: - method(messages, **call_kwargs) - - # Verify timeout was passed correctly - mock_retry.assert_called_once() - call_kwargs_actual = mock_retry.call_args[1] - - if should_have_timeout: - assert "timeout" in call_kwargs_actual - assert call_kwargs_actual["timeout"] == expected_timeout - else: - assert "timeout" not in call_kwargs_actual - - -@pytest.mark.parametrize( - "instance_timeout,expected_timeout,should_have_timeout", - [ - (5.0, 5.0, True), # Instance-level timeout - (None, None, False), # No timeout - ], -) -@patch("langchain_google_genai.chat_models._chat_with_retry") -def test_timeout_streaming_parameter_handling( - mock_retry: Mock, - instance_timeout: float | None, - expected_timeout: float | None, - should_have_timeout: bool, -) -> None: - """Test timeout parameter handling for streaming methods.""" - - # Mock the return value for _chat_with_retry to return an iterator - def mock_stream() -> Iterator[GenerateContentResponse]: - yield GenerateContentResponse( - { - "candidates": [ - { - "content": {"parts": [{"text": "chunk1"}]}, - "finish_reason": "STOP", - } - ] - } - ) - - mock_retry.return_value = mock_stream() - - # Create LLM with optional instance-level timeout - llm_kwargs = { - "model": "gemini-2.5-flash", - "google_api_key": SecretStr(FAKE_API_KEY), - } - if instance_timeout is not None: - llm_kwargs["timeout"] = instance_timeout - - llm = ChatGoogleGenerativeAI(**llm_kwargs) - - # Call _stream (which should pass timeout to _chat_with_retry) - messages: list[BaseMessage] = [HumanMessage(content="Hello")] - list(llm._stream(messages)) # Convert generator to list to trigger execution - - # Verify timeout was passed correctly - mock_retry.assert_called_once() - call_kwargs = mock_retry.call_args[1] - - if should_have_timeout: - assert "timeout" in call_kwargs - assert call_kwargs["timeout"] == expected_timeout - else: - assert "timeout" not in call_kwargs - - -@pytest.mark.parametrize( - "is_async,mock_target,method_name", - [ - (False, "_chat_with_retry", "_generate"), # Sync - (True, "_achat_with_retry", "_agenerate"), # Async - ], -) -@pytest.mark.parametrize( - "instance_max_retries,call_max_retries,expected_max_retries,should_have_max_retries", - [ - (1, None, 1, True), # Instance-level max_retries - (3, 5, 5, True), # Call-level overrides instance - (6, None, 6, True), # Default instance value - ], -) -async def test_max_retries_parameter_handling( - is_async: bool, - mock_target: str, - method_name: str, - instance_max_retries: int, - call_max_retries: int | None, - expected_max_retries: int, - should_have_max_retries: bool, -) -> None: - """Test `max_retries` handling for sync and async methods.""" - with patch(f"langchain_google_genai.chat_models.{mock_target}") as mock_retry: - mock_retry.return_value = GenerateContentResponse( - { - "candidates": [ - { - "content": {"parts": [{"text": "Test response"}]}, - "finish_reason": "STOP", - } - ] - } - ) - - # Instance-level max_retries - llm_kwargs = { - "model": "gemini-2.5-flash", - "google_api_key": SecretStr(FAKE_API_KEY), - "max_retries": instance_max_retries, - } - - llm = ChatGoogleGenerativeAI(**llm_kwargs) - messages: 
list[BaseMessage] = [HumanMessage(content="Hello")] - - # Call the appropriate method with optional call-level max_retries - method = getattr(llm, method_name) - call_kwargs = {} - if call_max_retries is not None: - call_kwargs["max_retries"] = call_max_retries - - if is_async: - await method(messages, **call_kwargs) - else: - method(messages, **call_kwargs) - - # Verify max_retries was passed correctly - mock_retry.assert_called_once() - call_kwargs_actual = mock_retry.call_args[1] - - if should_have_max_retries: - assert "max_retries" in call_kwargs_actual - assert call_kwargs_actual["max_retries"] == expected_max_retries - else: - assert "max_retries" not in call_kwargs_actual - - def test_thinking_config_merging_with_generation_config() -> None: """Test that `thinking_config` is properly merged when passed in `generation_config`.""" - with patch("langchain_google_genai.chat_models._chat_with_retry") as mock_retry: - # Mock response with thinking content followed by regular text - mock_response = GenerateContentResponse( - { - "candidates": [ - { - "content": { - "parts": [ - Part(text="Let me think about this...", thought=True), - Part(text="There are 2 O's in Google."), - ] - }, - "finish_reason": "STOP", - } - ], - "usage_metadata": { - "prompt_token_count": 20, - "candidates_token_count": 15, - "total_token_count": 35, - "cached_content_token_count": 0, - }, - } - ) - mock_retry.return_value = mock_response + # Mock response with thinking content followed by regular text + mock_response = GenerateContentResponse( + candidates=[ + Candidate( + content=Content( + parts=[ + Part(text="Let me think about this...", thought=True), + Part(text="There are 2 O's in Google."), + ] + ), + finish_reason="STOP", + ) + ], + usage_metadata=GenerateContentResponseUsageMetadata( + prompt_token_count=20, + candidates_token_count=15, + total_token_count=35, + cached_content_token_count=0, + ), + ) - llm = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=SecretStr(FAKE_API_KEY), - ) + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + ) + assert llm.client is not None + with patch.object( + llm.client.models, "generate_content", return_value=mock_response + ) as mock_client_method: result = llm.invoke( "How many O's are in Google?", generation_config={"thinking_config": {"include_thoughts": True}}, ) # Verify the call was made with merged config - mock_retry.assert_called_once() - call_args = mock_retry.call_args - request = call_args.kwargs["request"] - assert hasattr(request, "generation_config") - assert hasattr(request.generation_config, "thinking_config") - assert request.generation_config.thinking_config.include_thoughts is True + mock_client_method.assert_called_once() + call_args = mock_client_method.call_args + # Extract config from kwargs or positional args + config = call_args.kwargs.get("config") + if config is None and len(call_args.args) > 0: + # Config might be in positional args as well + for arg in call_args.args: + if hasattr(arg, "thinking_config"): + config = arg + break + + assert config is not None + assert hasattr(config, "thinking_config") + assert config.thinking_config.include_thoughts is True # Verify response structure assert isinstance(result, AIMessage) @@ -1993,7 +1719,7 @@ def test_modalities_override_in_generation_config() -> None: content=Content( parts=[ Part( - inline_data=glm.Blob( + inline_data=Blob( mime_type="image/jpeg", data=base64.b64decode( 
"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wAARCAABAAEDASIAAhEBAxEB/8QAFQABAQAAAAAAAAAAAAAAAAAAAAv/xAAUEAEAAAAAAAAAAAAAAAAAAAAA/8QAFQEBAQAAAAAAAAAAAAAAAAAAAAX/xAAUEQEAAAAAAAAAAAAAAAAAAAAA/9oADAMBAAIRAxEAPwCdABmX/9k=" @@ -2003,7 +1729,7 @@ def test_modalities_override_in_generation_config() -> None: Part(text="Meow! Here's a cat image for you."), ] ), - finish_reason=Candidate.FinishReason.STOP, + finish_reason="STOP", ) ] # Create proper usage metadata using dict approach @@ -2102,40 +1828,41 @@ def test_chat_google_genai_image_content_blocks() -> None: """Test generating an image with mocked response and `content_blocks` translation.""" mock_response = GenerateContentResponse( - { - "candidates": [ - { - "content": { - "parts": [ - {"text": "Meow!"}, - { - "inline_data": { - "mime_type": "image/png", - "data": base64.b64decode( - "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAf" - "FcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" - ), - } - }, - ] - }, - "finish_reason": "STOP", - } - ], - "usage_metadata": { - "prompt_token_count": 10, - "candidates_token_count": 5, - "total_token_count": 15, - }, - } + candidates=[ + Candidate( + content=Content( + parts=[ + Part(text="Meow!"), + Part( + inline_data=Blob( + mime_type="image/png", + data=base64.b64decode( + "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAf" + "FcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" + ), + ) + ), + ] + ), + finish_reason="STOP", + ) + ], + usage_metadata=GenerateContentResponseUsageMetadata( + prompt_token_count=10, + candidates_token_count=5, + total_token_count=15, + ), ) llm = ChatGoogleGenerativeAI( model=MODEL_NAME, google_api_key=SecretStr(FAKE_API_KEY), ) + assert llm.client is not None - with patch.object(llm.client, "generate_content", return_value=mock_response): + with patch.object( + llm.client.models, "generate_content", return_value=mock_response + ): result = llm.invoke( "Say 'meow!' 
and then Generate an image of a cat.", generation_config={ @@ -2217,20 +1944,18 @@ def test_content_blocks_translation_with_mixed_image_content() -> None: def test_chat_google_genai_invoke_with_audio_mocked() -> None: """Test generating audio with mocked response and `content_blocks` translation.""" mock_response = GenerateContentResponse( - { - "candidates": [ - { - # Empty content when audio is in additional_kwargs - "content": {"parts": []}, - "finish_reason": "STOP", - } - ], - "usage_metadata": { - "prompt_token_count": 10, - "candidates_token_count": 5, - "total_token_count": 15, - }, - } + candidates=[ + Candidate( + # Empty content when audio is in additional_kwargs + content=Content(parts=[]), + finish_reason="STOP", + ) + ], + usage_metadata=GenerateContentResponseUsageMetadata( + prompt_token_count=10, + candidates_token_count=5, + total_token_count=15, + ), ) wav_bytes = ( # (minimal WAV header) @@ -2243,8 +1968,11 @@ def test_chat_google_genai_invoke_with_audio_mocked() -> None: google_api_key=SecretStr(FAKE_API_KEY), response_modalities=[Modality.AUDIO], ) + assert llm.client is not None - with patch.object(llm.client, "generate_content", return_value=mock_response): + with patch.object( + llm.client.models, "generate_content", return_value=mock_response + ): with patch( "langchain_google_genai.chat_models._parse_response_candidate" ) as mock_parse: @@ -2480,16 +2208,17 @@ def test_signature_round_trip_conversion() -> None: ] ) - with patch("langchain_google_genai.chat_models._chat_with_retry") as mock_chat: - mock_chat.return_value = mock_response - - llm = ChatGoogleGenerativeAI( - model=MODEL_NAME, - google_api_key=SecretStr(FAKE_API_KEY), - output_version="v1", - include_thoughts=True, - ) + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + output_version="v1", + include_thoughts=True, + ) + assert llm.client is not None + with patch.object( + llm.client.models, "generate_content", return_value=mock_response + ): # First call - get response with signatures result = llm.invoke("Test message") @@ -2526,14 +2255,17 @@ def test_signature_round_trip_conversion() -> None: HumanMessage(content="Follow up"), ] - # This should trigger signature conversion - mock_chat.return_value = GenerateContentResponse( + # Set up mock for the follow-up response + follow_up_response = GenerateContentResponse( candidates=[ Candidate(content=Content(parts=[Part(text="Follow up response")])) ] ) - follow_up = llm.invoke(conversation) + with patch.object( + llm.client.models, "generate_content", return_value=follow_up_response + ): + follow_up = llm.invoke(conversation) # Verify conversion was called assert mock_convert.call_count >= 1 @@ -2611,15 +2343,19 @@ def test_parse_chat_history_uses_index_for_signature() -> None: # Check the result model_content = formatted_messages[0] assert model_content.role == "model" - assert len(model_content.parts) == 1 - part = model_content.parts[0] + assert model_content.parts is not None + assert len(model_content.parts) == 2 - # Check if function_call is present - assert part.function_call is not None - assert part.function_call.name == "my_tool" + # First part should be the thinking text (thinking blocks come first) + thinking_part = model_content.parts[0] + assert thinking_part.thought is True + assert thinking_part.text == "I should use the tool." 
- # Check if thought_signature is correctly attached - assert part.thought_signature == sig_bytes + # Second part should be the function call with signature + function_part = model_content.parts[1] + assert function_part.function_call is not None + assert function_part.function_call.name == "my_tool" + assert function_part.thought_signature == sig_bytes def test_system_message_only_raises_error() -> None: @@ -2632,45 +2368,598 @@ def test_system_message_only_raises_error() -> None: # Should raise ValueError when only SystemMessage is provided with pytest.raises( ValueError, - match=r"No content messages found. The Gemini API requires at least one", + match=r"contents are required\.", ): llm.invoke([SystemMessage(content="You are a helpful assistant")]) -def test_system_message_with_additional_message_works() -> None: - """Test that `SystemMessage` works when combined with other messages.""" - mock_response = GenerateContentResponse( +def test_convert_to_parts_text_only() -> None: + """Test `_convert_to_parts` with text content.""" + # Test single string + result = _convert_to_parts("Hello, world!") + assert len(result) == 1 + assert result[0].text == "Hello, world!" + assert result[0].inline_data is None + # Test list of strings + result = _convert_to_parts(["Hello", "world", "!"]) + assert len(result) == 3 + assert result[0].text == "Hello" + assert result[1].text == "world" + assert result[2].text == "!" + + +def test_convert_to_parts_text_content_block() -> None: + """Test `_convert_to_parts` with text content blocks.""" + content = [{"type": "text", "text": "Hello, world!"}] + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].text == "Hello, world!" + + +def test_convert_to_parts_image_url() -> None: + """Test `_convert_to_parts` with `image_url` content blocks.""" + content = [{"type": "image_url", "image_url": {"url": SMALL_VIEWABLE_BASE64_IMAGE}}] + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].inline_data is not None + assert result[0].inline_data.mime_type == "image/png" + + +def test_convert_to_parts_image_url_string() -> None: + """Test `_convert_to_parts` with `image_url` as string.""" + content = [{"type": "image_url", "image_url": SMALL_VIEWABLE_BASE64_IMAGE}] + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].inline_data is not None + assert result[0].inline_data.mime_type == "image/png" + + +def test_convert_to_parts_file_data_url() -> None: + """Test `_convert_to_parts` with file data URL.""" + content = [ { - "candidates": [ - { - "content": {"parts": [{"text": "Hello! 
I'm ready to help."}]}, - "finish_reason": "STOP", - } - ], - "usage_metadata": { - "prompt_token_count": 10, - "candidates_token_count": 5, - "total_token_count": 15, - }, + "type": "file", + "source_type": "url", + "url": "https://example.com/image.jpg", + "mime_type": "image/jpeg", } + ] + with patch("langchain_google_genai.chat_models.ImageBytesLoader") as mock_loader: + mock_loader_instance = Mock() + mock_loader_instance._bytes_from_url.return_value = b"fake_image_data" + mock_loader.return_value = mock_loader_instance + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].inline_data is not None + assert result[0].inline_data.mime_type == "image/jpeg" + assert result[0].inline_data.data == b"fake_image_data" + + +def test_convert_to_parts_file_data_base64() -> None: + """Test `_convert_to_parts` with file data base64.""" + content = [ + { + "type": "file", + "source_type": "base64", + "data": "SGVsbG8gV29ybGQ=", # "Hello World" in base64 + "mime_type": "text/plain", + } + ] + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].inline_data is not None + assert result[0].inline_data.mime_type == "text/plain" + assert result[0].inline_data.data == b"Hello World" + + +def test_convert_to_parts_file_data_auto_mime_type() -> None: + """Test `_convert_to_parts` with auto-detected mime type.""" + content = [ + { + "type": "file", + "source_type": "base64", + "data": "SGVsbG8gV29ybGQ=", + # No mime_type specified, should be auto-detected + } + ] + with patch("langchain_google_genai.chat_models.mimetypes.guess_type") as mock_guess: + mock_guess.return_value = ("text/plain", None) + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].inline_data is not None + assert result[0].inline_data.mime_type == "text/plain" + + +def test_convert_to_parts_media_with_data() -> None: + """Test `_convert_to_parts` with media type containing data.""" + content = [{"type": "media", "mime_type": "video/mp4", "data": b"fake_video_data"}] + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].inline_data is not None + assert result[0].inline_data.mime_type == "video/mp4" + assert result[0].inline_data.data == b"fake_video_data" + + +def test_convert_to_parts_media_with_file_uri() -> None: + """Test `_convert_to_parts` with media type containing file_uri.""" + content = [ + { + "type": "media", + "mime_type": "application/pdf", + "file_uri": "gs://bucket/file.pdf", + } + ] + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].file_data is not None + assert result[0].file_data.mime_type == "application/pdf" + assert result[0].file_data.file_uri == "gs://bucket/file.pdf" + + +def test_convert_to_parts_media_with_video_metadata() -> None: + """Test `_convert_to_parts` with media type containing video metadata.""" + content = [ + { + "type": "media", + "mime_type": "video/mp4", + "file_uri": "gs://bucket/video.mp4", + "video_metadata": {"start_offset": "10s", "end_offset": "20s"}, + } + ] + result = _convert_to_parts(content) + assert len(result) == 1 + assert result[0].file_data is not None + assert result[0].video_metadata is not None + assert result[0].video_metadata.start_offset == "10s" + assert result[0].video_metadata.end_offset == "20s" + + +def test_convert_to_parts_executable_code() -> None: + """Test `_convert_to_parts` with executable code.""" + content = [ + { + "type": "executable_code", + "language": "python", + "executable_code": "print('Hello, World!')", + } + 
+    ]
+    result = _convert_to_parts(content)
+    assert len(result) == 1
+    assert result[0].executable_code is not None
+    assert result[0].executable_code.language == Language.PYTHON
+    assert result[0].executable_code.code == "print('Hello, World!')"
+
+
+def test_convert_to_parts_code_execution_result() -> None:
+    """Test `_convert_to_parts` with code execution result."""
+    content = [
+        {
+            "type": "code_execution_result",
+            "code_execution_result": "Hello, World!",
+            "outcome": CodeExecutionResultOutcome.OUTCOME_OK,
+        }
+    ]
+    result = _convert_to_parts(content)
+    assert len(result) == 1
+    assert result[0].code_execution_result is not None
+    assert result[0].code_execution_result.output == "Hello, World!"
+    assert (
+        result[0].code_execution_result.outcome == CodeExecutionResultOutcome.OUTCOME_OK
+    )
+
+
+def test_convert_to_parts_code_execution_result_backward_compatibility() -> None:
+    """Test `_convert_to_parts` with code execution result without outcome (compat)."""
+    content = [
+        {
+            "type": "code_execution_result",
+            "code_execution_result": "Hello, World!",
+        }
+    ]
+    result = _convert_to_parts(content)
+    assert len(result) == 1
+    assert result[0].code_execution_result is not None
+    assert result[0].code_execution_result.output == "Hello, World!"
+    assert (
+        result[0].code_execution_result.outcome == CodeExecutionResultOutcome.OUTCOME_OK
+    )
+
+
+def test_convert_to_parts_thinking() -> None:
+    """Test `_convert_to_parts` with thinking content."""
+    content = [{"type": "thinking", "thinking": "I need to think about this..."}]
+    result = _convert_to_parts(content)
+    assert len(result) == 1
+    assert result[0].text == "I need to think about this..."
+    assert result[0].thought is True
+
+
+def test_convert_to_parts_mixed_content() -> None:
+    """Test `_convert_to_parts` with mixed content types."""
+    content: list[dict[str, Any]] = [
+        {"type": "text", "text": "Hello"},
+        {"type": "text", "text": "World"},
+        {"type": "image_url", "image_url": {"url": SMALL_VIEWABLE_BASE64_IMAGE}},
+    ]
+    result = _convert_to_parts(content)
+    assert len(result) == 3
+    assert result[0].text == "Hello"
+    assert result[1].text == "World"
+    assert result[2].inline_data is not None
+
+
+def test_convert_to_parts_invalid_type() -> None:
+    """Test `_convert_to_parts` with invalid source_type."""
+    content = [
+        {
+            "type": "file",
+            "source_type": "invalid",
+            "data": "some_data",
+        }
+    ]
+    with pytest.raises(ValueError, match="Unrecognized message part type: file"):
+        _convert_to_parts(content)
+
+
+def test_convert_to_parts_invalid_source_type() -> None:
+    """Test `_convert_to_parts` with invalid source_type."""
+    content = [
+        {
+            "type": "media",
+            "source_type": "invalid",
+            "data": "some_data",
+            "mime_type": "text/plain",
+        }
+    ]
+    with pytest.raises(ValueError, match="Data should be valid base64"):
+        _convert_to_parts(content)
+
+
+def test_convert_to_parts_invalid_image_url_format() -> None:
+    """Test `_convert_to_parts` with invalid `image_url` format."""
+    content = [{"type": "image_url", "image_url": {"invalid_key": "value"}}]
+    with pytest.raises(ValueError, match="Unrecognized message image format"):
+        _convert_to_parts(content)
+
+
+def test_convert_to_parts_missing_mime_type_in_media() -> None:
+    """Test `_convert_to_parts` with missing `mime_type` in media."""
+    content = [
+        {
+            "type": "media",
+            "file_uri": "gs://bucket/file.pdf",
+            # Missing mime_type
+        }
+    ]
+    with pytest.raises(ValueError, match="Missing mime_type in media part"):
+        _convert_to_parts(content)
+
+
+def test_convert_to_parts_media_missing_data_and_file_uri() -> None:
+    """Test `_convert_to_parts` with media missing both data and `file_uri`."""
+    content = [
+        {
+            "type": "media",
+            "mime_type": "application/pdf",
+            # Missing both data and file_uri
+        }
+    ]
+    with pytest.raises(
+        ValueError, match="Media part must have either data or file_uri"
+    ):
+        _convert_to_parts(content)
+
+
+def test_convert_to_parts_missing_executable_code_keys() -> None:
+    """Test `_convert_to_parts` with missing keys in `executable_code`."""
+    content = [
+        {
+            "type": "executable_code",
+            "language": "python",
+            # Missing executable_code key
+        }
+    ]
+    with pytest.raises(
+        ValueError, match="Executable code part must have 'code' and 'language'"
+    ):
+        _convert_to_parts(content)
+
+
+def test_convert_to_parts_missing_code_execution_result_key() -> None:
+    """Test `_convert_to_parts` with missing `code_execution_result` key."""
+    content = [
+        {
+            "type": "code_execution_result"
+            # Missing code_execution_result key
+        }
+    ]
+    with pytest.raises(
+        ValueError, match="Code execution result part must have 'code_execution_result'"
+    ):
+        _convert_to_parts(content)
+
+
+def test_convert_to_parts_unrecognized_type() -> None:
+    """Test `_convert_to_parts` with unrecognized type."""
+    content = [{"type": "unrecognized_type", "data": "some_data"}]
+    with pytest.raises(ValueError, match="Unrecognized message part type"):
+        _convert_to_parts(content)
+
+
+def test_convert_to_parts_non_dict_mapping() -> None:
+    """Test `_convert_to_parts` with non-dict mapping."""
+    content = [123]  # Not a string or dict
+    with pytest.raises(
+        ChatGoogleGenerativeAIError,
+        match="Unknown error occurred while converting LC message content to parts",
+    ):
+        _convert_to_parts(content)  # type: ignore[arg-type]
+
+
+def test_convert_to_parts_unrecognized_format_warning() -> None:
+    """Test `_convert_to_parts` with unrecognized format triggers warning."""
+    content = [{"some_key": "some_value"}]  # Not a recognized format
+    with patch("langchain_google_genai.chat_models.logger.warning") as mock_warning:
+        result = _convert_to_parts(content)
+        mock_warning.assert_called_once()
+        assert "Unrecognized message part format" in mock_warning.call_args[0][0]
+        assert len(result) == 1
+        assert result[0].text == "{'some_key': 'some_value'}"
+
+
+def test_convert_tool_message_to_parts_string_content() -> None:
+    """Test `_convert_tool_message_to_parts` with string content."""
+    message = ToolMessage(name="test_tool", content="test_result", tool_call_id="123")
+    result = _convert_tool_message_to_parts(message)
+    assert len(result) == 1
+    assert result[0].function_response is not None
+    assert result[0].function_response.name == "test_tool"
+    assert result[0].function_response.response == {"output": "test_result"}
+
+
+def test_convert_tool_message_to_parts_json_content() -> None:
+    """Test `_convert_tool_message_to_parts` with JSON string content."""
+    message = ToolMessage(
+        name="test_tool",
+        content='{"result": "success", "data": [1, 2, 3]}',
+        tool_call_id="123",
+    )
+    result = _convert_tool_message_to_parts(message)
+    assert len(result) == 1
+    assert result[0].function_response is not None
+    assert result[0].function_response.name == "test_tool"
+    assert result[0].function_response.response == {
+        "result": "success",
+        "data": [1, 2, 3],
+    }
+
+
+def test_convert_tool_message_to_parts_dict_content() -> None:
+    """Test `_convert_tool_message_to_parts` with `dict` content."""
+    message = ToolMessage(  # type: ignore[call-overload]
+        name="test_tool",
+        content={"result": "success", "data": [1, 2, 3]},
+        tool_call_id="123",
+    )
+    result = _convert_tool_message_to_parts(message)
+    assert len(result) == 1
+    assert result[0].function_response is not None
+    assert result[0].function_response.name == "test_tool"
+    assert result[0].function_response.response == {
+        "output": str({"result": "success", "data": [1, 2, 3]})
+    }
+
+
+def test_convert_tool_message_to_parts_list_content_with_media() -> None:
+    """Test `_convert_tool_message_to_parts` with `list` content containing media."""
+    message = ToolMessage(
+        name="test_tool",
+        content=[
+            "Text response",
+            {"type": "image_url", "image_url": {"url": SMALL_VIEWABLE_BASE64_IMAGE}},
+        ],
+        tool_call_id="123",
+    )
+    result = _convert_tool_message_to_parts(message)
+    assert len(result) == 2
+    # First part should be the media (image)
+    assert result[0].inline_data is not None
+    # Second part should be the function response
+    assert result[1].function_response is not None
+    assert result[1].function_response.name == "test_tool"
+    assert result[1].function_response.response == {"output": ["Text response"]}
+
+
+def test_convert_tool_message_to_parts_with_name_parameter() -> None:
+    """Test `_convert_tool_message_to_parts` with explicit name parameter."""
+    message = ToolMessage(
+        content="test_result",
+        tool_call_id="123",
+        # No name in message
+    )
+    result = _convert_tool_message_to_parts(message, name="explicit_tool_name")
+    assert len(result) == 1
+    assert result[0].function_response is not None
+    assert result[0].function_response.name == "explicit_tool_name"
+
+
+def test_convert_tool_message_to_parts_legacy_name_in_kwargs() -> None:
+    """Test `_convert_tool_message_to_parts` with legacy name in `additional_kwargs`."""
+    message = ToolMessage(
+        content="test_result",
+        tool_call_id="123",
+        additional_kwargs={"name": "legacy_tool_name"},
+    )
+    result = _convert_tool_message_to_parts(message)
+    assert len(result) == 1
+    assert result[0].function_response is not None
+    assert result[0].function_response.name == "legacy_tool_name"
+
+
+def test_convert_tool_message_to_parts_function_message() -> None:
+    """Test `_convert_tool_message_to_parts` with `FunctionMessage`."""
+    message = FunctionMessage(name="test_function", content="function_result")
+    result = _convert_tool_message_to_parts(message)
+    assert len(result) == 1
+    assert result[0].function_response is not None
+    assert result[0].function_response.name == "test_function"
+    assert result[0].function_response.response == {"output": "function_result"}
+
+
+def test_convert_tool_message_to_parts_invalid_json_fallback() -> None:
+    """Test `_convert_tool_message_to_parts` with invalid JSON falls back to string."""
+    message = ToolMessage(
+        name="test_tool",
+        content='{"invalid": json}',  # Invalid JSON
+        tool_call_id="123",
+    )
+    result = _convert_tool_message_to_parts(message)
+    assert len(result) == 1
+    assert result[0].function_response is not None
+    assert result[0].function_response.response == {"output": '{"invalid": json}'}
+
+
+def test_get_ai_message_tool_messages_parts_basic() -> None:
+    """Test `_get_ai_message_tool_messages_parts` with basic tool messages."""
+    ai_message = AIMessage(
+        content="",
+        tool_calls=[
+            {"id": "call_1", "name": "tool_1", "args": {"arg1": "value1"}},
+            {"id": "call_2", "name": "tool_2", "args": {"arg2": "value2"}},
+        ],
+    )
+    tool_messages = [
+        ToolMessage(name="tool_1", content="result_1", tool_call_id="call_1"),
ToolMessage(name="tool_2", content="result_2", tool_call_id="call_2"), + ] + result = _get_ai_message_tool_messages_parts(tool_messages, ai_message) + assert len(result) == 2 + # Check first tool response + assert result[0].function_response is not None + assert result[0].function_response.name == "tool_1" + assert result[0].function_response.response == {"output": "result_1"} + # Check second tool response + assert result[1].function_response is not None + assert result[1].function_response.name == "tool_2" + assert result[1].function_response.response == {"output": "result_2"} + + +def test_get_ai_message_tool_messages_parts_partial_matches() -> None: + """Test `_get_ai_message_tool_messages_parts` with partial tool message matches.""" + ai_message = AIMessage( + content="", + tool_calls=[ + {"id": "call_1", "name": "tool_1", "args": {"arg1": "value1"}}, + {"id": "call_2", "name": "tool_2", "args": {"arg2": "value2"}}, + ], + ) + tool_messages = [ + ToolMessage(name="tool_1", content="result_1", tool_call_id="call_1"), + # Missing tool_2 response + ] + result = _get_ai_message_tool_messages_parts(tool_messages, ai_message) + assert len(result) == 1 + # Only tool_1 response should be included + assert result[0].function_response is not None + assert result[0].function_response.name == "tool_1" + assert result[0].function_response.response == {"output": "result_1"} + + +def test_get_ai_message_tool_messages_parts_no_matches() -> None: + """Test `_get_ai_message_tool_messages_parts` with no matching tool messages.""" + ai_message = AIMessage( + content="", + tool_calls=[{"id": "call_1", "name": "tool_1", "args": {"arg1": "value1"}}], + ) + tool_messages = [ + ToolMessage(name="tool_2", content="result_2", tool_call_id="call_2"), + ToolMessage(name="tool_3", content="result_3", tool_call_id="call_3"), + ] + result = _get_ai_message_tool_messages_parts(tool_messages, ai_message) + assert len(result) == 0 + + +def test_get_ai_message_tool_messages_parts_empty_tool_calls() -> None: + """Test `_get_ai_message_tool_messages_parts` with empty tool calls.""" + ai_message = AIMessage(content="No tool calls") + tool_messages = [ + ToolMessage(name="tool_1", content="result_1", tool_call_id="call_1") + ] + result = _get_ai_message_tool_messages_parts(tool_messages, ai_message) + assert len(result) == 0 + + +def test_get_ai_message_tool_messages_parts_empty_tool_messages() -> None: + """Test `_get_ai_message_tool_messages_parts` with empty tool messages.""" + ai_message = AIMessage( + content="", + tool_calls=[{"id": "call_1", "name": "tool_1", "args": {"arg1": "value1"}}], + ) + result = _get_ai_message_tool_messages_parts([], ai_message) + assert len(result) == 0 + + +def test_get_ai_message_tool_messages_parts_duplicate_tool_calls() -> None: + """Test `_get_ai_message_tool_messages_parts` handles duplicate tool call IDs.""" + ai_message = AIMessage( + content="", + tool_calls=[ + {"id": "call_1", "name": "tool_1", "args": {"arg1": "value1"}}, + { + "id": "call_1", + "name": "tool_1", + "args": {"arg1": "value1"}, + }, # Duplicate ID + ], ) + tool_messages = [ + ToolMessage(name="tool_1", content="result_1", tool_call_id="call_1") + ] + result = _get_ai_message_tool_messages_parts(tool_messages, ai_message) + assert len(result) == 1 # Should only process the first match + assert result[0].function_response is not None + assert result[0].function_response.name == "tool_1" - with patch.object(llm.client, "generate_content", return_value=mock_response): - # SystemMessage + HumanMessage should work fine 
- result = llm.invoke( - [ - SystemMessage(content="You are a helpful assistant"), - HumanMessage(content="Hello"), - ] - ) - assert isinstance(result, AIMessage) - assert result.content == "Hello! I'm ready to help." +def test_get_ai_message_tool_messages_parts_order_preserved() -> None: + """Test `_get_ai_message_tool_messages_parts` preserves order of tool messages.""" + ai_message = AIMessage( + content="", + tool_calls=[ + {"id": "call_1", "name": "tool_1", "args": {"arg1": "value1"}}, + {"id": "call_2", "name": "tool_2", "args": {"arg2": "value2"}}, + ], + ) + tool_messages = [ + ToolMessage(name="tool_2", content="result_2", tool_call_id="call_2"), + ToolMessage(name="tool_1", content="result_1", tool_call_id="call_1"), + ] + result = _get_ai_message_tool_messages_parts(tool_messages, ai_message) + assert len(result) == 2 + # Order should be preserved based on tool_messages order, not tool_calls order + assert result[0].function_response is not None + assert result[0].function_response.name == "tool_2" + assert result[1].function_response is not None + assert result[1].function_response.name == "tool_1" + + +def test_get_ai_message_tool_messages_parts_with_name_from_tool_call() -> None: + """Test `_get_ai_message_tool_messages_parts` uses name from tool call""" + ai_message = AIMessage( + content="", + tool_calls=[ + {"id": "call_1", "name": "tool_from_call", "args": {"arg1": "value1"}} + ], + ) + tool_messages = [ + ToolMessage(content="result_1", tool_call_id="call_1") # No name in message + ] + result = _get_ai_message_tool_messages_parts(tool_messages, ai_message) + assert len(result) == 1 + assert result[0].function_response is not None + assert ( + result[0].function_response.name == "tool_from_call" + ) # Should use name from tool call def test_with_structured_output_json_schema_alias() -> None: @@ -2940,7 +3229,9 @@ def test_response_schema_mime_type_validation() -> None: schema = {"type": "object", "properties": {"field": {"type": "string"}}} # Test response_schema validation - error happens during _prepare_params - with pytest.raises(ValueError, match=r"response_schema.*is only supported when"): + with pytest.raises( + ValueError, match=r"JSON schema structured output is only supported when" + ): llm._prepare_params( stop=None, response_schema=schema, response_mime_type="text/plain" ) @@ -2957,6 +3248,233 @@ def test_response_schema_mime_type_validation() -> None: assert llm_with_json_schema is not None +def test_thinking_budget_preserved_with_structured_output() -> None: + """Test that `thinking_budget` is preserved when using `with_structured_output`.""" + + class TestSchema(BaseModel): + name: str + value: int + + mock_response = GenerateContentResponse( + candidates=[ + Candidate( + content=Content(parts=[Part(text='{"name": "test", "value": 42}')]), + finish_reason=FinishReason.STOP, + ) + ], + usage_metadata=GenerateContentResponseUsageMetadata( + prompt_token_count=10, + candidates_token_count=20, + total_token_count=30, + ), + ) + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + thinking_budget=0, + ) + assert llm.client is not None + + structured_llm = llm.with_structured_output(TestSchema, method="json_schema") + + with patch.object( + llm.client.models, "generate_content", return_value=mock_response + ) as mock_client_method: + structured_llm.invoke("test input") + + mock_client_method.assert_called_once() + + call_args = mock_client_method.call_args + config = call_args.kwargs.get("config") + + if config is None 
and len(call_args.args) > 0: + for arg in call_args.args: + if hasattr(arg, "thinking_config"): + config = arg + break + + assert config is not None, "Config should be present in API call" + assert hasattr(config, "thinking_config"), "thinking_config should be present" + assert config.thinking_config is not None, "thinking_config should not be None" + assert config.thinking_config.thinking_budget == 0, ( + f"thinking_budget should be 0, got {config.thinking_config.thinking_budget}" + ) + + +def test_thinking_level_preserved_with_structured_output() -> None: + """Test that `thinking_level` is preserved when using `with_structured_output`.""" + + class TestSchema(BaseModel): + name: str + value: int + + mock_response = GenerateContentResponse( + candidates=[ + Candidate( + content=Content(parts=[Part(text='{"name": "test", "value": 42}')]), + finish_reason=FinishReason.STOP, + ) + ], + usage_metadata=GenerateContentResponseUsageMetadata( + prompt_token_count=10, + candidates_token_count=20, + total_token_count=30, + ), + ) + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + thinking_level="low", + ) + assert llm.client is not None + + structured_llm = llm.with_structured_output(TestSchema, method="json_schema") + + with patch.object( + llm.client.models, "generate_content", return_value=mock_response + ) as mock_client_method: + structured_llm.invoke("test input") + + mock_client_method.assert_called_once() + call_args = mock_client_method.call_args + + config = call_args.kwargs.get("config") + if config is None and len(call_args.args) > 0: + for arg in call_args.args: + if hasattr(arg, "thinking_config"): + config = arg + break + + # Verify thinking_level is in the config + assert config is not None, "Config should be present in API call" + assert hasattr(config, "thinking_config"), "thinking_config should be present" + assert config.thinking_config is not None, "thinking_config should not be None" + assert config.thinking_config.thinking_level == ThinkingLevel.LOW, ( + f"thinking_level should be LOW, got {config.thinking_config.thinking_level}" + ) + + +def test_include_thoughts_preserved_with_structured_output() -> None: + """Test that `include_thoughts` is preserved when using `with_structured_output`.""" + + class TestSchema(BaseModel): + name: str + value: int + + mock_response = GenerateContentResponse( + candidates=[ + Candidate( + content=Content( + parts=[ + Part(text="Let me think...", thought=True), + Part(text='{"name": "test", "value": 42}'), + ] + ), + finish_reason=FinishReason.STOP, + ) + ], + usage_metadata=GenerateContentResponseUsageMetadata( + prompt_token_count=10, + candidates_token_count=20, + total_token_count=30, + ), + ) + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + include_thoughts=True, + ) + assert llm.client is not None + + structured_llm = llm.with_structured_output(TestSchema, method="json_schema") + + with patch.object( + llm.client.models, "generate_content", return_value=mock_response + ) as mock_client_method: + structured_llm.invoke("test input") + + mock_client_method.assert_called_once() + call_args = mock_client_method.call_args + + config = call_args.kwargs.get("config") + if config is None and len(call_args.args) > 0: + for arg in call_args.args: + if hasattr(arg, "thinking_config"): + config = arg + break + + assert config is not None, "Config should be present in API call" + assert hasattr(config, "thinking_config"), "thinking_config should be 
present" + assert config.thinking_config is not None, "thinking_config should not be None" + msg = ( + f"include_thoughts should be True, " + f"got {config.thinking_config.include_thoughts}" + ) + assert config.thinking_config.include_thoughts is True, msg + + +def test_thinking_budget_and_include_thoughts_with_structured_output() -> None: + """Test that both `thinking_budget` and `include_thoughts` are preserved.""" + + class TestSchema(BaseModel): + name: str + value: int + + mock_response = GenerateContentResponse( + candidates=[ + Candidate( + content=Content(parts=[Part(text='{"name": "test", "value": 42}')]), + finish_reason=FinishReason.STOP, + ) + ], + usage_metadata=GenerateContentResponseUsageMetadata( + prompt_token_count=10, + candidates_token_count=20, + total_token_count=30, + ), + ) + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + thinking_budget=0, + include_thoughts=False, + ) + assert llm.client is not None + + structured_llm = llm.with_structured_output(TestSchema, method="json_schema") + + with patch.object( + llm.client.models, "generate_content", return_value=mock_response + ) as mock_client_method: + structured_llm.invoke("test input") + + mock_client_method.assert_called_once() + call_args = mock_client_method.call_args + + config = call_args.kwargs.get("config") + if config is None and len(call_args.args) > 0: + for arg in call_args.args: + if hasattr(arg, "thinking_config"): + config = arg + break + + assert config is not None, "Config should be present in API call" + assert hasattr(config, "thinking_config"), "thinking_config should be present" + assert config.thinking_config is not None, "thinking_config should not be None" + assert config.thinking_config.thinking_budget == 0, ( + f"thinking_budget should be 0, got {config.thinking_config.thinking_budget}" + ) + msg = ( + f"include_thoughts should be False, " + f"got {config.thinking_config.include_thoughts}" + ) + assert config.thinking_config.include_thoughts is False, msg + + def test_is_new_gemini_model() -> None: assert _is_gemini_3_or_later("gemini-3.0-pro") is True assert _is_gemini_3_or_later("gemini-2.5-pro") is False @@ -3006,7 +3524,6 @@ def test_per_part_media_resolution_warning_gemini_25() -> None: assert expected_msg in str(w[0].message) -@pytest.mark.xfail(reason="Needs support in SDK.") def test_per_part_media_resolution_no_warning_new_models() -> None: """Test that per-part `media_resolution` does not warn for new models.""" content_with_media_resolution = [ @@ -3070,7 +3587,6 @@ def test_per_part_media_resolution_warning_gemini_25_data_block() -> None: ) -@pytest.mark.xfail(reason="Needs support in SDK.") def test_thinking_level_parameter() -> None: """Test that `thinking_level` is properly handled.""" # Test with thinking_level only @@ -3081,8 +3597,9 @@ def test_thinking_level_parameter() -> None: ) config = llm._prepare_params(stop=None) assert config.thinking_config is not None - assert config.thinking_config.thinking_level == "low" - assert not hasattr(config.thinking_config, "thinking_budget") + assert config.thinking_config.thinking_level == ThinkingLevel.LOW + # Pydantic models define all fields; check value is None rather than hasattr + assert config.thinking_config.thinking_budget is None # Test with thinking_level="high" llm = ChatGoogleGenerativeAI( @@ -3092,10 +3609,9 @@ def test_thinking_level_parameter() -> None: ) config = llm._prepare_params(stop=None) assert config.thinking_config is not None - assert 
config.thinking_config.thinking_level == "high" + assert config.thinking_config.thinking_level == ThinkingLevel.HIGH -@pytest.mark.xfail(reason="Needs support in SDK.") def test_thinking_level_takes_precedence_over_thinking_budget() -> None: """Test that `thinking_level` takes precedence when both are provided.""" with warnings.catch_warnings(record=True) as warning_list: @@ -3116,8 +3632,9 @@ def test_thinking_level_takes_precedence_over_thinking_budget() -> None: # Check that thinking_level is used and thinking_budget is ignored assert config.thinking_config is not None - assert config.thinking_config.thinking_level == "low" - assert not hasattr(config.thinking_config, "thinking_budget") + assert config.thinking_config.thinking_level == ThinkingLevel.LOW + # Pydantic models define all fields; check value is None rather than hasattr + assert config.thinking_config.thinking_budget is None def test_thinking_budget_alone_still_works() -> None: @@ -3130,7 +3647,8 @@ def test_thinking_budget_alone_still_works() -> None: config = llm._prepare_params(stop=None) assert config.thinking_config is not None assert config.thinking_config.thinking_budget == 64 - assert not hasattr(config.thinking_config, "thinking_level") + # Pydantic models define all fields; check value is None rather than hasattr + assert config.thinking_config.thinking_level is None def test_kwargs_override_max_output_tokens() -> None: @@ -3158,7 +3676,6 @@ def test_kwargs_override_thinking_budget() -> None: assert config.thinking_config.thinking_budget == 128 -@pytest.mark.xfail(reason="Needs support in SDK.") def test_kwargs_override_thinking_level() -> None: """Test that thinking_level can be overridden via kwargs.""" llm = ChatGoogleGenerativeAI( @@ -3169,4 +3686,425 @@ def test_kwargs_override_thinking_level() -> None: config = llm._prepare_params(stop=None, thinking_level="high") assert config.thinking_config is not None - assert config.thinking_config.thinking_level == "high" + assert config.thinking_config.thinking_level == ThinkingLevel.HIGH + + +def test_client_error_raises_descriptive_error() -> None: + """Test `ClientError` from the API is properly converted to a descriptive error.""" + invalid_model_name = "gemini-invalid-model-name" + mock_client = Mock() + mock_models = Mock() + mock_generate_content = Mock() + + # Simulate a NOT_FOUND error from the API (what happens with invalid model names) + mock_generate_content.side_effect = ClientError( + code=404, + response_json={ + "error": { + "message": f"models/{invalid_model_name} is not found", + "status": "NOT_FOUND", + } + }, + response=None, + ) + mock_models.generate_content = mock_generate_content + mock_client.return_value.models = mock_models + + with patch("langchain_google_genai.chat_models.Client", mock_client): + chat = ChatGoogleGenerativeAI( + model=invalid_model_name, + google_api_key=SecretStr(FAKE_API_KEY), + max_retries=0, # Disable retries for faster test + ) + + with pytest.raises( + ChatGoogleGenerativeAIError, + match=rf"Error calling model '{invalid_model_name}' \(NOT_FOUND\)", + ): + chat.invoke("test") + + +def test_kwargs_override_response_modalities() -> None: + """Test that `response_modalities` can be overridden via kwargs.""" + from langchain_core.messages import HumanMessage + + from langchain_google_genai import Modality + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + ) + + # Test passing response_modalities as kwarg to _prepare_request + msg = HumanMessage(content="test") + request = 
llm._prepare_request( + [msg], + response_modalities=[Modality.TEXT, Modality.IMAGE], + ) + + # Verify response_modalities is set correctly in the config + assert request["config"].response_modalities == ["TEXT", "IMAGE"] + + +def test_response_modalities_set_on_instance() -> None: + """Test that `response_modalities` can be set on the instance.""" + from langchain_core.messages import HumanMessage + + from langchain_google_genai import Modality + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + response_modalities=[Modality.TEXT], + ) + + msg = HumanMessage(content="test") + request = llm._prepare_request([msg]) + + assert request["config"].response_modalities == ["TEXT"] + + +def test_kwargs_response_modalities_overrides_instance() -> None: + """Test that kwarg `response_modalities` overrides instance value.""" + from langchain_core.messages import HumanMessage + + from langchain_google_genai import Modality + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + google_api_key=SecretStr(FAKE_API_KEY), + response_modalities=[Modality.TEXT], + ) + + msg = HumanMessage(content="test") + request = llm._prepare_request( + [msg], + response_modalities=[Modality.TEXT, Modality.IMAGE], + ) + + # Kwarg should override instance value + assert request["config"].response_modalities == ["TEXT", "IMAGE"] + + +# Test for protobuf integer/float conversion fix in chat models. + + +def test_parse_response_candidate_corrects_integer_like_floats() -> None: + """Test that `_parse_response_candidate` correctly handles integer-like floats. + + Handling in tool call arguments from the Gemini API response. + + This test addresses a bug where `proto.Message.to_dict()` converts integers + to floats, causing downstream type casting errors. 
+ """ + # Create a mock Protobuf Struct for the arguments with problematic float values + args_struct = Struct() + args_struct.update( + { + "entity_type": "table", + "upstream_depth": 3.0, # The problematic float value that should be int + "downstream_depth": 5.0, # Another problematic float value + "fqn": "test.table.name", + "valid_float": 3.14, # This should remain as float + "string_param": "test_string", # This should remain as string + "bool_param": True, # This should remain as boolean + } + ) + + # Create the mock API response candidate + candidate = Candidate( + content=Content( + parts=[ + Part( + function_call=FunctionCall( + name="get_entity_lineage", + args=args_struct, + ) + ) + ] + ) + ) + + # Call the function we are testing + result_message = _parse_response_candidate(candidate) + + # Assert that the parsed tool_calls have the correct integer types + assert len(result_message.tool_calls) == 1 + tool_call = result_message.tool_calls[0] + assert tool_call["name"] == "get_entity_lineage" + assert tool_call["args"]["upstream_depth"] == 3 + assert tool_call["args"]["downstream_depth"] == 5 + assert isinstance(tool_call["args"]["upstream_depth"], int) + assert isinstance(tool_call["args"]["downstream_depth"], int) + + # Assert that non-integer values are preserved correctly + assert tool_call["args"]["valid_float"] == 3.14 + assert isinstance(tool_call["args"]["valid_float"], float) + assert tool_call["args"]["string_param"] == "test_string" + assert isinstance(tool_call["args"]["string_param"], str) + assert tool_call["args"]["bool_param"] is True + assert isinstance(tool_call["args"]["bool_param"], bool) + + # Assert that the additional_kwargs also contains corrected JSON + function_call_args = json.loads( + result_message.additional_kwargs["function_call"]["arguments"] + ) + assert function_call_args["upstream_depth"] == 3 + assert function_call_args["downstream_depth"] == 5 + assert isinstance(function_call_args["upstream_depth"], int) + assert isinstance(function_call_args["downstream_depth"], int) + + # Assert that non-integer values are preserved in additional_kwargs too + assert function_call_args["valid_float"] == 3.14 + assert isinstance(function_call_args["valid_float"], float) + + +def test_parse_response_candidate_handles_no_function_call() -> None: + """Test that the function works correctly when there's no function call.""" + candidate = Candidate( + content=Content( + parts=[Part(text="This is a regular text response without function calls")] + ) + ) + + result_message = _parse_response_candidate(candidate) + + assert ( + result_message.content + == "This is a regular text response without function calls" + ) + assert len(result_message.tool_calls) == 0 + assert "function_call" not in result_message.additional_kwargs + + +def test_parse_response_candidate_handles_empty_args() -> None: + """Test that the function works correctly with empty function call arguments.""" + args_struct = Struct() + # Empty struct - no arguments + + candidate = Candidate( + content=Content( + parts=[ + Part( + function_call=FunctionCall( + name="no_args_function", + args=args_struct, + ) + ) + ] + ) + ) + + result_message = _parse_response_candidate(candidate) + + assert len(result_message.tool_calls) == 1 + tool_call = result_message.tool_calls[0] + assert tool_call["name"] == "no_args_function" + assert tool_call["args"] == {} + + function_call_args = json.loads( + result_message.additional_kwargs["function_call"]["arguments"] + ) + assert function_call_args == {} + + +def 
test_backend_detection_default() -> None: + """Test default backend detection (Gemini Developer API).""" + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + ) + assert llm._use_vertexai is False # type: ignore[attr-defined] + + +def test_backend_detection_explicit_vertexai_true() -> None: + """Test explicit `vertexai=True` forces Vertex AI backend.""" + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + project="test-project", + vertexai=True, + ) + assert llm._use_vertexai is True # type: ignore[attr-defined] + + +def test_backend_detection_explicit_vertexai_false() -> None: + """Test explicit `vertexai=False` forces Gemini Developer API.""" + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + project="test-project", + vertexai=False, + ) + assert llm._use_vertexai is False # type: ignore[attr-defined] + + +def test_backend_detection_project_auto_detects_vertexai() -> None: + """Test that providing project parameter auto-detects Vertex AI.""" + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + project="test-project", + ) + assert llm._use_vertexai is True # type: ignore[attr-defined] + + +def test_backend_detection_credentials_auto_detects_vertexai() -> None: + """Test that providing credentials parameter auto-detects Vertex AI.""" + from unittest.mock import Mock + + fake_credentials = Mock() + fake_credentials.project_id = "test-project" + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + credentials=fake_credentials, + project="test-project", + ) + assert llm._use_vertexai is True # type: ignore[attr-defined] + + +def test_backend_detection_env_var_vertexai_true() -> None: + """Test `GOOGLE_GENAI_USE_VERTEXAI=true` forces Vertex AI.""" + original_env = os.environ.copy() + try: + os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "true" + os.environ["GOOGLE_CLOUD_PROJECT"] = "test-project" + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + ) + assert llm._use_vertexai is True # type: ignore[attr-defined] + finally: + os.environ.clear() + os.environ.update(original_env) + + +def test_backend_detection_env_var_vertexai_false() -> None: + """Test `GOOGLE_GENAI_USE_VERTEXAI=false` forces Gemini Developer API.""" + original_env = os.environ.copy() + try: + os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "false" + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + project="test-project", # Would normally trigger Vertex AI + ) + assert llm._use_vertexai is False # type: ignore[attr-defined] + finally: + os.environ.clear() + os.environ.update(original_env) + + +def test_backend_detection_env_var_variations() -> None: + """Test various values for `GOOGLE_GENAI_USE_VERTEXAI` env var.""" + original_env = os.environ.copy() + + # Test "1" as true + try: + os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "1" + os.environ["GOOGLE_CLOUD_PROJECT"] = "test-project" + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + ) + assert llm._use_vertexai is True # type: ignore[attr-defined] + finally: + os.environ.clear() + os.environ.update(original_env) + + # Test "yes" as true + try: + os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "yes" + os.environ["GOOGLE_CLOUD_PROJECT"] = "test-project" + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + ) + assert llm._use_vertexai is True # type: ignore[attr-defined] + finally: + os.environ.clear() + os.environ.update(original_env) + + # Test "0" as false + try: + 
os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "0" + + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + project="test-project", + ) + assert llm._use_vertexai is False # type: ignore[attr-defined] + finally: + os.environ.clear() + os.environ.update(original_env) + + +def test_backend_detection_priority_explicit_over_env() -> None: + """Test that explicit vertexai parameter overrides env var.""" + + original_env = os.environ.copy() + try: + os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "true" + os.environ["GOOGLE_CLOUD_PROJECT"] = "test-project" + + # Explicit False should override env var True + llm = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + vertexai=False, + ) + assert llm._use_vertexai is False # type: ignore[attr-defined] + + # Explicit True should also work + llm2 = ChatGoogleGenerativeAI( + model=MODEL_NAME, + api_key=FAKE_API_KEY, + vertexai=True, + ) + assert llm2._use_vertexai is True # type: ignore[attr-defined] + finally: + os.environ.clear() + os.environ.update(original_env) + + +def test_model_name_normalization_for_vertexai() -> None: + """Test that model names with `'models/'` prefix are normalized for Vertex AI.""" + original_env = os.environ.copy() + try: + os.environ["GOOGLE_CLOUD_PROJECT"] = "test-project" + + # Test with models/ prefix for Vertex AI - should be stripped + llm_vertex = ChatGoogleGenerativeAI( + model="models/gemini-2.5-flash", + api_key=FAKE_API_KEY, + vertexai=True, + ) + assert llm_vertex.model == "gemini-2.5-flash" + assert llm_vertex._use_vertexai is True # type: ignore[attr-defined] + + # Test with models/ prefix for Google AI - should remain unchanged + llm_google_ai = ChatGoogleGenerativeAI( + model="models/gemini-2.5-flash", + api_key=FAKE_API_KEY, + vertexai=False, + ) + assert llm_google_ai.model == "models/gemini-2.5-flash" + assert llm_google_ai._use_vertexai is False # type: ignore[attr-defined] + + # Test without models/ prefix for Vertex AI - should remain unchanged + llm_vertex_no_prefix = ChatGoogleGenerativeAI( + model="gemini-2.5-flash", + api_key=FAKE_API_KEY, + vertexai=True, + ) + assert llm_vertex_no_prefix.model == "gemini-2.5-flash" + assert llm_vertex_no_prefix._use_vertexai is True # type: ignore[attr-defined] + finally: + os.environ.clear() + os.environ.update(original_env) diff --git a/libs/genai/tests/unit_tests/test_chat_models_protobuf_fix.py b/libs/genai/tests/unit_tests/test_chat_models_protobuf_fix.py deleted file mode 100644 index ea06cd4cb..000000000 --- a/libs/genai/tests/unit_tests/test_chat_models_protobuf_fix.py +++ /dev/null @@ -1,132 +0,0 @@ -"""Test for protobuf integer/float conversion fix in chat models.""" - -import json - -from google.ai.generativelanguage_v1beta.types import ( - Candidate, - Content, - FunctionCall, - Part, -) -from google.protobuf.struct_pb2 import Struct - -from langchain_google_genai.chat_models import _parse_response_candidate - - -def test_parse_response_candidate_corrects_integer_like_floats() -> None: - """Test that `_parse_response_candidate` correctly handles integer-like floats. - - Handling in tool call arguments from the Gemini API response. - - This test addresses a bug where `proto.Message.to_dict()` converts integers - to floats, causing downstream type casting errors. 
- """ - # Create a mock Protobuf Struct for the arguments with problematic float values - args_struct = Struct() - args_struct.update( - { - "entity_type": "table", - "upstream_depth": 3.0, # The problematic float value that should be int - "downstream_depth": 5.0, # Another problematic float value - "fqn": "test.table.name", - "valid_float": 3.14, # This should remain as float - "string_param": "test_string", # This should remain as string - "bool_param": True, # This should remain as boolean - } - ) - - # Create the mock API response candidate - candidate = Candidate( - content=Content( - parts=[ - Part( - function_call=FunctionCall( - name="get_entity_lineage", - args=args_struct, - ) - ) - ] - ) - ) - - # Call the function we are testing - result_message = _parse_response_candidate(candidate) - - # Assert that the parsed tool_calls have the correct integer types - assert len(result_message.tool_calls) == 1 - tool_call = result_message.tool_calls[0] - assert tool_call["name"] == "get_entity_lineage" - assert tool_call["args"]["upstream_depth"] == 3 - assert tool_call["args"]["downstream_depth"] == 5 - assert isinstance(tool_call["args"]["upstream_depth"], int) - assert isinstance(tool_call["args"]["downstream_depth"], int) - - # Assert that non-integer values are preserved correctly - assert tool_call["args"]["valid_float"] == 3.14 - assert isinstance(tool_call["args"]["valid_float"], float) - assert tool_call["args"]["string_param"] == "test_string" - assert isinstance(tool_call["args"]["string_param"], str) - assert tool_call["args"]["bool_param"] is True - assert isinstance(tool_call["args"]["bool_param"], bool) - - # Assert that the additional_kwargs also contains corrected JSON - function_call_args = json.loads( - result_message.additional_kwargs["function_call"]["arguments"] - ) - assert function_call_args["upstream_depth"] == 3 - assert function_call_args["downstream_depth"] == 5 - assert isinstance(function_call_args["upstream_depth"], int) - assert isinstance(function_call_args["downstream_depth"], int) - - # Assert that non-integer values are preserved in additional_kwargs too - assert function_call_args["valid_float"] == 3.14 - assert isinstance(function_call_args["valid_float"], float) - - -def test_parse_response_candidate_handles_no_function_call() -> None: - """Test that the function works correctly when there's no function call.""" - candidate = Candidate( - content=Content( - parts=[Part(text="This is a regular text response without function calls")] - ) - ) - - result_message = _parse_response_candidate(candidate) - - assert ( - result_message.content - == "This is a regular text response without function calls" - ) - assert len(result_message.tool_calls) == 0 - assert "function_call" not in result_message.additional_kwargs - - -def test_parse_response_candidate_handles_empty_args() -> None: - """Test that the function works correctly with empty function call arguments.""" - args_struct = Struct() - # Empty struct - no arguments - - candidate = Candidate( - content=Content( - parts=[ - Part( - function_call=FunctionCall( - name="no_args_function", - args=args_struct, - ) - ) - ] - ) - ) - - result_message = _parse_response_candidate(candidate) - - assert len(result_message.tool_calls) == 1 - tool_call = result_message.tool_calls[0] - assert tool_call["name"] == "no_args_function" - assert tool_call["args"] == {} - - function_call_args = json.loads( - result_message.additional_kwargs["function_call"]["arguments"] - ) - assert function_call_args == {} diff --git 
a/libs/genai/tests/unit_tests/test_common.py b/libs/genai/tests/unit_tests/test_common.py index 7780a873d..b48e03013 100644 --- a/libs/genai/tests/unit_tests/test_common.py +++ b/libs/genai/tests/unit_tests/test_common.py @@ -52,6 +52,8 @@ class TestModel(_BaseGoogleGenerativeAI): def test_get_user_agent_with_telemetry_env_variable( mock_environ_get: MagicMock, ) -> None: + """Test `get_user_agent` includes telemetry suffix when env variable is set.""" + mock_environ_get.return_value = True client_lib_version, user_agent_str = get_user_agent(module="test-module") assert client_lib_version == "1.2.3-test-module+remote_reasoning_engine" @@ -65,23 +67,13 @@ def test_get_user_agent_with_telemetry_env_variable( def test_get_user_agent_without_telemetry_env_variable( mock_environ_get: MagicMock, ) -> None: + """Test `get_user_agent` without telemetry suffix when env variable is not set.""" mock_environ_get.return_value = False client_lib_version, user_agent_str = get_user_agent(module="test-module") assert client_lib_version == "1.2.3-test-module" assert user_agent_str == "langchain-google-genai/1.2.3-test-module" -def test_version_is_cached_at_module_level() -> None: - """Test that version is cached at module level and doesn't call - `metadata.version`.""" - from langchain_google_genai import _common - - # The cached version should be a string - assert isinstance(_common.LC_GOOGLE_GENAI_VERSION, str) - # Should be either a valid version or "0.0.0" (fallback) - assert _common.LC_GOOGLE_GENAI_VERSION != "" - - async def test_get_user_agent_no_blocking_in_async_context() -> None: """Test that `get_user_agent` doesn't perform blocking I/O in async context. @@ -105,6 +97,18 @@ async def test_get_user_agent_no_blocking_in_async_context() -> None: assert "langchain-google-genai" in user_agent_str +def test_version_is_cached_at_module_level() -> None: + """Test that version is cached at module level and doesn't call + `metadata.version`.""" + from langchain_google_genai import _common + + # The cached version should be a string + assert isinstance(_common.LC_GOOGLE_GENAI_VERSION, str) + + # Should be either a valid version or "0.0.0" (fallback) + assert _common.LC_GOOGLE_GENAI_VERSION != "" + + def test_async_context_execution() -> None: """Run the async test to ensure it works in event loop.""" asyncio.run(test_get_user_agent_no_blocking_in_async_context()) diff --git a/libs/genai/tests/unit_tests/test_function_utils.py b/libs/genai/tests/unit_tests/test_function_utils.py index 53963d16c..ea696f271 100644 --- a/libs/genai/tests/unit_tests/test_function_utils.py +++ b/libs/genai/tests/unit_tests/test_function_utils.py @@ -3,8 +3,16 @@ from typing import Annotated, Any, Literal from unittest.mock import MagicMock, patch -import google.ai.generativelanguage as glm import pytest +from google.genai.types import ( + FunctionCallingConfig, + FunctionCallingConfigMode, + FunctionDeclaration, + Schema, + Tool, + ToolConfig, + Type, +) from langchain_core.documents import Document from langchain_core.tools import BaseTool, InjectedToolArg, tool from langchain_core.utils.function_calling import ( @@ -17,15 +25,76 @@ _convert_pydantic_to_genai_function, _format_base_tool_to_function_declaration, _format_dict_to_function_declaration, - _format_to_gapic_function_declaration, + _format_to_genai_function_declaration, _FunctionDeclarationLike, _tool_choice_to_tool_config, - _ToolConfigDict, convert_to_genai_function_declarations, tool_to_dict, ) +def assert_property_type( + property_dict: dict, expected_type: Type, 
property_name: str = "property" +) -> None: + """ + Utility function to assert that a property has the expected Type enum value. + Since tool_to_dict serializes Type enums to dictionaries with '_value_' field, + this function handles the comparison correctly. + Args: + property_dict: The property dictionary from the serialized schema + expected_type: The expected Type enum value + property_name: Name of the property for error messages (optional) + """ + actual_type_dict = property_dict.get("type", {}) + if isinstance(actual_type_dict, dict): + actual_value = actual_type_dict.get("_value_") + assert actual_value == expected_type.value, ( + f"Expected '{property_name}' to be {expected_type.value}, " + f"but got {actual_value}" + ) + else: + # In case the type is not serialized as a dict (fallback) + assert actual_type_dict == expected_type, ( + f"Expected '{property_name}' to be {expected_type}, " + f"but got {actual_type_dict}" + ) + + +def find_any_of_option_by_type(any_of_list: list, expected_type: Type) -> dict: + """ + Utility function to find an option in an any_of list that has the expected Type. + Since tool_to_dict serializes Type enums to dictionaries with '_value_' field, + this function handles the search correctly. + Args: + any_of_list: List of options from an any_of field + expected_type: The Type enum value to search for + Returns: + The matching option dictionary + Raises: + AssertionError: If no option with the expected type is found + """ + for opt in any_of_list: + type_dict = opt.get("type", {}) + if isinstance(type_dict, dict): + if type_dict.get("_value_") == expected_type.value: + return opt + if type_dict == expected_type: + return opt + # If we get here, no matching option was found + available_types = [] + for opt in any_of_list: + type_dict = opt.get("type", {}) + if isinstance(type_dict, dict): + available_types.append(type_dict.get("_value_", "unknown")) + else: + available_types.append(str(type_dict)) + msg = ( + f"No option with type {expected_type.value} found in any_of. " + f"Available types: {available_types}" + ) + raise AssertionError(msg) + + def test_tool_with_anyof_nullable_param() -> None: """Example test. @@ -68,7 +137,7 @@ def possibly_none( a_property = properties.get("a") assert isinstance(a_property, dict), "Expected a dict." - assert a_property.get("type_") == glm.Type.STRING, "Expected 'a' to be STRING." + assert_property_type(a_property, Type.STRING, "a") assert a_property.get("nullable") is True, "Expected 'a' to be marked as nullable." @@ -114,16 +183,14 @@ def possibly_none_list( assert isinstance(items_property, dict), "Expected a dict." # Assertions - assert items_property.get("type_") == glm.Type.ARRAY, ( - "Expected 'items' to be ARRAY." - ) + assert_property_type(items_property, Type.ARRAY, "items") assert items_property.get("nullable"), "Expected 'items' to be marked as nullable." # Check that the array items are recognized as strings items = items_property.get("items") assert isinstance(items, dict), "Expected 'items' to be a dict." - assert items.get("type_") == glm.Type.STRING, "Expected array items to be STRING." + assert_property_type(items, Type.STRING, "array items") def test_tool_with_nested_object_anyof_nullable_param() -> None: @@ -165,10 +232,19 @@ def possibly_none_dict( data_property = properties.get("data") assert isinstance(data_property, dict), "Expected a dict." - assert data_property.get("type_") in [ - glm.Type.OBJECT, - glm.Type.STRING, - ], "Expected 'data' to be recognized as an OBJECT or fallback to STRING." 
+ # Check if it's OBJECT or STRING (fallback) + actual_type_dict = data_property.get("type", {}) + if isinstance(actual_type_dict, dict): + actual_value = actual_type_dict.get("_value_") + assert actual_value in [ + Type.OBJECT.value, + Type.STRING.value, + ], f"Expected 'data' to be OBJECT or STRING, but got {actual_value}" + else: + assert actual_type_dict in [ + Type.OBJECT, + Type.STRING, + ], f"Expected 'data' to be OBJECT or STRING, but got {actual_type_dict}" assert data_property.get("nullable") is True, ( "Expected 'data' to be marked as nullable." ) @@ -223,9 +299,7 @@ def possibly_none_enum( assert isinstance(status_property, dict), "Expected a dict." # Assertions - assert status_property.get("type_") == glm.Type.STRING, ( - "Expected 'status' to be STRING." - ) + assert_property_type(status_property, Type.STRING, "status") assert status_property.get("nullable") is True, ( "Expected 'status' to be marked as nullable." ) @@ -243,13 +317,13 @@ def search(question: str) -> str: search_tool = tool(search) -search_exp = glm.FunctionDeclaration( +search_exp = FunctionDeclaration( name="search", description="Search tool.", - parameters=glm.Schema( - type=glm.Type.OBJECT, + parameters=Schema( + type=Type.OBJECT, description="Search tool.", - properties={"question": glm.Schema(type=glm.Type.STRING)}, + properties={"question": Schema(type=Type.STRING)}, required=["question"], title="search", ), @@ -262,13 +336,13 @@ def _run(self) -> None: search_base_tool = SearchBaseTool(name="search", description="Search tool") -search_base_tool_exp = glm.FunctionDeclaration( +search_base_tool_exp = FunctionDeclaration( name=search_base_tool.name, description=search_base_tool.description, - parameters=glm.Schema( - type=glm.Type.OBJECT, + parameters=Schema( + type=Type.OBJECT, properties={ - "__arg1": glm.Schema(type=glm.Type.STRING), + "__arg1": Schema(type=Type.STRING), }, required=["__arg1"], ), @@ -287,27 +361,27 @@ class SearchModel(BaseModel): "description": search_model_schema["description"], "parameters": search_model_schema, } -search_model_exp = glm.FunctionDeclaration( +search_model_exp = FunctionDeclaration( name="SearchModel", description="Search model.", - parameters=glm.Schema( - type=glm.Type.OBJECT, + parameters=Schema( + type=Type.OBJECT, description="Search model.", properties={ - "question": glm.Schema(type=glm.Type.STRING), + "question": Schema(type=Type.STRING), }, required=["question"], title="SearchModel", ), ) -search_model_exp_pyd = glm.FunctionDeclaration( +search_model_exp_pyd = FunctionDeclaration( name="SearchModel", description="Search model.", - parameters=glm.Schema( - type=glm.Type.OBJECT, + parameters=Schema( + type=Type.OBJECT, properties={ - "question": glm.Schema(type=glm.Type.STRING), + "question": Schema(type=Type.STRING), }, required=["question"], ), @@ -322,7 +396,7 @@ class SearchModel(BaseModel): ) SRC_EXP_MOCKS_DESC: list[ - tuple[_FunctionDeclarationLike, glm.FunctionDeclaration, list[MagicMock], str] + tuple[_FunctionDeclarationLike, FunctionDeclaration, list[MagicMock], str] ] = [ (search, search_exp, [mock_base_tool], "plain function"), (search_tool, search_exp, [mock_base_tool], "LC tool"), @@ -339,6 +413,8 @@ def get_datetime() -> str: return datetime.datetime.now(tz=datetime.timezone.utc).strftime("%Y-%m-%d") schema = convert_to_genai_function_declarations([get_datetime]) + assert schema.function_declarations is not None + assert len(schema.function_declarations) > 0 function_declaration = schema.function_declarations[0] assert 
function_declaration.name == "get_datetime" assert function_declaration.description == "Gets the current datetime." @@ -355,9 +431,13 @@ def sum_two_numbers(a: float, b: float) -> str: return str(a + b) schema = convert_to_genai_function_declarations([sum_two_numbers]) + + assert schema.function_declarations is not None + assert len(schema.function_declarations) > 0 function_declaration = schema.function_declarations[0] assert function_declaration.name == "sum_two_numbers" assert function_declaration.parameters + assert function_declaration.parameters.required is not None assert len(function_declaration.parameters.required) == 2 @tool @@ -366,18 +446,22 @@ def do_something_optional(a: float, b: float = 0) -> str: return str(a + b) schema = convert_to_genai_function_declarations([do_something_optional]) + + assert schema.function_declarations is not None + assert len(schema.function_declarations) > 0 function_declaration = schema.function_declarations[0] assert function_declaration.name == "do_something_optional" assert function_declaration.parameters + assert function_declaration.parameters.required is not None assert len(function_declaration.parameters.required) == 1 src = [src for src, _, _, _ in SRC_EXP_MOCKS_DESC] fds = [fd for _, fd, _, _ in SRC_EXP_MOCKS_DESC] - expected = glm.Tool(function_declarations=fds) + expected = Tool(function_declarations=fds) result = convert_to_genai_function_declarations(src) assert result == expected - src_2 = glm.Tool(google_search_retrieval={}) + src_2 = Tool(google_search_retrieval={}) result = convert_to_genai_function_declarations([src_2]) assert result == src_2 @@ -385,7 +469,7 @@ def do_something_optional(a: float, b: float = 0) -> str: result = convert_to_genai_function_declarations([src_3]) assert result == src_2 - src_4 = glm.Tool(google_search={}) + src_4 = Tool(google_search={}) result = convert_to_genai_function_declarations([src_4]) assert result == src_4 @@ -396,8 +480,8 @@ def do_something_optional(a: float, b: float = 0) -> str: with pytest.raises(Exception) as exc_info: _ = convert_to_genai_function_declarations( [ - glm.Tool(google_search_retrieval={}), - glm.Tool(google_search_retrieval={}), + Tool(google_search_retrieval={}), + Tool(google_search_retrieval={}), ] ) assert str(exc_info.value).startswith("Providing multiple google_search_retrieval") @@ -441,176 +525,86 @@ def search_web( tools = [split_documents, search_web] # Convert to OpenAI first to mimic what we do in bind_tools. 
oai_tools = [convert_to_openai_tool(t) for t in tools] - expected = [ - { - "name": "split_documents", - "description": "Tool.", - "parameters": { - "type_": 6, - "properties": { - "chunk_size": { - "type_": 3, - "description": "chunk size.", - "format_": "", - "title": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "chunk_overlap": { - "type_": 3, - "description": "chunk overlap.", - "nullable": True, - "format_": "", - "title": "", - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - }, - "required": ["chunk_size"], - "format_": "", - "title": "", - "description": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "behavior": 0, - }, - { - "name": "search_web", - "description": "Tool.", - "parameters": { - "type_": 6, - "properties": { - "truncate_threshold": { - "type_": 3, - "description": "truncate threshold.", - "nullable": True, - "format_": "", - "title": "", - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "engine": { - "type_": 1, - "description": "engine.", - "format_": "", - "title": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "query": { - "type_": 1, - "description": "query.", - "format_": "", - "title": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "num_results": { - "type_": 3, - "description": "number of results.", - "format_": "", - "title": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - }, - "required": ["query"], - "format_": "", - "title": "", - "description": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "behavior": 0, - }, - ] actual = tool_to_dict(convert_to_genai_function_declarations(oai_tools))[ "function_declarations" ] - assert expected == actual + + # Check that we have the expected number of function declarations + assert len(actual) == 2 + + # Check the first function declaration (split_documents) + assert len(actual) > 0 + split_docs = actual[0] + assert isinstance(split_docs, dict) + assert 
split_docs["name"] == "split_documents" + assert split_docs["description"] == "Tool." + assert split_docs["behavior"] is None + + # Check parameters structure + params = split_docs["parameters"] + assert params["type"]["_value_"] == "OBJECT" + assert params["required"] == ["chunk_size"] + + # Check properties + properties = params["properties"] + assert "chunk_size" in properties + assert "chunk_overlap" in properties + + # Check chunk_size property + chunk_size_prop = properties["chunk_size"] + assert chunk_size_prop["type"]["_value_"] == "INTEGER" + assert chunk_size_prop["description"] == "chunk size." + assert chunk_size_prop["nullable"] is None + + # Check chunk_overlap property + chunk_overlap_prop = properties["chunk_overlap"] + assert chunk_overlap_prop["type"]["_value_"] == "INTEGER" + assert chunk_overlap_prop["description"] == "chunk overlap." + assert chunk_overlap_prop["nullable"] is True + + # Check the second function declaration (search_web) + assert len(actual) > 1 + search_web_func = actual[1] + assert isinstance(search_web_func, dict) + assert search_web_func["name"] == "search_web" + assert search_web_func["description"] == "Tool." + assert search_web_func["behavior"] is None + + # Check parameters structure + params = search_web_func["parameters"] + assert params["type"]["_value_"] == "OBJECT" + assert params["required"] == ["query"] + + # Check properties + properties = params["properties"] + assert "query" in properties + assert "engine" in properties + assert "num_results" in properties + assert "truncate_threshold" in properties + + # Check query property + query_prop = properties["query"] + assert query_prop["type"]["_value_"] == "STRING" + assert query_prop["description"] == "query." + assert query_prop["nullable"] is None + + # Check engine property + engine_prop = properties["engine"] + assert engine_prop["type"]["_value_"] == "STRING" + assert engine_prop["description"] == "engine." + assert engine_prop["nullable"] is None + + # Check num_results property + num_results_prop = properties["num_results"] + assert num_results_prop["type"]["_value_"] == "INTEGER" + assert num_results_prop["description"] == "number of results." + assert num_results_prop["nullable"] is None + + # Check truncate_threshold property + truncate_prop = properties["truncate_threshold"] + assert truncate_prop["type"]["_value_"] == "INTEGER" + assert truncate_prop["description"] == "truncate threshold." 
+ assert truncate_prop["nullable"] is True def test_format_native_dict_to_genai_function() -> None: @@ -623,9 +617,9 @@ def test_format_native_dict_to_genai_function() -> None: ] } schema = convert_to_genai_function_declarations([calculator]) - expected = glm.Tool( + expected = Tool( function_declarations=[ - glm.FunctionDeclaration( + FunctionDeclaration( name="multiply", description="Returns the product of two numbers.", parameters=None, @@ -651,6 +645,8 @@ def test_format_dict_to_genai_function() -> None: ] } schema = convert_to_genai_function_declarations([calculator]) + assert schema.function_declarations is not None + assert len(schema.function_declarations) > 0 function_declaration = schema.function_declarations[0] assert function_declaration.name == "search" assert function_declaration.parameters @@ -659,27 +655,27 @@ def test_format_dict_to_genai_function() -> None: @pytest.mark.parametrize("choice", [True, "foo", ["foo"], "any"]) def test__tool_choice_to_tool_config(choice: Any) -> None: - expected = _ToolConfigDict( - function_calling_config={ - "mode": "ANY", - "allowed_function_names": ["foo"], - }, + expected = ToolConfig( + function_calling_config=FunctionCallingConfig( + mode=FunctionCallingConfigMode.ANY, + allowed_function_names=["foo"], + ), ) actual = _tool_choice_to_tool_config(choice, ["foo"]) assert expected == actual def test_tool_to_dict_glm_tool() -> None: - tool = glm.Tool( + tool = Tool( function_declarations=[ - glm.FunctionDeclaration( + FunctionDeclaration( name="multiply", description="Returns the product of two numbers.", - parameters=glm.Schema( - type=glm.Type.OBJECT, + parameters=Schema( + type=Type.OBJECT, properties={ - "a": glm.Schema(type=glm.Type.NUMBER), - "b": glm.Schema(type=glm.Type.NUMBER), + "a": Schema(type=Type.NUMBER), + "b": Schema(type=Type.NUMBER), }, required=["a", "b"], ), @@ -717,112 +713,54 @@ class Models(BaseModel): gapic_tool = convert_to_genai_function_declarations([Models]) tool_dict = tool_to_dict(gapic_tool) - assert tool_dict == { - "function_declarations": [ - { - "name": "Models", - "parameters": { - "type_": 6, - "properties": { - "models": { - "type_": 5, - "items": { - "type_": 6, - "description": "MyModel", - "properties": { - "age": { - "type_": 3, - "format_": "", - "title": "", - "description": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "name": { - "type_": 1, - "format_": "", - "title": "", - "description": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - }, - "required": ["name", "age"], - "format_": "", - "title": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "format_": "", - "title": "", - "description": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "properties": {}, - "required": [], - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - } - 
}, - "required": ["models"], - "format_": "", - "title": "", - "description": "", - "nullable": False, - "enum": [], - "max_items": "0", - "min_items": "0", - "min_properties": "0", - "max_properties": "0", - "min_length": "0", - "max_length": "0", - "pattern": "", - "any_of": [], - "property_ordering": [], - }, - "description": "", - "behavior": 0, - } - ] - } + + # Check that we have the expected structure + assert "function_declarations" in tool_dict + assert len(tool_dict["function_declarations"]) == 1 + + # Check the function declaration + assert "function_declarations" in tool_dict + assert len(tool_dict["function_declarations"]) > 0 + func_decl = tool_dict["function_declarations"][0] + assert isinstance(func_decl, dict) + assert func_decl["name"] == "Models" + assert func_decl["description"] is None + assert func_decl["behavior"] is None + + # Check parameters structure + params = func_decl["parameters"] + assert params["type"]["_value_"] == "OBJECT" + assert params["required"] == ["models"] + + # Check properties + properties = params["properties"] + assert "models" in properties + + # Check models property (array of MyModel) + models_prop = properties["models"] + assert models_prop["type"]["_value_"] == "ARRAY" + assert models_prop["nullable"] is None + + # Check items of the array + items = models_prop["items"] + assert items["type"]["_value_"] == "OBJECT" + assert items["description"] == "MyModel" + assert items["required"] == ["name", "age"] + + # Check properties of MyModel + model_properties = items["properties"] + assert "name" in model_properties + assert "age" in model_properties + + # Check name property + name_prop = model_properties["name"] + assert name_prop["type"]["_value_"] == "STRING" + assert name_prop["nullable"] is None + + # Check age property + age_prop = model_properties["age"] + assert age_prop["type"]["_value_"] == "INTEGER" + assert age_prop["nullable"] is None def test_tool_to_dict_pydantic_without_import(mock_safe_import: MagicMock) -> None: @@ -876,21 +814,15 @@ def process_nested_data( matrix_property = properties.get("matrix") assert isinstance(matrix_property, dict) - assert matrix_property.get("type_") == glm.Type.ARRAY, ( - "Expected 'matrix' to be ARRAY." - ) + assert_property_type(matrix_property, Type.ARRAY, "matrix") items_level1 = matrix_property.get("items") assert isinstance(items_level1, dict), "Expected first level 'items' to be a dict." - assert items_level1.get("type_") == glm.Type.ARRAY, ( - "Expected first level items to be ARRAY." - ) + assert_property_type(items_level1, Type.ARRAY, "first level items") items_level2 = items_level1.get("items") assert isinstance(items_level2, dict), "Expected second level 'items' to be a dict." - assert items_level2.get("type_") == glm.Type.STRING, ( - "Expected second level items to be STRING." - ) + assert_property_type(items_level2, Type.STRING, "second level items") assert "description" in matrix_property assert "description" in items_level1 @@ -954,18 +886,14 @@ class GetWeather(BaseModel): assert isinstance(helper1, dict), "Expected first option to be a dict." assert "properties" in helper1, "Expected first option to have properties." assert "x" in helper1["properties"], "Expected first option to have 'x' property." - assert helper1["properties"]["x"]["type_"] == glm.Type.BOOLEAN, ( - "Expected 'x' to be BOOLEAN." 
- ) + assert_property_type(helper1["properties"]["x"], Type.BOOLEAN, "x") # Check second option (Helper2) helper2 = any_of[1] assert isinstance(helper2, dict), "Expected second option to be a dict." assert "properties" in helper2, "Expected second option to have properties." assert "y" in helper2["properties"], "Expected second option to have 'y' property." - assert helper2["properties"]["y"]["type_"] == glm.Type.STRING, ( - "Expected 'y' to be STRING." - ) + assert_property_type(helper2["properties"]["y"], Type.STRING, "y") def test_tool_with_union_primitive_types() -> None: @@ -1016,23 +944,31 @@ class SearchQuery(BaseModel): assert len(any_of) == 2, "Expected 'any_of' to have 2 options." # One option should be a string - string_option = next( - (opt for opt in any_of if opt.get("type_") == glm.Type.STRING), None - ) - assert string_option is not None, "Expected one option to be a STRING." + # Just verify string option exists + _ = find_any_of_option_by_type(any_of, Type.STRING) # One option should be an object (Helper) - object_option = next( - (opt for opt in any_of if opt.get("type_") == glm.Type.OBJECT), None - ) - assert object_option is not None, "Expected one option to be an OBJECT." + object_option = find_any_of_option_by_type(any_of, Type.OBJECT) assert "properties" in object_option, "Expected object option to have properties." assert "value" in object_option["properties"], ( "Expected object option to have 'value' property." ) - assert object_option["properties"]["value"]["type_"] == 3, ( - "Expected 'value' to be NUMBER or INTEGER." - ) + # Note: This assertion expects the raw enum integer value (3 for NUMBER) + # This is a special case where the test was expecting the integer value + value_type = object_option["properties"]["value"].get("type", {}) + if isinstance(value_type, dict): + # For serialized enum, check _value_ and convert to enum to get integer + type_str = value_type.get("_value_") + if type_str == "NUMBER": + assert True, "Expected 'value' to be NUMBER." + elif type_str == "INTEGER": + assert True, "Expected 'value' to be INTEGER." + else: + assert False, f"Expected 'value' to be NUMBER or INTEGER, got {type_str}" + else: + assert value_type == 3, ( + f"Expected 'value' to be NUMBER or INTEGER (3), got {value_type}" + ) def test_tool_with_nested_union_types() -> None: @@ -1085,16 +1021,10 @@ class Person(BaseModel): assert len(location_any_of) == 2, "Expected 'location.any_of' to have 2 options." # One option should be a string - string_option = next( - (opt for opt in location_any_of if opt.get("type_") == glm.Type.STRING), None - ) - assert string_option is not None, "Expected one location option to be a STRING." - + # Just verify string option exists + _ = find_any_of_option_by_type(location_any_of, Type.STRING) # One option should be an object (Address) - address_option = next( - (opt for opt in location_any_of if opt.get("type_") == glm.Type.OBJECT), None - ) - assert address_option is not None, "Expected one location option to be an OBJECT." + address_option = find_any_of_option_by_type(location_any_of, Type.OBJECT) assert "properties" in address_option, "Expected address option to have properties" assert "city" in address_option["properties"], ( "Expected Address to have 'city' property." 
@@ -1130,6 +1060,8 @@ def configure_service(service_name: str, config: str | Configuration) -> str: genai_tool = convert_to_genai_function_declarations([oai_tool]) # Get function declaration + assert genai_tool.function_declarations is not None + assert len(genai_tool.function_declarations) > 0 function_declaration = genai_tool.function_declarations[0] # Check parameters @@ -1139,6 +1071,7 @@ def configure_service(service_name: str, config: str | Configuration) -> str: # Check for config property config_property = None + assert parameters.properties is not None, "Expected properties to exist" for prop_name, prop in parameters.properties.items(): if prop_name == "config": config_property = prop @@ -1146,22 +1079,24 @@ def configure_service(service_name: str, config: str | Configuration) -> str: assert config_property is not None, "Expected 'config' property to exist" assert hasattr(config_property, "any_of"), "Expected any_of attribute on config" + assert config_property.any_of is not None, "Expected any_of to not be None" assert len(config_property.any_of) == 2, "Expected config.any_of to have 2 options" # Check both variants of the Union type type_variants = [option.type for option in config_property.any_of] - assert glm.Type.STRING in type_variants, "Expected STRING to be one of the variants" - assert glm.Type.OBJECT in type_variants, "Expected OBJECT to be one of the variants" + assert Type.STRING in type_variants, "Expected STRING to be one of the variants" + assert Type.OBJECT in type_variants, "Expected OBJECT to be one of the variants" # Find the object variant object_variant = None for option in config_property.any_of: - if option.type == glm.Type.OBJECT: + if option.type == Type.OBJECT: object_variant = option break assert object_variant is not None, "Expected to find an object variant" assert hasattr(object_variant, "properties"), "Expected object to have properties" + assert object_variant.properties is not None, "Expected properties to not be None" # Check for settings property has_settings = False @@ -1209,15 +1144,16 @@ class GetWeather(BaseModel): function_declarations = genai_tool_dict.get("function_declarations", []) assert len(function_declarations) > 0, "Expected at least one function declaration" fn_decl = function_declarations[0] + assert isinstance(fn_decl, dict), "Expected function declaration to be a dict" # Check the name and description - assert fn_decl.get("name") == "GetWeather", "Expected name to be 'GetWeather'" # type: ignore - assert "Get weather information" in fn_decl.get("description", ""), ( # type: ignore + assert fn_decl.get("name") == "GetWeather", "Expected name to be 'GetWeather'" + assert "Get weather information" in fn_decl.get("description", ""), ( "Expected description to include weather information" ) # Check parameters - parameters = fn_decl.get("parameters", {}) # type: ignore + parameters = fn_decl.get("parameters", {}) properties = parameters.get("properties", {}) # Check location property @@ -1258,7 +1194,7 @@ class GetWeather(BaseModel): def test_union_type_schema_validation() -> None: - """Test that `Union` types get proper `type_` assignment for Gemini + """Test that `Union` types get proper `type` assignment for Gemini compatibility.""" class Response(BaseModel): @@ -1278,14 +1214,17 @@ class Act(BaseModel): # Convert to GenAI function declaration openai_func = convert_to_openai_function(Act) - genai_func = _format_to_gapic_function_declaration(openai_func) + genai_func = _format_to_genai_function_declaration(openai_func) # The action 
property should have a valid type (not 0) for Gemini compatibility + assert genai_func.parameters is not None, "genai_func.parameters should not be None" + assert genai_func.parameters.properties is not None, ( + "genai_func.parameters.properties should not be None" + ) action_prop = genai_func.parameters.properties["action"] - assert action_prop.type_ == glm.Type.OBJECT, ( - f"Union type should have OBJECT type, got {action_prop.type_}" + assert action_prop.type == Type.OBJECT, ( + f"Union type should have OBJECT type, got {action_prop.type}" ) - assert action_prop.type_ != 0, "Union type should not have type_ = 0" def test_optional_dict_schema_validation() -> None: @@ -1302,15 +1241,16 @@ class RequestsGetToolInput(BaseModel): # Convert to GenAI function declaration openai_func = convert_to_openai_function(RequestsGetToolInput) - genai_func = _format_to_gapic_function_declaration(openai_func) + genai_func = _format_to_genai_function_declaration(openai_func) # The params property should have OBJECT type, not STRING - params_prop = genai_func.parameters.properties["params"] - assert params_prop.type_ == glm.Type.OBJECT, ( - f"Optional[dict] should have OBJECT type, got {params_prop.type_}" + assert genai_func.parameters is not None, "genai_func.parameters should not be None" + assert genai_func.parameters.properties is not None, ( + "genai_func.parameters.properties should not be None" ) - assert params_prop.type_ != glm.Type.STRING, ( - "Optional[dict] should not be converted to STRING type" + params_prop = genai_func.parameters.properties["params"] + assert params_prop.type == Type.OBJECT, ( + f"Optional[dict] should have OBJECT type, got {params_prop.type}" ) assert params_prop.nullable is True, "Optional[dict] should be nullable" assert params_prop.description == "Query parameters for the GET request", ( @@ -1341,9 +1281,46 @@ class ToolInfo(BaseModel): # Check location property assert "kind" in properties kind_property = properties["kind"] - assert kind_property["type_"] == glm.Type.ARRAY + # Compare using _value_ because tool_to_dict serializes Type enums to dicts with + # '_value_' field + assert kind_property["type"]["_value_"] == "ARRAY" assert "items" in kind_property items_property = kind_property["items"] - assert items_property["type_"] == glm.Type.STRING + # Compare using _value_ because tool_to_dict serializes Type enums to dicts with + # '_value_' field + assert items_property["type"]["_value_"] == "STRING" assert items_property["enum"] == ["foo", "bar"] + + +def test_tool_with_non_class_args_schema() -> None: + """Test that tools with non-class `args_schema` are handled gracefully. + + This test reproduces the issue from GitHub issue #1360 where tools with + invalid `args_schema` (not a class, not a dict) would cause a TypeError + when issubclass() was called on a non-class object. 
+ """ + + class ToolWithInvalidArgsSchema(BaseTool): + """A tool with an invalid args_schema for testing.""" + + name: str = "test_tool" + description: str = "A test tool with invalid args_schema" + + def _run(self, *args: Any, **kwargs: Any) -> str: + return "result" + + # Create a tool instance and set args_schema to a non-class object + tool = ToolWithInvalidArgsSchema() + # Simulate MCP tools with empty/invalid args_schema by setting it to a + # truthy non-class value (like a string, list, or other object) + tool.args_schema = "not_a_class" # type: ignore[assignment] + + # This should raise NotImplementedError with a clear message + # rather than TypeError from issubclass() + with pytest.raises(NotImplementedError) as exc_info: + _format_base_tool_to_function_declaration(tool) + + assert "args_schema must be a Pydantic BaseModel or JSON schema" in str( + exc_info.value + ) diff --git a/libs/genai/tests/unit_tests/test_imports.py b/libs/genai/tests/unit_tests/test_imports.py index e1a5fc046..0324f2d38 100644 --- a/libs/genai/tests/unit_tests/test_imports.py +++ b/libs/genai/tests/unit_tests/test_imports.py @@ -10,6 +10,7 @@ "HarmCategory", "Modality", "MediaResolution", + "create_context_cache", ] diff --git a/libs/genai/tests/unit_tests/test_llms.py b/libs/genai/tests/unit_tests/test_llms.py index 22450bb9a..d0061c587 100644 --- a/libs/genai/tests/unit_tests/test_llms.py +++ b/libs/genai/tests/unit_tests/test_llms.py @@ -1,6 +1,6 @@ from unittest.mock import ANY, Mock, patch -from google.ai.generativelanguage_v1beta.types import ( +from google.genai.types import ( Candidate, Content, GenerateContentResponse, @@ -59,20 +59,30 @@ def test_tracing_params() -> None: def test_base_url_support() -> None: """Test that `base_url` is properly passed through to `ChatGoogleGenerativeAI`.""" - mock_client = Mock() + mock_client_instance = Mock() + mock_models = Mock() mock_generate_content = Mock() - mock_generate_content.return_value = GenerateContentResponse( - candidates=[Candidate(content=Content(parts=[Part(text="test response")]))] + + # Create a proper mock response with the required attributes + mock_response = GenerateContentResponse( + candidates=[Candidate(content=Content(parts=[Part(text="test response")]))], + prompt_feedback=None, # This is optional and can be None ) - mock_client.return_value.generate_content = mock_generate_content + mock_generate_content.return_value = mock_response + mock_models.generate_content = mock_generate_content + mock_client_instance.models = mock_models + + mock_client_class = Mock() + mock_client_class.return_value = mock_client_instance + base_url = "https://example.com" param_api_key = "[secret]" param_secret_api_key = SecretStr(param_api_key) param_transport = "rest" with patch( - "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient", - mock_client, + "langchain_google_genai.chat_models.Client", + mock_client_class, ): llm = GoogleGenerativeAI( model=MODEL_NAME, @@ -81,14 +91,12 @@ def test_base_url_support() -> None: transport=param_transport, ) - response = llm.invoke("test") - assert response == "test response" + response = llm.invoke("test") + assert response == "test response" - mock_client.assert_called_once_with( - transport=param_transport, - client_options=ANY, - client_info=ANY, + mock_client_class.assert_called_once_with( + api_key=param_api_key, + http_options=ANY, ) - call_client_options = mock_client.call_args_list[0].kwargs["client_options"] - assert call_client_options.api_key == param_api_key - assert 
call_client_options.api_endpoint == base_url + call_http_options = mock_client_class.call_args_list[0].kwargs["http_options"] + assert call_http_options.base_url == base_url diff --git a/libs/genai/tests/unit_tests/test_standard.py b/libs/genai/tests/unit_tests/test_standard.py index c36676680..5ceb6781e 100644 --- a/libs/genai/tests/unit_tests/test_standard.py +++ b/libs/genai/tests/unit_tests/test_standard.py @@ -5,6 +5,8 @@ MODEL_NAME = "gemini-2.5-flash" +FAKE_API_KEY = "fake-api-key" + class TestGeminiAIStandard(ChatModelUnitTests): @property @@ -13,12 +15,39 @@ def chat_model_class(self) -> type[BaseChatModel]: @property def chat_model_params(self) -> dict: - return {"model": MODEL_NAME} + return { + "model": MODEL_NAME, + "google_api_key": FAKE_API_KEY, + } @property def init_from_env_params(self) -> tuple[dict, dict, dict]: return ( {"GOOGLE_API_KEY": "api_key"}, - self.chat_model_params, + {"model": MODEL_NAME}, {"google_api_key": "api_key"}, ) + + @property + def supports_image_inputs(self) -> bool: + return True + + @property + def supports_image_urls(self) -> bool: + return True + + @property + def supports_pdf_inputs(self) -> bool: + return True + + @property + def supports_audio_inputs(self) -> bool: + return True + + @property + def supports_image_tool_message(self) -> bool: + return True + + @property + def supports_pdf_tool_message(self) -> bool: + return True diff --git a/libs/genai/uv.lock b/libs/genai/uv.lock index 32e1fc112..c0cc407c4 100644 --- a/libs/genai/uv.lock +++ b/libs/genai/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.10.0, <4.0.0" resolution-markers = [ "python_full_version >= '3.14' and platform_python_implementation == 'PyPy'", @@ -23,26 +23,25 @@ wheels = [ [[package]] name = "anyio" -version = "4.11.0" +version = "4.12.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, { name = "idna" }, - { name = "sniffio" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, ] [[package]] name = "cachetools" -version = "6.2.1" +version = "6.2.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cc/7e/b975b5814bd36faf009faebe22c1072a1fa1168db34d285ef0ba071ad78c/cachetools-6.2.1.tar.gz", hash = 
"sha256:3f391e4bd8f8bf0931169baf7456cc822705f4e2a31f840d218f445b9a854201", size = 31325, upload-time = "2025-10-12T14:55:30.139Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/44/ca1675be2a83aeee1886ab745b28cda92093066590233cc501890eb8417a/cachetools-6.2.2.tar.gz", hash = "sha256:8e6d266b25e539df852251cfd6f990b4bc3a141db73b939058d809ebd2590fc6", size = 31571, upload-time = "2025-11-13T17:42:51.465Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/96/c5/1e741d26306c42e2bf6ab740b2202872727e0f606033c9dd713f8b93f5a8/cachetools-6.2.1-py3-none-any.whl", hash = "sha256:09868944b6dde876dfd44e1d47e18484541eaf12f26f29b7af91b26cc892d701", size = 11280, upload-time = "2025-10-12T14:55:28.382Z" }, + { url = "https://files.pythonhosted.org/packages/e6/46/eb6eca305c77a4489affe1c5d8f4cae82f285d9addd8de4ec084a7184221/cachetools-6.2.2-py3-none-any.whl", hash = "sha256:6c09c98183bf58560c97b2abfcedcbaf6a896a490f534b031b661d3723b45ace", size = 11503, upload-time = "2025-11-13T17:42:50.232Z" }, ] [[package]] @@ -236,14 +235,14 @@ wheels = [ [[package]] name = "exceptiongroup" -version = "1.3.0" +version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" } +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, ] [[package]] @@ -319,6 +318,30 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6f/d1/385110a9ae86d91cc14c5282c61fe9f4dc41c0b9f7d423c6ad77038c4448/google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16", size = 223114, upload-time = "2025-11-06T00:13:35.209Z" }, ] +[package.optional-dependencies] +requests = [ + { name = "requests" }, +] + +[[package]] +name = "google-genai" +version = "1.53.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "google-auth", extra = ["requests"] }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "tenacity" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b3/36fbfde2e21e6d3bc67780b61da33632f495ab1be08076cf0a16af74098f/google_genai-1.53.0.tar.gz", hash = "sha256:938a26d22f3fd32c6eeeb4276ef204ef82884e63af9842ce3eac05ceb39cbd8d", size = 260102, upload-time = "2025-12-03T17:21:23.233Z" } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/40/f2/97fefdd1ad1f3428321bac819ae7a83ccc59f6439616054736b7819fa56c/google_genai-1.53.0-py3-none-any.whl", hash = "sha256:65a3f99e5c03c372d872cda7419f5940e723374bb12a2f3ffd5e3e56e8eb2094", size = 262015, upload-time = "2025-12-03T17:21:21.934Z" }, +] + [[package]] name = "googleapis-common-protos" version = "1.72.0" @@ -484,7 +507,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "1.1.0" +version = "1.1.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonpatch" }, @@ -494,10 +517,11 @@ dependencies = [ { name = "pyyaml" }, { name = "tenacity" }, { name = "typing-extensions" }, + { name = "uuid-utils" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1e/17/67c1cc2ace919e2b02dd9d783154d7fb3f1495a4ef835d9cd163b7855ac2/langchain_core-1.1.0.tar.gz", hash = "sha256:2b76a82d427922c8bc51c08404af4fc2a29e9f161dfe2297cb05091e810201e7", size = 781995, upload-time = "2025-11-21T21:01:26.958Z" } +sdist = { url = "https://files.pythonhosted.org/packages/95/9b/da155eee3a21ecec2e1d6b78ca5fd2ee1936dcd36b0bf0b70d94b5ac6fce/langchain_core-1.1.1.tar.gz", hash = "sha256:029877a34ac5dedefe9c5c36e3c1206e56fc4cac6025df933277451b6df177ad", size = 799487, upload-time = "2025-12-04T19:55:12.935Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/71/1e/e129fc471a2d2a7b3804480a937b5ab9319cab9f4142624fcb115f925501/langchain_core-1.1.0-py3-none-any.whl", hash = "sha256:2c9f27dadc6d21ed4aa46506a37a56e6a7e2d2f9141922dc5c251ba921822ee6", size = 473752, upload-time = "2025-11-21T21:01:25.841Z" }, + { url = "https://files.pythonhosted.org/packages/56/22/d8dc366b13ad394a4abc1d61fbb3cc638c0d3f959ad7981889f1f47fd6fd/langchain_core-1.1.1-py3-none-any.whl", hash = "sha256:98c8cfe7a29448f9d987313be1afac2686e3d12f2f38be024ee9f729ff7cb87c", size = 475045, upload-time = "2025-12-04T19:55:11.715Z" }, ] [[package]] @@ -507,6 +531,7 @@ source = { editable = "." 
} dependencies = [ { name = "filetype" }, { name = "google-ai-generativelanguage" }, + { name = "google-genai" }, { name = "langchain-core" }, { name = "pydantic" }, ] @@ -519,7 +544,7 @@ test = [ { name = "freezegun" }, { name = "langchain-tests" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "numpy", version = "2.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-mock" }, @@ -534,7 +559,7 @@ test-integration = [ typing = [ { name = "mypy" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "numpy", version = "2.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "types-google-cloud-ndb" }, { name = "types-protobuf" }, { name = "types-requests", version = "2.31.0.6", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, @@ -545,6 +570,7 @@ typing = [ requires-dist = [ { name = "filetype", specifier = ">=1.2.0,<2.0.0" }, { name = "google-ai-generativelanguage", specifier = ">=0.9.0,<1.0.0" }, + { name = "google-genai", specifier = ">=1.53.0,<2.0.0" }, { name = "langchain-core", specifier = ">=1.1.0,<2.0.0" }, { name = "pydantic", specifier = ">=2.0.0,<3.0.0" }, ] @@ -575,13 +601,13 @@ typing = [ [[package]] name = "langchain-tests" -version = "1.0.1" +version = "1.0.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, { name = "langchain-core" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "numpy", version = "2.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-benchmark" }, @@ -591,14 +617,14 @@ dependencies = [ { name = "syrupy" }, { name = "vcrpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2c/84/302dddf9f8388a0ecc0436603f76496a31b429a209de6dfa839288fe0db5/langchain_tests-1.0.1.tar.gz", hash = "sha256:42932b48d3bb77363104206f499441a81dc1563770b44757ad5e68226106ffd8", size = 159689, upload-time = "2025-11-03T14:48:44.789Z" } +sdist = { url = "https://files.pythonhosted.org/packages/81/73/29ff92b758e49c967b6816ca8ca7ad091b77bbf113741a9f628335d8373b/langchain_tests-1.0.2.tar.gz", hash = "sha256:7e96d82499ee32ab141e93bdeb122c942db41ff326bcc87ca27290ede92f0f78", size = 159564, upload-time = "2025-11-22T23:37:09.015Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/31/25/03be05a666ea77353df2d66032058bb04b6e9e4ddb3b8a63c3680b1278ac/langchain_tests-1.0.1-py3-none-any.whl", hash = "sha256:7a74fe22fedc490c44b319ede61aa652318f601ed11f76fdf781eec29a4500c4", size = 50861, upload-time = "2025-11-03T14:48:43.2Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/b6/96434f4a01f7270ed29918c37c2e6b82ce208780f94c43a5ea133bd94a95/langchain_tests-1.0.2-py3-none-any.whl", hash = "sha256:713936d9e474ba39eeade95ee8e9d2e237b15a74d4896cae66ef1a9bd0a44d48", size = 50796, upload-time = "2025-11-22T23:37:07.612Z" }, ] [[package]] name = "langsmith" -version = "0.4.42" +version = "0.4.53" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, @@ -607,11 +633,12 @@ dependencies = [ { name = "pydantic" }, { name = "requests" }, { name = "requests-toolbelt" }, + { name = "uuid-utils" }, { name = "zstandard" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c8/91/939cb2fa0317a8aedd0b4dab6c189722e5ef15abcf65304dc929e582826a/langsmith-0.4.42.tar.gz", hash = "sha256:a6e808e47581403cb019b47c8c10627c1644f78ed4c03fa877d6ad661476c38f", size = 953877, upload-time = "2025-11-09T16:31:21.503Z" } +sdist = { url = "https://files.pythonhosted.org/packages/49/1c/8c4fbb995d176594d79e7347ca5e3cf1a839d300bee2b6b38861fbf57809/langsmith-0.4.53.tar.gz", hash = "sha256:362255850ac80abf6edc9e9b3455c42aa248e12686a24c637d4c56fc41139ffe", size = 990765, upload-time = "2025-12-03T01:00:43.505Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0f/17/4280bc381b40a642ea5efe1bab0237f03507a9d4281484c5baa1db82055a/langsmith-0.4.42-py3-none-any.whl", hash = "sha256:015b0a0c17eb1a61293e8cbb7d41778a4b37caddd267d54274ba94e4721b301b", size = 401937, upload-time = "2025-11-09T16:31:19.163Z" }, + { url = "https://files.pythonhosted.org/packages/fc/48/37cc533e2d16e4ec1d01f30b41933c9319af18389ea0f6866835ace7d331/langsmith-0.4.53-py3-none-any.whl", hash = "sha256:62e0b69d0f3b25afbd63dc5743a3bcec52993fe6c4e43e5b9e5311606aa04e9e", size = 411526, upload-time = "2025-12-03T01:00:42.053Z" }, ] [[package]] @@ -895,7 +922,7 @@ wheels = [ [[package]] name = "numpy" -version = "2.3.4" +version = "2.3.5" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.14' and platform_python_implementation == 'PyPy'", @@ -905,81 +932,81 @@ resolution-markers = [ "python_full_version >= '3.11' and python_full_version < '3.13' and platform_python_implementation == 'PyPy'", "python_full_version >= '3.11' and python_full_version < '3.13' and platform_python_implementation != 'PyPy'", ] -sdist = { url = "https://files.pythonhosted.org/packages/b5/f4/098d2270d52b41f1bd7db9fc288aaa0400cb48c2a3e2af6fa365d9720947/numpy-2.3.4.tar.gz", hash = "sha256:a7d018bfedb375a8d979ac758b120ba846a7fe764911a64465fd87b8729f4a6a", size = 20582187, upload-time = "2025-10-15T16:18:11.77Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/60/e7/0e07379944aa8afb49a556a2b54587b828eb41dc9adc56fb7615b678ca53/numpy-2.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e78aecd2800b32e8347ce49316d3eaf04aed849cd5b38e0af39f829a4e59f5eb", size = 21259519, upload-time = "2025-10-15T16:15:19.012Z" }, - { url = "https://files.pythonhosted.org/packages/d0/cb/5a69293561e8819b09e34ed9e873b9a82b5f2ade23dce4c51dc507f6cfe1/numpy-2.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7fd09cc5d65bda1e79432859c40978010622112e9194e581e3415a3eccc7f43f", size = 14452796, upload-time = "2025-10-15T16:15:23.094Z" }, - { url = "https://files.pythonhosted.org/packages/e4/04/ff11611200acd602a1e5129e36cfd25bf01ad8e5cf927baf2e90236eb02e/numpy-2.3.4-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1b219560ae2c1de48ead517d085bc2d05b9433f8e49d0955c82e8cd37bd7bf36", size = 5381639, upload-time = "2025-10-15T16:15:25.572Z" 
}, - { url = "https://files.pythonhosted.org/packages/ea/77/e95c757a6fe7a48d28a009267408e8aa382630cc1ad1db7451b3bc21dbb4/numpy-2.3.4-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:bafa7d87d4c99752d07815ed7a2c0964f8ab311eb8168f41b910bd01d15b6032", size = 6914296, upload-time = "2025-10-15T16:15:27.079Z" }, - { url = "https://files.pythonhosted.org/packages/a3/d2/137c7b6841c942124eae921279e5c41b1c34bab0e6fc60c7348e69afd165/numpy-2.3.4-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36dc13af226aeab72b7abad501d370d606326a0029b9f435eacb3b8c94b8a8b7", size = 14591904, upload-time = "2025-10-15T16:15:29.044Z" }, - { url = "https://files.pythonhosted.org/packages/bb/32/67e3b0f07b0aba57a078c4ab777a9e8e6bc62f24fb53a2337f75f9691699/numpy-2.3.4-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7b2f9a18b5ff9824a6af80de4f37f4ec3c2aab05ef08f51c77a093f5b89adda", size = 16939602, upload-time = "2025-10-15T16:15:31.106Z" }, - { url = "https://files.pythonhosted.org/packages/95/22/9639c30e32c93c4cee3ccdb4b09c2d0fbff4dcd06d36b357da06146530fb/numpy-2.3.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9984bd645a8db6ca15d850ff996856d8762c51a2239225288f08f9050ca240a0", size = 16372661, upload-time = "2025-10-15T16:15:33.546Z" }, - { url = "https://files.pythonhosted.org/packages/12/e9/a685079529be2b0156ae0c11b13d6be647743095bb51d46589e95be88086/numpy-2.3.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:64c5825affc76942973a70acf438a8ab618dbd692b84cd5ec40a0a0509edc09a", size = 18884682, upload-time = "2025-10-15T16:15:36.105Z" }, - { url = "https://files.pythonhosted.org/packages/cf/85/f6f00d019b0cc741e64b4e00ce865a57b6bed945d1bbeb1ccadbc647959b/numpy-2.3.4-cp311-cp311-win32.whl", hash = "sha256:ed759bf7a70342f7817d88376eb7142fab9fef8320d6019ef87fae05a99874e1", size = 6570076, upload-time = "2025-10-15T16:15:38.225Z" }, - { url = "https://files.pythonhosted.org/packages/7d/10/f8850982021cb90e2ec31990291f9e830ce7d94eef432b15066e7cbe0bec/numpy-2.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:faba246fb30ea2a526c2e9645f61612341de1a83fb1e0c5edf4ddda5a9c10996", size = 13089358, upload-time = "2025-10-15T16:15:40.404Z" }, - { url = "https://files.pythonhosted.org/packages/d1/ad/afdd8351385edf0b3445f9e24210a9c3971ef4de8fd85155462fc4321d79/numpy-2.3.4-cp311-cp311-win_arm64.whl", hash = "sha256:4c01835e718bcebe80394fd0ac66c07cbb90147ebbdad3dcecd3f25de2ae7e2c", size = 10462292, upload-time = "2025-10-15T16:15:42.896Z" }, - { url = "https://files.pythonhosted.org/packages/96/7a/02420400b736f84317e759291b8edaeee9dc921f72b045475a9cbdb26b17/numpy-2.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ef1b5a3e808bc40827b5fa2c8196151a4c5abe110e1726949d7abddfe5c7ae11", size = 20957727, upload-time = "2025-10-15T16:15:44.9Z" }, - { url = "https://files.pythonhosted.org/packages/18/90/a014805d627aa5750f6f0e878172afb6454552da929144b3c07fcae1bb13/numpy-2.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c2f91f496a87235c6aaf6d3f3d89b17dba64996abadccb289f48456cff931ca9", size = 14187262, upload-time = "2025-10-15T16:15:47.761Z" }, - { url = "https://files.pythonhosted.org/packages/c7/e4/0a94b09abe89e500dc748e7515f21a13e30c5c3fe3396e6d4ac108c25fca/numpy-2.3.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f77e5b3d3da652b474cc80a14084927a5e86a5eccf54ca8ca5cbd697bf7f2667", size = 5115992, upload-time = "2025-10-15T16:15:50.144Z" }, - { url = 
"https://files.pythonhosted.org/packages/88/dd/db77c75b055c6157cbd4f9c92c4458daef0dd9cbe6d8d2fe7f803cb64c37/numpy-2.3.4-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ab1c5f5ee40d6e01cbe96de5863e39b215a4d24e7d007cad56c7184fdf4aeef", size = 6648672, upload-time = "2025-10-15T16:15:52.442Z" }, - { url = "https://files.pythonhosted.org/packages/e1/e6/e31b0d713719610e406c0ea3ae0d90760465b086da8783e2fd835ad59027/numpy-2.3.4-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77b84453f3adcb994ddbd0d1c5d11db2d6bda1a2b7fd5ac5bd4649d6f5dc682e", size = 14284156, upload-time = "2025-10-15T16:15:54.351Z" }, - { url = "https://files.pythonhosted.org/packages/f9/58/30a85127bfee6f108282107caf8e06a1f0cc997cb6b52cdee699276fcce4/numpy-2.3.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4121c5beb58a7f9e6dfdee612cb24f4df5cd4db6e8261d7f4d7450a997a65d6a", size = 16641271, upload-time = "2025-10-15T16:15:56.67Z" }, - { url = "https://files.pythonhosted.org/packages/06/f2/2e06a0f2adf23e3ae29283ad96959267938d0efd20a2e25353b70065bfec/numpy-2.3.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65611ecbb00ac9846efe04db15cbe6186f562f6bb7e5e05f077e53a599225d16", size = 16059531, upload-time = "2025-10-15T16:15:59.412Z" }, - { url = "https://files.pythonhosted.org/packages/b0/e7/b106253c7c0d5dc352b9c8fab91afd76a93950998167fa3e5afe4ef3a18f/numpy-2.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dabc42f9c6577bcc13001b8810d300fe814b4cfbe8a92c873f269484594f9786", size = 18578983, upload-time = "2025-10-15T16:16:01.804Z" }, - { url = "https://files.pythonhosted.org/packages/73/e3/04ecc41e71462276ee867ccbef26a4448638eadecf1bc56772c9ed6d0255/numpy-2.3.4-cp312-cp312-win32.whl", hash = "sha256:a49d797192a8d950ca59ee2d0337a4d804f713bb5c3c50e8db26d49666e351dc", size = 6291380, upload-time = "2025-10-15T16:16:03.938Z" }, - { url = "https://files.pythonhosted.org/packages/3d/a8/566578b10d8d0e9955b1b6cd5db4e9d4592dd0026a941ff7994cedda030a/numpy-2.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:985f1e46358f06c2a09921e8921e2c98168ed4ae12ccd6e5e87a4f1857923f32", size = 12787999, upload-time = "2025-10-15T16:16:05.801Z" }, - { url = "https://files.pythonhosted.org/packages/58/22/9c903a957d0a8071b607f5b1bff0761d6e608b9a965945411f867d515db1/numpy-2.3.4-cp312-cp312-win_arm64.whl", hash = "sha256:4635239814149e06e2cb9db3dd584b2fa64316c96f10656983b8026a82e6e4db", size = 10197412, upload-time = "2025-10-15T16:16:07.854Z" }, - { url = "https://files.pythonhosted.org/packages/57/7e/b72610cc91edf138bc588df5150957a4937221ca6058b825b4725c27be62/numpy-2.3.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c090d4860032b857d94144d1a9976b8e36709e40386db289aaf6672de2a81966", size = 20950335, upload-time = "2025-10-15T16:16:10.304Z" }, - { url = "https://files.pythonhosted.org/packages/3e/46/bdd3370dcea2f95ef14af79dbf81e6927102ddf1cc54adc0024d61252fd9/numpy-2.3.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a13fc473b6db0be619e45f11f9e81260f7302f8d180c49a22b6e6120022596b3", size = 14179878, upload-time = "2025-10-15T16:16:12.595Z" }, - { url = "https://files.pythonhosted.org/packages/ac/01/5a67cb785bda60f45415d09c2bc245433f1c68dd82eef9c9002c508b5a65/numpy-2.3.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:3634093d0b428e6c32c3a69b78e554f0cd20ee420dcad5a9f3b2a63762ce4197", size = 5108673, upload-time = "2025-10-15T16:16:14.877Z" }, - { url = 
"https://files.pythonhosted.org/packages/c2/cd/8428e23a9fcebd33988f4cb61208fda832800ca03781f471f3727a820704/numpy-2.3.4-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:043885b4f7e6e232d7df4f51ffdef8c36320ee9d5f227b380ea636722c7ed12e", size = 6641438, upload-time = "2025-10-15T16:16:16.805Z" }, - { url = "https://files.pythonhosted.org/packages/3e/d1/913fe563820f3c6b079f992458f7331278dcd7ba8427e8e745af37ddb44f/numpy-2.3.4-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4ee6a571d1e4f0ea6d5f22d6e5fbd6ed1dc2b18542848e1e7301bd190500c9d7", size = 14281290, upload-time = "2025-10-15T16:16:18.764Z" }, - { url = "https://files.pythonhosted.org/packages/9e/7e/7d306ff7cb143e6d975cfa7eb98a93e73495c4deabb7d1b5ecf09ea0fd69/numpy-2.3.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fc8a63918b04b8571789688b2780ab2b4a33ab44bfe8ccea36d3eba51228c953", size = 16636543, upload-time = "2025-10-15T16:16:21.072Z" }, - { url = "https://files.pythonhosted.org/packages/47/6a/8cfc486237e56ccfb0db234945552a557ca266f022d281a2f577b98e955c/numpy-2.3.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:40cc556d5abbc54aabe2b1ae287042d7bdb80c08edede19f0c0afb36ae586f37", size = 16056117, upload-time = "2025-10-15T16:16:23.369Z" }, - { url = "https://files.pythonhosted.org/packages/b1/0e/42cb5e69ea901e06ce24bfcc4b5664a56f950a70efdcf221f30d9615f3f3/numpy-2.3.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ecb63014bb7f4ce653f8be7f1df8cbc6093a5a2811211770f6606cc92b5a78fd", size = 18577788, upload-time = "2025-10-15T16:16:27.496Z" }, - { url = "https://files.pythonhosted.org/packages/86/92/41c3d5157d3177559ef0a35da50f0cda7fa071f4ba2306dd36818591a5bc/numpy-2.3.4-cp313-cp313-win32.whl", hash = "sha256:e8370eb6925bb8c1c4264fec52b0384b44f675f191df91cbe0140ec9f0955646", size = 6282620, upload-time = "2025-10-15T16:16:29.811Z" }, - { url = "https://files.pythonhosted.org/packages/09/97/fd421e8bc50766665ad35536c2bb4ef916533ba1fdd053a62d96cc7c8b95/numpy-2.3.4-cp313-cp313-win_amd64.whl", hash = "sha256:56209416e81a7893036eea03abcb91c130643eb14233b2515c90dcac963fe99d", size = 12784672, upload-time = "2025-10-15T16:16:31.589Z" }, - { url = "https://files.pythonhosted.org/packages/ad/df/5474fb2f74970ca8eb978093969b125a84cc3d30e47f82191f981f13a8a0/numpy-2.3.4-cp313-cp313-win_arm64.whl", hash = "sha256:a700a4031bc0fd6936e78a752eefb79092cecad2599ea9c8039c548bc097f9bc", size = 10196702, upload-time = "2025-10-15T16:16:33.902Z" }, - { url = "https://files.pythonhosted.org/packages/11/83/66ac031464ec1767ea3ed48ce40f615eb441072945e98693bec0bcd056cc/numpy-2.3.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:86966db35c4040fdca64f0816a1c1dd8dbd027d90fca5a57e00e1ca4cd41b879", size = 21049003, upload-time = "2025-10-15T16:16:36.101Z" }, - { url = "https://files.pythonhosted.org/packages/5f/99/5b14e0e686e61371659a1d5bebd04596b1d72227ce36eed121bb0aeab798/numpy-2.3.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:838f045478638b26c375ee96ea89464d38428c69170360b23a1a50fa4baa3562", size = 14302980, upload-time = "2025-10-15T16:16:39.124Z" }, - { url = "https://files.pythonhosted.org/packages/2c/44/e9486649cd087d9fc6920e3fc3ac2aba10838d10804b1e179fb7cbc4e634/numpy-2.3.4-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d7315ed1dab0286adca467377c8381cd748f3dc92235f22a7dfc42745644a96a", size = 5231472, upload-time = "2025-10-15T16:16:41.168Z" }, - { url = 
"https://files.pythonhosted.org/packages/3e/51/902b24fa8887e5fe2063fd61b1895a476d0bbf46811ab0c7fdf4bd127345/numpy-2.3.4-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:84f01a4d18b2cc4ade1814a08e5f3c907b079c847051d720fad15ce37aa930b6", size = 6739342, upload-time = "2025-10-15T16:16:43.777Z" }, - { url = "https://files.pythonhosted.org/packages/34/f1/4de9586d05b1962acdcdb1dc4af6646361a643f8c864cef7c852bf509740/numpy-2.3.4-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:817e719a868f0dacde4abdfc5c1910b301877970195db9ab6a5e2c4bd5b121f7", size = 14354338, upload-time = "2025-10-15T16:16:46.081Z" }, - { url = "https://files.pythonhosted.org/packages/1f/06/1c16103b425de7969d5a76bdf5ada0804b476fed05d5f9e17b777f1cbefd/numpy-2.3.4-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85e071da78d92a214212cacea81c6da557cab307f2c34b5f85b628e94803f9c0", size = 16702392, upload-time = "2025-10-15T16:16:48.455Z" }, - { url = "https://files.pythonhosted.org/packages/34/b2/65f4dc1b89b5322093572b6e55161bb42e3e0487067af73627f795cc9d47/numpy-2.3.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2ec646892819370cf3558f518797f16597b4e4669894a2ba712caccc9da53f1f", size = 16134998, upload-time = "2025-10-15T16:16:51.114Z" }, - { url = "https://files.pythonhosted.org/packages/d4/11/94ec578896cdb973aaf56425d6c7f2aff4186a5c00fac15ff2ec46998b46/numpy-2.3.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:035796aaaddfe2f9664b9a9372f089cfc88bd795a67bd1bfe15e6e770934cf64", size = 18651574, upload-time = "2025-10-15T16:16:53.429Z" }, - { url = "https://files.pythonhosted.org/packages/62/b7/7efa763ab33dbccf56dade36938a77345ce8e8192d6b39e470ca25ff3cd0/numpy-2.3.4-cp313-cp313t-win32.whl", hash = "sha256:fea80f4f4cf83b54c3a051f2f727870ee51e22f0248d3114b8e755d160b38cfb", size = 6413135, upload-time = "2025-10-15T16:16:55.992Z" }, - { url = "https://files.pythonhosted.org/packages/43/70/aba4c38e8400abcc2f345e13d972fb36c26409b3e644366db7649015f291/numpy-2.3.4-cp313-cp313t-win_amd64.whl", hash = "sha256:15eea9f306b98e0be91eb344a94c0e630689ef302e10c2ce5f7e11905c704f9c", size = 12928582, upload-time = "2025-10-15T16:16:57.943Z" }, - { url = "https://files.pythonhosted.org/packages/67/63/871fad5f0073fc00fbbdd7232962ea1ac40eeaae2bba66c76214f7954236/numpy-2.3.4-cp313-cp313t-win_arm64.whl", hash = "sha256:b6c231c9c2fadbae4011ca5e7e83e12dc4a5072f1a1d85a0a7b3ed754d145a40", size = 10266691, upload-time = "2025-10-15T16:17:00.048Z" }, - { url = "https://files.pythonhosted.org/packages/72/71/ae6170143c115732470ae3a2d01512870dd16e0953f8a6dc89525696069b/numpy-2.3.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:81c3e6d8c97295a7360d367f9f8553973651b76907988bb6066376bc2252f24e", size = 20955580, upload-time = "2025-10-15T16:17:02.509Z" }, - { url = "https://files.pythonhosted.org/packages/af/39/4be9222ffd6ca8a30eda033d5f753276a9c3426c397bb137d8e19dedd200/numpy-2.3.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:7c26b0b2bf58009ed1f38a641f3db4be8d960a417ca96d14e5b06df1506d41ff", size = 14188056, upload-time = "2025-10-15T16:17:04.873Z" }, - { url = "https://files.pythonhosted.org/packages/6c/3d/d85f6700d0a4aa4f9491030e1021c2b2b7421b2b38d01acd16734a2bfdc7/numpy-2.3.4-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:62b2198c438058a20b6704351b35a1d7db881812d8512d67a69c9de1f18ca05f", size = 5116555, upload-time = "2025-10-15T16:17:07.499Z" }, - { url = 
"https://files.pythonhosted.org/packages/bf/04/82c1467d86f47eee8a19a464c92f90a9bb68ccf14a54c5224d7031241ffb/numpy-2.3.4-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:9d729d60f8d53a7361707f4b68a9663c968882dd4f09e0d58c044c8bf5faee7b", size = 6643581, upload-time = "2025-10-15T16:17:09.774Z" }, - { url = "https://files.pythonhosted.org/packages/0c/d3/c79841741b837e293f48bd7db89d0ac7a4f2503b382b78a790ef1dc778a5/numpy-2.3.4-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd0c630cf256b0a7fd9d0a11c9413b42fef5101219ce6ed5a09624f5a65392c7", size = 14299186, upload-time = "2025-10-15T16:17:11.937Z" }, - { url = "https://files.pythonhosted.org/packages/e8/7e/4a14a769741fbf237eec5a12a2cbc7a4c4e061852b6533bcb9e9a796c908/numpy-2.3.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5e081bc082825f8b139f9e9fe42942cb4054524598aaeb177ff476cc76d09d2", size = 16638601, upload-time = "2025-10-15T16:17:14.391Z" }, - { url = "https://files.pythonhosted.org/packages/93/87/1c1de269f002ff0a41173fe01dcc925f4ecff59264cd8f96cf3b60d12c9b/numpy-2.3.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:15fb27364ed84114438fff8aaf998c9e19adbeba08c0b75409f8c452a8692c52", size = 16074219, upload-time = "2025-10-15T16:17:17.058Z" }, - { url = "https://files.pythonhosted.org/packages/cd/28/18f72ee77408e40a76d691001ae599e712ca2a47ddd2c4f695b16c65f077/numpy-2.3.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:85d9fb2d8cd998c84d13a79a09cc0c1091648e848e4e6249b0ccd7f6b487fa26", size = 18576702, upload-time = "2025-10-15T16:17:19.379Z" }, - { url = "https://files.pythonhosted.org/packages/c3/76/95650169b465ececa8cf4b2e8f6df255d4bf662775e797ade2025cc51ae6/numpy-2.3.4-cp314-cp314-win32.whl", hash = "sha256:e73d63fd04e3a9d6bc187f5455d81abfad05660b212c8804bf3b407e984cd2bc", size = 6337136, upload-time = "2025-10-15T16:17:22.886Z" }, - { url = "https://files.pythonhosted.org/packages/dc/89/a231a5c43ede5d6f77ba4a91e915a87dea4aeea76560ba4d2bf185c683f0/numpy-2.3.4-cp314-cp314-win_amd64.whl", hash = "sha256:3da3491cee49cf16157e70f607c03a217ea6647b1cea4819c4f48e53d49139b9", size = 12920542, upload-time = "2025-10-15T16:17:24.783Z" }, - { url = "https://files.pythonhosted.org/packages/0d/0c/ae9434a888f717c5ed2ff2393b3f344f0ff6f1c793519fa0c540461dc530/numpy-2.3.4-cp314-cp314-win_arm64.whl", hash = "sha256:6d9cd732068e8288dbe2717177320723ccec4fb064123f0caf9bbd90ab5be868", size = 10480213, upload-time = "2025-10-15T16:17:26.935Z" }, - { url = "https://files.pythonhosted.org/packages/83/4b/c4a5f0841f92536f6b9592694a5b5f68c9ab37b775ff342649eadf9055d3/numpy-2.3.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:22758999b256b595cf0b1d102b133bb61866ba5ceecf15f759623b64c020c9ec", size = 21052280, upload-time = "2025-10-15T16:17:29.638Z" }, - { url = "https://files.pythonhosted.org/packages/3e/80/90308845fc93b984d2cc96d83e2324ce8ad1fd6efea81b324cba4b673854/numpy-2.3.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9cb177bc55b010b19798dc5497d540dea67fd13a8d9e882b2dae71de0cf09eb3", size = 14302930, upload-time = "2025-10-15T16:17:32.384Z" }, - { url = "https://files.pythonhosted.org/packages/3d/4e/07439f22f2a3b247cec4d63a713faae55e1141a36e77fb212881f7cda3fb/numpy-2.3.4-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0f2bcc76f1e05e5ab58893407c63d90b2029908fa41f9f1cc51eecce936c3365", size = 5231504, upload-time = "2025-10-15T16:17:34.515Z" }, - { url = 
"https://files.pythonhosted.org/packages/ab/de/1e11f2547e2fe3d00482b19721855348b94ada8359aef5d40dd57bfae9df/numpy-2.3.4-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:8dc20bde86802df2ed8397a08d793da0ad7a5fd4ea3ac85d757bf5dd4ad7c252", size = 6739405, upload-time = "2025-10-15T16:17:36.128Z" }, - { url = "https://files.pythonhosted.org/packages/3b/40/8cd57393a26cebe2e923005db5134a946c62fa56a1087dc7c478f3e30837/numpy-2.3.4-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e199c087e2aa71c8f9ce1cb7a8e10677dc12457e7cc1be4798632da37c3e86e", size = 14354866, upload-time = "2025-10-15T16:17:38.884Z" }, - { url = "https://files.pythonhosted.org/packages/93/39/5b3510f023f96874ee6fea2e40dfa99313a00bf3ab779f3c92978f34aace/numpy-2.3.4-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85597b2d25ddf655495e2363fe044b0ae999b75bc4d630dc0d886484b03a5eb0", size = 16703296, upload-time = "2025-10-15T16:17:41.564Z" }, - { url = "https://files.pythonhosted.org/packages/41/0d/19bb163617c8045209c1996c4e427bccbc4bbff1e2c711f39203c8ddbb4a/numpy-2.3.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04a69abe45b49c5955923cf2c407843d1c85013b424ae8a560bba16c92fe44a0", size = 16136046, upload-time = "2025-10-15T16:17:43.901Z" }, - { url = "https://files.pythonhosted.org/packages/e2/c1/6dba12fdf68b02a21ac411c9df19afa66bed2540f467150ca64d246b463d/numpy-2.3.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e1708fac43ef8b419c975926ce1eaf793b0c13b7356cfab6ab0dc34c0a02ac0f", size = 18652691, upload-time = "2025-10-15T16:17:46.247Z" }, - { url = "https://files.pythonhosted.org/packages/f8/73/f85056701dbbbb910c51d846c58d29fd46b30eecd2b6ba760fc8b8a1641b/numpy-2.3.4-cp314-cp314t-win32.whl", hash = "sha256:863e3b5f4d9915aaf1b8ec79ae560ad21f0b8d5e3adc31e73126491bb86dee1d", size = 6485782, upload-time = "2025-10-15T16:17:48.872Z" }, - { url = "https://files.pythonhosted.org/packages/17/90/28fa6f9865181cb817c2471ee65678afa8a7e2a1fb16141473d5fa6bacc3/numpy-2.3.4-cp314-cp314t-win_amd64.whl", hash = "sha256:962064de37b9aef801d33bc579690f8bfe6c5e70e29b61783f60bcba838a14d6", size = 13113301, upload-time = "2025-10-15T16:17:50.938Z" }, - { url = "https://files.pythonhosted.org/packages/54/23/08c002201a8e7e1f9afba93b97deceb813252d9cfd0d3351caed123dcf97/numpy-2.3.4-cp314-cp314t-win_arm64.whl", hash = "sha256:8b5a9a39c45d852b62693d9b3f3e0fe052541f804296ff401a72a1b60edafb29", size = 10547532, upload-time = "2025-10-15T16:17:53.48Z" }, - { url = "https://files.pythonhosted.org/packages/b1/b6/64898f51a86ec88ca1257a59c1d7fd077b60082a119affefcdf1dd0df8ca/numpy-2.3.4-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:6e274603039f924c0fe5cb73438fa9246699c78a6df1bd3decef9ae592ae1c05", size = 21131552, upload-time = "2025-10-15T16:17:55.845Z" }, - { url = "https://files.pythonhosted.org/packages/ce/4c/f135dc6ebe2b6a3c77f4e4838fa63d350f85c99462012306ada1bd4bc460/numpy-2.3.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d149aee5c72176d9ddbc6803aef9c0f6d2ceeea7626574fc68518da5476fa346", size = 14377796, upload-time = "2025-10-15T16:17:58.308Z" }, - { url = "https://files.pythonhosted.org/packages/d0/a4/f33f9c23fcc13dd8412fc8614559b5b797e0aba9d8e01dfa8bae10c84004/numpy-2.3.4-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:6d34ed9db9e6395bb6cd33286035f73a59b058169733a9db9f85e650b88df37e", size = 5306904, upload-time = "2025-10-15T16:18:00.596Z" }, - { url = 
"https://files.pythonhosted.org/packages/28/af/c44097f25f834360f9fb960fa082863e0bad14a42f36527b2a121abdec56/numpy-2.3.4-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:fdebe771ca06bb8d6abce84e51dca9f7921fe6ad34a0c914541b063e9a68928b", size = 6819682, upload-time = "2025-10-15T16:18:02.32Z" }, - { url = "https://files.pythonhosted.org/packages/c5/8c/cd283b54c3c2b77e188f63e23039844f56b23bba1712318288c13fe86baf/numpy-2.3.4-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:957e92defe6c08211eb77902253b14fe5b480ebc5112bc741fd5e9cd0608f847", size = 14422300, upload-time = "2025-10-15T16:18:04.271Z" }, - { url = "https://files.pythonhosted.org/packages/b0/f0/8404db5098d92446b3e3695cf41c6f0ecb703d701cb0b7566ee2177f2eee/numpy-2.3.4-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13b9062e4f5c7ee5c7e5be96f29ba71bc5a37fed3d1d77c37390ae00724d296d", size = 16760806, upload-time = "2025-10-15T16:18:06.668Z" }, - { url = "https://files.pythonhosted.org/packages/95/8e/2844c3959ce9a63acc7c8e50881133d86666f0420bcde695e115ced0920f/numpy-2.3.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:81b3a59793523e552c4a96109dde028aa4448ae06ccac5a76ff6532a85558a7f", size = 12973130, upload-time = "2025-10-15T16:18:09.397Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/76/65/21b3bc86aac7b8f2862db1e808f1ea22b028e30a225a34a5ede9bf8678f2/numpy-2.3.5.tar.gz", hash = "sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0", size = 20584950, upload-time = "2025-11-16T22:52:42.067Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/77/84dd1d2e34d7e2792a236ba180b5e8fcc1e3e414e761ce0253f63d7f572e/numpy-2.3.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de5672f4a7b200c15a4127042170a694d4df43c992948f5e1af57f0174beed10", size = 17034641, upload-time = "2025-11-16T22:49:19.336Z" }, + { url = "https://files.pythonhosted.org/packages/2a/ea/25e26fa5837106cde46ae7d0b667e20f69cbbc0efd64cba8221411ab26ae/numpy-2.3.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:acfd89508504a19ed06ef963ad544ec6664518c863436306153e13e94605c218", size = 12528324, upload-time = "2025-11-16T22:49:22.582Z" }, + { url = "https://files.pythonhosted.org/packages/4d/1a/e85f0eea4cf03d6a0228f5c0256b53f2df4bc794706e7df019fc622e47f1/numpy-2.3.5-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:ffe22d2b05504f786c867c8395de703937f934272eb67586817b46188b4ded6d", size = 5356872, upload-time = "2025-11-16T22:49:25.408Z" }, + { url = "https://files.pythonhosted.org/packages/5c/bb/35ef04afd567f4c989c2060cde39211e4ac5357155c1833bcd1166055c61/numpy-2.3.5-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:872a5cf366aec6bb1147336480fef14c9164b154aeb6542327de4970282cd2f5", size = 6893148, upload-time = "2025-11-16T22:49:27.549Z" }, + { url = "https://files.pythonhosted.org/packages/f2/2b/05bbeb06e2dff5eab512dfc678b1cc5ee94d8ac5956a0885c64b6b26252b/numpy-2.3.5-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3095bdb8dd297e5920b010e96134ed91d852d81d490e787beca7e35ae1d89cf7", size = 14557282, upload-time = "2025-11-16T22:49:30.964Z" }, + { url = "https://files.pythonhosted.org/packages/65/fb/2b23769462b34398d9326081fad5655198fcf18966fcb1f1e49db44fbf31/numpy-2.3.5-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cba086a43d54ca804ce711b2a940b16e452807acebe7852ff327f1ecd49b0d4", size = 16897903, upload-time = "2025-11-16T22:49:34.191Z" }, + { url = 
"https://files.pythonhosted.org/packages/ac/14/085f4cf05fc3f1e8aa95e85404e984ffca9b2275a5dc2b1aae18a67538b8/numpy-2.3.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6cf9b429b21df6b99f4dee7a1218b8b7ffbbe7df8764dc0bd60ce8a0708fed1e", size = 16341672, upload-time = "2025-11-16T22:49:37.2Z" }, + { url = "https://files.pythonhosted.org/packages/6f/3b/1f73994904142b2aa290449b3bb99772477b5fd94d787093e4f24f5af763/numpy-2.3.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:396084a36abdb603546b119d96528c2f6263921c50df3c8fd7cb28873a237748", size = 18838896, upload-time = "2025-11-16T22:49:39.727Z" }, + { url = "https://files.pythonhosted.org/packages/cd/b9/cf6649b2124f288309ffc353070792caf42ad69047dcc60da85ee85fea58/numpy-2.3.5-cp311-cp311-win32.whl", hash = "sha256:b0c7088a73aef3d687c4deef8452a3ac7c1be4e29ed8bf3b366c8111128ac60c", size = 6563608, upload-time = "2025-11-16T22:49:42.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/44/9fe81ae1dcc29c531843852e2874080dc441338574ccc4306b39e2ff6e59/numpy-2.3.5-cp311-cp311-win_amd64.whl", hash = "sha256:a414504bef8945eae5f2d7cb7be2d4af77c5d1cb5e20b296c2c25b61dff2900c", size = 13078442, upload-time = "2025-11-16T22:49:43.99Z" }, + { url = "https://files.pythonhosted.org/packages/6d/a7/f99a41553d2da82a20a2f22e93c94f928e4490bb447c9ff3c4ff230581d3/numpy-2.3.5-cp311-cp311-win_arm64.whl", hash = "sha256:0cd00b7b36e35398fa2d16af7b907b65304ef8bb4817a550e06e5012929830fa", size = 10458555, upload-time = "2025-11-16T22:49:47.092Z" }, + { url = "https://files.pythonhosted.org/packages/44/37/e669fe6cbb2b96c62f6bbedc6a81c0f3b7362f6a59230b23caa673a85721/numpy-2.3.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e", size = 16733873, upload-time = "2025-11-16T22:49:49.84Z" }, + { url = "https://files.pythonhosted.org/packages/c5/65/df0db6c097892c9380851ab9e44b52d4f7ba576b833996e0080181c0c439/numpy-2.3.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769", size = 12259838, upload-time = "2025-11-16T22:49:52.863Z" }, + { url = "https://files.pythonhosted.org/packages/5b/e1/1ee06e70eb2136797abe847d386e7c0e830b67ad1d43f364dd04fa50d338/numpy-2.3.5-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5", size = 5088378, upload-time = "2025-11-16T22:49:55.055Z" }, + { url = "https://files.pythonhosted.org/packages/6d/9c/1ca85fb86708724275103b81ec4cf1ac1d08f465368acfc8da7ab545bdae/numpy-2.3.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4", size = 6628559, upload-time = "2025-11-16T22:49:57.371Z" }, + { url = "https://files.pythonhosted.org/packages/74/78/fcd41e5a0ce4f3f7b003da85825acddae6d7ecb60cf25194741b036ca7d6/numpy-2.3.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d", size = 14250702, upload-time = "2025-11-16T22:49:59.632Z" }, + { url = "https://files.pythonhosted.org/packages/b6/23/2a1b231b8ff672b4c450dac27164a8b2ca7d9b7144f9c02d2396518352eb/numpy-2.3.5-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28", size = 16606086, upload-time = "2025-11-16T22:50:02.127Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/c5/5ad26fbfbe2012e190cc7d5003e4d874b88bb18861d0829edc140a713021/numpy-2.3.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b", size = 16025985, upload-time = "2025-11-16T22:50:04.536Z" }, + { url = "https://files.pythonhosted.org/packages/d2/fa/dd48e225c46c819288148d9d060b047fd2a6fb1eb37eae25112ee4cb4453/numpy-2.3.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c", size = 18542976, upload-time = "2025-11-16T22:50:07.557Z" }, + { url = "https://files.pythonhosted.org/packages/05/79/ccbd23a75862d95af03d28b5c6901a1b7da4803181513d52f3b86ed9446e/numpy-2.3.5-cp312-cp312-win32.whl", hash = "sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952", size = 6285274, upload-time = "2025-11-16T22:50:10.746Z" }, + { url = "https://files.pythonhosted.org/packages/2d/57/8aeaf160312f7f489dea47ab61e430b5cb051f59a98ae68b7133ce8fa06a/numpy-2.3.5-cp312-cp312-win_amd64.whl", hash = "sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa", size = 12782922, upload-time = "2025-11-16T22:50:12.811Z" }, + { url = "https://files.pythonhosted.org/packages/78/a6/aae5cc2ca78c45e64b9ef22f089141d661516856cf7c8a54ba434576900d/numpy-2.3.5-cp312-cp312-win_arm64.whl", hash = "sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013", size = 10194667, upload-time = "2025-11-16T22:50:16.16Z" }, + { url = "https://files.pythonhosted.org/packages/db/69/9cde09f36da4b5a505341180a3f2e6fadc352fd4d2b7096ce9778db83f1a/numpy-2.3.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d0f23b44f57077c1ede8c5f26b30f706498b4862d3ff0a7298b8411dd2f043ff", size = 16728251, upload-time = "2025-11-16T22:50:19.013Z" }, + { url = "https://files.pythonhosted.org/packages/79/fb/f505c95ceddd7027347b067689db71ca80bd5ecc926f913f1a23e65cf09b/numpy-2.3.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa5bc7c5d59d831d9773d1170acac7893ce3a5e130540605770ade83280e7188", size = 12254652, upload-time = "2025-11-16T22:50:21.487Z" }, + { url = "https://files.pythonhosted.org/packages/78/da/8c7738060ca9c31b30e9301ee0cf6c5ffdbf889d9593285a1cead337f9a5/numpy-2.3.5-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:ccc933afd4d20aad3c00bcef049cb40049f7f196e0397f1109dba6fed63267b0", size = 5083172, upload-time = "2025-11-16T22:50:24.562Z" }, + { url = "https://files.pythonhosted.org/packages/a4/b4/ee5bb2537fb9430fd2ef30a616c3672b991a4129bb1c7dcc42aa0abbe5d7/numpy-2.3.5-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:afaffc4393205524af9dfa400fa250143a6c3bc646c08c9f5e25a9f4b4d6a903", size = 6622990, upload-time = "2025-11-16T22:50:26.47Z" }, + { url = "https://files.pythonhosted.org/packages/95/03/dc0723a013c7d7c19de5ef29e932c3081df1c14ba582b8b86b5de9db7f0f/numpy-2.3.5-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c75442b2209b8470d6d5d8b1c25714270686f14c749028d2199c54e29f20b4d", size = 14248902, upload-time = "2025-11-16T22:50:28.861Z" }, + { url = "https://files.pythonhosted.org/packages/f5/10/ca162f45a102738958dcec8023062dad0cbc17d1ab99d68c4e4a6c45fb2b/numpy-2.3.5-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e06aa0af8c0f05104d56450d6093ee639e15f24ecf62d417329d06e522e017", size = 16597430, upload-time = "2025-11-16T22:50:31.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/51/c1e29be863588db58175175f057286900b4b3327a1351e706d5e0f8dd679/numpy-2.3.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed89927b86296067b4f81f108a2271d8926467a8868e554eaf370fc27fa3ccaf", size = 16024551, upload-time = "2025-11-16T22:50:34.242Z" }, + { url = "https://files.pythonhosted.org/packages/83/68/8236589d4dbb87253d28259d04d9b814ec0ecce7cb1c7fed29729f4c3a78/numpy-2.3.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51c55fe3451421f3a6ef9a9c1439e82101c57a2c9eab9feb196a62b1a10b58ce", size = 18533275, upload-time = "2025-11-16T22:50:37.651Z" }, + { url = "https://files.pythonhosted.org/packages/40/56/2932d75b6f13465239e3b7b7e511be27f1b8161ca2510854f0b6e521c395/numpy-2.3.5-cp313-cp313-win32.whl", hash = "sha256:1978155dd49972084bd6ef388d66ab70f0c323ddee6f693d539376498720fb7e", size = 6277637, upload-time = "2025-11-16T22:50:40.11Z" }, + { url = "https://files.pythonhosted.org/packages/0c/88/e2eaa6cffb115b85ed7c7c87775cb8bcf0816816bc98ca8dbfa2ee33fe6e/numpy-2.3.5-cp313-cp313-win_amd64.whl", hash = "sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b", size = 12779090, upload-time = "2025-11-16T22:50:42.503Z" }, + { url = "https://files.pythonhosted.org/packages/8f/88/3f41e13a44ebd4034ee17baa384acac29ba6a4fcc2aca95f6f08ca0447d1/numpy-2.3.5-cp313-cp313-win_arm64.whl", hash = "sha256:0472f11f6ec23a74a906a00b48a4dcf3849209696dff7c189714511268d103ae", size = 10194710, upload-time = "2025-11-16T22:50:44.971Z" }, + { url = "https://files.pythonhosted.org/packages/13/cb/71744144e13389d577f867f745b7df2d8489463654a918eea2eeb166dfc9/numpy-2.3.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:414802f3b97f3c1eef41e530aaba3b3c1620649871d8cb38c6eaff034c2e16bd", size = 16827292, upload-time = "2025-11-16T22:50:47.715Z" }, + { url = "https://files.pythonhosted.org/packages/71/80/ba9dc6f2a4398e7f42b708a7fdc841bb638d353be255655498edbf9a15a8/numpy-2.3.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5ee6609ac3604fa7780e30a03e5e241a7956f8e2fcfe547d51e3afa5247ac47f", size = 12378897, upload-time = "2025-11-16T22:50:51.327Z" }, + { url = "https://files.pythonhosted.org/packages/2e/6d/db2151b9f64264bcceccd51741aa39b50150de9b602d98ecfe7e0c4bff39/numpy-2.3.5-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:86d835afea1eaa143012a2d7a3f45a3adce2d7adc8b4961f0b362214d800846a", size = 5207391, upload-time = "2025-11-16T22:50:54.542Z" }, + { url = "https://files.pythonhosted.org/packages/80/ae/429bacace5ccad48a14c4ae5332f6aa8ab9f69524193511d60ccdfdc65fa/numpy-2.3.5-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:30bc11310e8153ca664b14c5f1b73e94bd0503681fcf136a163de856f3a50139", size = 6721275, upload-time = "2025-11-16T22:50:56.794Z" }, + { url = "https://files.pythonhosted.org/packages/74/5b/1919abf32d8722646a38cd527bc3771eb229a32724ee6ba340ead9b92249/numpy-2.3.5-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1062fde1dcf469571705945b0f221b73928f34a20c904ffb45db101907c3454e", size = 14306855, upload-time = "2025-11-16T22:50:59.208Z" }, + { url = "https://files.pythonhosted.org/packages/a5/87/6831980559434973bebc30cd9c1f21e541a0f2b0c280d43d3afd909b66d0/numpy-2.3.5-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce581db493ea1a96c0556360ede6607496e8bf9b3a8efa66e06477267bc831e9", size = 16657359, upload-time = "2025-11-16T22:51:01.991Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/91/c797f544491ee99fd00495f12ebb7802c440c1915811d72ac5b4479a3356/numpy-2.3.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:cc8920d2ec5fa99875b670bb86ddeb21e295cb07aa331810d9e486e0b969d946", size = 16093374, upload-time = "2025-11-16T22:51:05.291Z" }, + { url = "https://files.pythonhosted.org/packages/74/a6/54da03253afcbe7a72785ec4da9c69fb7a17710141ff9ac5fcb2e32dbe64/numpy-2.3.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9ee2197ef8c4f0dfe405d835f3b6a14f5fee7782b5de51ba06fb65fc9b36e9f1", size = 18594587, upload-time = "2025-11-16T22:51:08.585Z" }, + { url = "https://files.pythonhosted.org/packages/80/e9/aff53abbdd41b0ecca94285f325aff42357c6b5abc482a3fcb4994290b18/numpy-2.3.5-cp313-cp313t-win32.whl", hash = "sha256:70b37199913c1bd300ff6e2693316c6f869c7ee16378faf10e4f5e3275b299c3", size = 6405940, upload-time = "2025-11-16T22:51:11.541Z" }, + { url = "https://files.pythonhosted.org/packages/d5/81/50613fec9d4de5480de18d4f8ef59ad7e344d497edbef3cfd80f24f98461/numpy-2.3.5-cp313-cp313t-win_amd64.whl", hash = "sha256:b501b5fa195cc9e24fe102f21ec0a44dffc231d2af79950b451e0d99cea02234", size = 12920341, upload-time = "2025-11-16T22:51:14.312Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ab/08fd63b9a74303947f34f0bd7c5903b9c5532c2d287bead5bdf4c556c486/numpy-2.3.5-cp313-cp313t-win_arm64.whl", hash = "sha256:a80afd79f45f3c4a7d341f13acbe058d1ca8ac017c165d3fa0d3de6bc1a079d7", size = 10262507, upload-time = "2025-11-16T22:51:16.846Z" }, + { url = "https://files.pythonhosted.org/packages/ba/97/1a914559c19e32d6b2e233cf9a6a114e67c856d35b1d6babca571a3e880f/numpy-2.3.5-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:bf06bc2af43fa8d32d30fae16ad965663e966b1a3202ed407b84c989c3221e82", size = 16735706, upload-time = "2025-11-16T22:51:19.558Z" }, + { url = "https://files.pythonhosted.org/packages/57/d4/51233b1c1b13ecd796311216ae417796b88b0616cfd8a33ae4536330748a/numpy-2.3.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:052e8c42e0c49d2575621c158934920524f6c5da05a1d3b9bab5d8e259e045f0", size = 12264507, upload-time = "2025-11-16T22:51:22.492Z" }, + { url = "https://files.pythonhosted.org/packages/45/98/2fe46c5c2675b8306d0b4a3ec3494273e93e1226a490f766e84298576956/numpy-2.3.5-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:1ed1ec893cff7040a02c8aa1c8611b94d395590d553f6b53629a4461dc7f7b63", size = 5093049, upload-time = "2025-11-16T22:51:25.171Z" }, + { url = "https://files.pythonhosted.org/packages/ce/0e/0698378989bb0ac5f1660c81c78ab1fe5476c1a521ca9ee9d0710ce54099/numpy-2.3.5-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:2dcd0808a421a482a080f89859a18beb0b3d1e905b81e617a188bd80422d62e9", size = 6626603, upload-time = "2025-11-16T22:51:27Z" }, + { url = "https://files.pythonhosted.org/packages/5e/a6/9ca0eecc489640615642a6cbc0ca9e10df70df38c4d43f5a928ff18d8827/numpy-2.3.5-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:727fd05b57df37dc0bcf1a27767a3d9a78cbbc92822445f32cc3436ba797337b", size = 14262696, upload-time = "2025-11-16T22:51:29.402Z" }, + { url = "https://files.pythonhosted.org/packages/c8/f6/07ec185b90ec9d7217a00eeeed7383b73d7e709dae2a9a021b051542a708/numpy-2.3.5-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fffe29a1ef00883599d1dc2c51aa2e5d80afe49523c261a74933df395c15c520", size = 16597350, upload-time = "2025-11-16T22:51:32.167Z" }, + { url = 
"https://files.pythonhosted.org/packages/75/37/164071d1dde6a1a84c9b8e5b414fa127981bad47adf3a6b7e23917e52190/numpy-2.3.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f7f0e05112916223d3f438f293abf0727e1181b5983f413dfa2fefc4098245c", size = 16040190, upload-time = "2025-11-16T22:51:35.403Z" }, + { url = "https://files.pythonhosted.org/packages/08/3c/f18b82a406b04859eb026d204e4e1773eb41c5be58410f41ffa511d114ae/numpy-2.3.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2e2eb32ddb9ccb817d620ac1d8dae7c3f641c1e5f55f531a33e8ab97960a75b8", size = 18536749, upload-time = "2025-11-16T22:51:39.698Z" }, + { url = "https://files.pythonhosted.org/packages/40/79/f82f572bf44cf0023a2fe8588768e23e1592585020d638999f15158609e1/numpy-2.3.5-cp314-cp314-win32.whl", hash = "sha256:66f85ce62c70b843bab1fb14a05d5737741e74e28c7b8b5a064de10142fad248", size = 6335432, upload-time = "2025-11-16T22:51:42.476Z" }, + { url = "https://files.pythonhosted.org/packages/a3/2e/235b4d96619931192c91660805e5e49242389742a7a82c27665021db690c/numpy-2.3.5-cp314-cp314-win_amd64.whl", hash = "sha256:e6a0bc88393d65807d751a614207b7129a310ca4fe76a74e5c7da5fa5671417e", size = 12919388, upload-time = "2025-11-16T22:51:45.275Z" }, + { url = "https://files.pythonhosted.org/packages/07/2b/29fd75ce45d22a39c61aad74f3d718e7ab67ccf839ca8b60866054eb15f8/numpy-2.3.5-cp314-cp314-win_arm64.whl", hash = "sha256:aeffcab3d4b43712bb7a60b65f6044d444e75e563ff6180af8f98dd4b905dfd2", size = 10476651, upload-time = "2025-11-16T22:51:47.749Z" }, + { url = "https://files.pythonhosted.org/packages/17/e1/f6a721234ebd4d87084cfa68d081bcba2f5cfe1974f7de4e0e8b9b2a2ba1/numpy-2.3.5-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:17531366a2e3a9e30762c000f2c43a9aaa05728712e25c11ce1dbe700c53ad41", size = 16834503, upload-time = "2025-11-16T22:51:50.443Z" }, + { url = "https://files.pythonhosted.org/packages/5c/1c/baf7ffdc3af9c356e1c135e57ab7cf8d247931b9554f55c467efe2c69eff/numpy-2.3.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:d21644de1b609825ede2f48be98dfde4656aefc713654eeee280e37cadc4e0ad", size = 12381612, upload-time = "2025-11-16T22:51:53.609Z" }, + { url = "https://files.pythonhosted.org/packages/74/91/f7f0295151407ddc9ba34e699013c32c3c91944f9b35fcf9281163dc1468/numpy-2.3.5-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:c804e3a5aba5460c73955c955bdbd5c08c354954e9270a2c1565f62e866bdc39", size = 5210042, upload-time = "2025-11-16T22:51:56.213Z" }, + { url = "https://files.pythonhosted.org/packages/2e/3b/78aebf345104ec50dd50a4d06ddeb46a9ff5261c33bcc58b1c4f12f85ec2/numpy-2.3.5-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:cc0a57f895b96ec78969c34f682c602bf8da1a0270b09bc65673df2e7638ec20", size = 6724502, upload-time = "2025-11-16T22:51:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/02/c6/7c34b528740512e57ef1b7c8337ab0b4f0bddf34c723b8996c675bc2bc91/numpy-2.3.5-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:900218e456384ea676e24ea6a0417f030a3b07306d29d7ad843957b40a9d8d52", size = 14308962, upload-time = "2025-11-16T22:52:01.698Z" }, + { url = "https://files.pythonhosted.org/packages/80/35/09d433c5262bc32d725bafc619e095b6a6651caf94027a03da624146f655/numpy-2.3.5-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:09a1bea522b25109bf8e6f3027bd810f7c1085c64a0c7ce050c1676ad0ba010b", size = 16655054, upload-time = "2025-11-16T22:52:04.267Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/ab/6a7b259703c09a88804fa2430b43d6457b692378f6b74b356155283566ac/numpy-2.3.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04822c00b5fd0323c8166d66c701dc31b7fbd252c100acd708c48f763968d6a3", size = 16091613, upload-time = "2025-11-16T22:52:08.651Z" }, + { url = "https://files.pythonhosted.org/packages/c2/88/330da2071e8771e60d1038166ff9d73f29da37b01ec3eb43cb1427464e10/numpy-2.3.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d6889ec4ec662a1a37eb4b4fb26b6100841804dac55bd9df579e326cdc146227", size = 18591147, upload-time = "2025-11-16T22:52:11.453Z" }, + { url = "https://files.pythonhosted.org/packages/51/41/851c4b4082402d9ea860c3626db5d5df47164a712cb23b54be028b184c1c/numpy-2.3.5-cp314-cp314t-win32.whl", hash = "sha256:93eebbcf1aafdf7e2ddd44c2923e2672e1010bddc014138b229e49725b4d6be5", size = 6479806, upload-time = "2025-11-16T22:52:14.641Z" }, + { url = "https://files.pythonhosted.org/packages/90/30/d48bde1dfd93332fa557cff1972fbc039e055a52021fbef4c2c4b1eefd17/numpy-2.3.5-cp314-cp314t-win_amd64.whl", hash = "sha256:c8a9958e88b65c3b27e22ca2a076311636850b612d6bbfb76e8d156aacde2aaf", size = 13105760, upload-time = "2025-11-16T22:52:17.975Z" }, + { url = "https://files.pythonhosted.org/packages/2d/fd/4b5eb0b3e888d86aee4d198c23acec7d214baaf17ea93c1adec94c9518b9/numpy-2.3.5-cp314-cp314t-win_arm64.whl", hash = "sha256:6203fdf9f3dc5bdaed7319ad8698e685c7a3be10819f41d32a0723e611733b42", size = 10545459, upload-time = "2025-11-16T22:52:20.55Z" }, + { url = "https://files.pythonhosted.org/packages/c6/65/f9dea8e109371ade9c782b4e4756a82edf9d3366bca495d84d79859a0b79/numpy-2.3.5-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f0963b55cdd70fad460fa4c1341f12f976bb26cb66021a5580329bd498988310", size = 16910689, upload-time = "2025-11-16T22:52:23.247Z" }, + { url = "https://files.pythonhosted.org/packages/00/4f/edb00032a8fb92ec0a679d3830368355da91a69cab6f3e9c21b64d0bb986/numpy-2.3.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f4255143f5160d0de972d28c8f9665d882b5f61309d8362fdd3e103cf7bf010c", size = 12457053, upload-time = "2025-11-16T22:52:26.367Z" }, + { url = "https://files.pythonhosted.org/packages/16/a4/e8a53b5abd500a63836a29ebe145fc1ab1f2eefe1cfe59276020373ae0aa/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:a4b9159734b326535f4dd01d947f919c6eefd2d9827466a696c44ced82dfbc18", size = 5285635, upload-time = "2025-11-16T22:52:29.266Z" }, + { url = "https://files.pythonhosted.org/packages/a3/2f/37eeb9014d9c8b3e9c55bc599c68263ca44fdbc12a93e45a21d1d56df737/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2feae0d2c91d46e59fcd62784a3a83b3fb677fead592ce51b5a6fbb4f95965ff", size = 6801770, upload-time = "2025-11-16T22:52:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/7d/e4/68d2f474df2cb671b2b6c2986a02e520671295647dad82484cde80ca427b/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffac52f28a7849ad7576293c0cb7b9f08304e8f7d738a8cb8a90ec4c55a998eb", size = 14391768, upload-time = "2025-11-16T22:52:33.593Z" }, + { url = "https://files.pythonhosted.org/packages/b8/50/94ccd8a2b141cb50651fddd4f6a48874acb3c91c8f0842b08a6afc4b0b21/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63c0e9e7eea69588479ebf4a8a270d5ac22763cc5854e9a7eae952a3908103f7", size = 16729263, upload-time = "2025-11-16T22:52:36.369Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/ee/346fa473e666fe14c52fcdd19ec2424157290a032d4c41f98127bfb31ac7/numpy-2.3.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f16417ec91f12f814b10bafe79ef77e70113a2f5f7018640e7425ff979253425", size = 12967213, upload-time = "2025-11-16T22:52:39.38Z" }, ] [[package]] @@ -1218,17 +1245,17 @@ wheels = [ [[package]] name = "protobuf" -version = "6.33.0" +version = "6.33.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/19/ff/64a6c8f420818bb873713988ca5492cba3a7946be57e027ac63495157d97/protobuf-6.33.0.tar.gz", hash = "sha256:140303d5c8d2037730c548f8c7b93b20bb1dc301be280c378b82b8894589c954", size = 443463, upload-time = "2025-10-15T20:39:52.159Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/03/a1440979a3f74f16cab3b75b0da1a1a7f922d56a8ddea96092391998edc0/protobuf-6.33.1.tar.gz", hash = "sha256:97f65757e8d09870de6fd973aeddb92f85435607235d20b2dfed93405d00c85b", size = 443432, upload-time = "2025-11-13T16:44:18.895Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/ee/52b3fa8feb6db4a833dfea4943e175ce645144532e8a90f72571ad85df4e/protobuf-6.33.0-cp310-abi3-win32.whl", hash = "sha256:d6101ded078042a8f17959eccd9236fb7a9ca20d3b0098bbcb91533a5680d035", size = 425593, upload-time = "2025-10-15T20:39:40.29Z" }, - { url = "https://files.pythonhosted.org/packages/7b/c6/7a465f1825872c55e0341ff4a80198743f73b69ce5d43ab18043699d1d81/protobuf-6.33.0-cp310-abi3-win_amd64.whl", hash = "sha256:9a031d10f703f03768f2743a1c403af050b6ae1f3480e9c140f39c45f81b13ee", size = 436882, upload-time = "2025-10-15T20:39:42.841Z" }, - { url = "https://files.pythonhosted.org/packages/e1/a9/b6eee662a6951b9c3640e8e452ab3e09f117d99fc10baa32d1581a0d4099/protobuf-6.33.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:905b07a65f1a4b72412314082c7dbfae91a9e8b68a0cc1577515f8df58ecf455", size = 427521, upload-time = "2025-10-15T20:39:43.803Z" }, - { url = "https://files.pythonhosted.org/packages/10/35/16d31e0f92c6d2f0e77c2a3ba93185130ea13053dd16200a57434c882f2b/protobuf-6.33.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:e0697ece353e6239b90ee43a9231318302ad8353c70e6e45499fa52396debf90", size = 324445, upload-time = "2025-10-15T20:39:44.932Z" }, - { url = "https://files.pythonhosted.org/packages/e6/eb/2a981a13e35cda8b75b5585aaffae2eb904f8f351bdd3870769692acbd8a/protobuf-6.33.0-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:e0a1715e4f27355afd9570f3ea369735afc853a6c3951a6afe1f80d8569ad298", size = 339159, upload-time = "2025-10-15T20:39:46.186Z" }, - { url = "https://files.pythonhosted.org/packages/21/51/0b1cbad62074439b867b4e04cc09b93f6699d78fd191bed2bbb44562e077/protobuf-6.33.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:35be49fd3f4fefa4e6e2aacc35e8b837d6703c37a2168a55ac21e9b1bc7559ef", size = 323172, upload-time = "2025-10-15T20:39:47.465Z" }, - { url = "https://files.pythonhosted.org/packages/07/d1/0a28c21707807c6aacd5dc9c3704b2aa1effbf37adebd8caeaf68b17a636/protobuf-6.33.0-py3-none-any.whl", hash = "sha256:25c9e1963c6734448ea2d308cfa610e692b801304ba0908d7bfa564ac5132995", size = 170477, upload-time = "2025-10-15T20:39:51.311Z" }, + { url = "https://files.pythonhosted.org/packages/06/f1/446a9bbd2c60772ca36556bac8bfde40eceb28d9cc7838755bc41e001d8f/protobuf-6.33.1-cp310-abi3-win32.whl", hash = "sha256:f8d3fdbc966aaab1d05046d0240dd94d40f2a8c62856d41eaa141ff64a79de6b", size = 425593, upload-time = "2025-11-13T16:44:06.275Z" }, + { url = 
"https://files.pythonhosted.org/packages/a6/79/8780a378c650e3df849b73de8b13cf5412f521ca2ff9b78a45c247029440/protobuf-6.33.1-cp310-abi3-win_amd64.whl", hash = "sha256:923aa6d27a92bf44394f6abf7ea0500f38769d4b07f4be41cb52bd8b1123b9ed", size = 436883, upload-time = "2025-11-13T16:44:09.222Z" }, + { url = "https://files.pythonhosted.org/packages/cd/93/26213ff72b103ae55bb0d73e7fb91ea570ef407c3ab4fd2f1f27cac16044/protobuf-6.33.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:fe34575f2bdde76ac429ec7b570235bf0c788883e70aee90068e9981806f2490", size = 427522, upload-time = "2025-11-13T16:44:10.475Z" }, + { url = "https://files.pythonhosted.org/packages/c2/32/df4a35247923393aa6b887c3b3244a8c941c32a25681775f96e2b418f90e/protobuf-6.33.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:f8adba2e44cde2d7618996b3fc02341f03f5bc3f2748be72dc7b063319276178", size = 324445, upload-time = "2025-11-13T16:44:11.869Z" }, + { url = "https://files.pythonhosted.org/packages/8e/d0/d796e419e2ec93d2f3fa44888861c3f88f722cde02b7c3488fcc6a166820/protobuf-6.33.1-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:0f4cf01222c0d959c2b399142deb526de420be8236f22c71356e2a544e153c53", size = 339161, upload-time = "2025-11-13T16:44:12.778Z" }, + { url = "https://files.pythonhosted.org/packages/1d/2a/3c5f05a4af06649547027d288747f68525755de692a26a7720dced3652c0/protobuf-6.33.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:8fd7d5e0eb08cd5b87fd3df49bc193f5cfd778701f47e11d127d0afc6c39f1d1", size = 323171, upload-time = "2025-11-13T16:44:14.035Z" }, + { url = "https://files.pythonhosted.org/packages/08/b4/46310463b4f6ceef310f8348786f3cff181cea671578e3d9743ba61a459e/protobuf-6.33.1-py3-none-any.whl", hash = "sha256:d595a9fd694fdeb061a62fbe10eb039cc1e444df81ec9bb70c7fc59ebcb1eafa", size = 170477, upload-time = "2025-11-13T16:44:17.633Z" }, ] [[package]] @@ -1272,7 +1299,7 @@ wheels = [ [[package]] name = "pydantic" -version = "2.12.4" +version = "2.12.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-types" }, @@ -1280,9 +1307,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/96/ad/a17bc283d7d81837c061c49e3eaa27a45991759a1b7eae1031921c6bd924/pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac", size = 821038, upload-time = "2025-11-05T10:50:08.59Z" } +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/82/2f/e68750da9b04856e2a7ec56fc6f034a5a79775e9b9a81882252789873798/pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e", size = 463400, upload-time = "2025-11-05T10:50:06.732Z" }, + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, ] [[package]] @@ -1707,15 +1734,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = 
"sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] -[[package]] -name = "sniffio" -version = "1.3.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, -] - [[package]] name = "syrupy" version = "4.9.1" @@ -1788,11 +1806,11 @@ wheels = [ [[package]] name = "types-google-cloud-ndb" -version = "2.3.0.20250317" +version = "2.3.0.20251121" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/79/00/b4c5d098ec502eb23757d228c094fc85a586edae37f2a9dc2d11ddd38d11/types_google_cloud_ndb-2.3.0.20250317.tar.gz", hash = "sha256:a0650dbbeda47fb752587e27396917b32febe055c5d70a8169ad1148d1b1ab56", size = 19474, upload-time = "2025-03-17T02:53:57.002Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/06/f8ee47fcffcf8d2a506a54be6c71b49441658f7e0e5ac2cf99a8a1bdbe0e/types_google_cloud_ndb-2.3.0.20251121.tar.gz", hash = "sha256:5356da4d0d6a547c7d7ba2b7fc0b0615496e5234dfd45e987165d63b8b51d116", size = 19371, upload-time = "2025-11-21T03:03:38.85Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/07/a2/9cf307f60e5c93da2636e38d57334d40d09579123d536031ca0a3bff5476/types_google_cloud_ndb-2.3.0.20250317-py3-none-any.whl", hash = "sha256:e4be5d7b18ee943a776caf828f4a9b819fc93eedb53389a61fc9bc9e27fecdda", size = 22795, upload-time = "2025-03-17T02:53:55.882Z" }, + { url = "https://files.pythonhosted.org/packages/a4/da/74b74db37069ea371a2c192d3d9230698d535d07551c925057aec94f2b79/types_google_cloud_ndb-2.3.0.20251121-py3-none-any.whl", hash = "sha256:d7747131c30f9478f4615276cf1a681968cd84521e4768a875bca6631809bbe5", size = 22737, upload-time = "2025-11-21T03:03:37.999Z" }, ] [[package]] @@ -1900,6 +1918,35 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, ] +[[package]] +name = "uuid-utils" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/0e/512fb221e4970c2f75ca9dae412d320b7d9ddc9f2b15e04ea8e44710396c/uuid_utils-0.12.0.tar.gz", hash = "sha256:252bd3d311b5d6b7f5dfce7a5857e27bb4458f222586bb439463231e5a9cbd64", size = 20889, upload-time = "2025-12-01T17:29:55.494Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/43/de5cd49a57b6293b911b6a9a62fc03e55db9f964da7d5882d9edbee1e9d2/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3b9b30707659292f207b98f294b0e081f6d77e1fbc760ba5b41331a39045f514", size = 603197, upload-time = "2025-12-01T17:29:30.104Z" }, + { url = 
"https://files.pythonhosted.org/packages/02/fa/5fd1d8c9234e44f0c223910808cde0de43bb69f7df1349e49b1afa7f2baa/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:add3d820c7ec14ed37317375bea30249699c5d08ff4ae4dbee9fc9bce3bfbf65", size = 305168, upload-time = "2025-12-01T17:29:31.384Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c6/8633ac9942bf9dc97a897b5154e5dcffa58816ec4dd780b3b12b559ff05c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8fce83ecb3b16af29c7809669056c4b6e7cc912cab8c6d07361645de12dd79", size = 340580, upload-time = "2025-12-01T17:29:32.362Z" }, + { url = "https://files.pythonhosted.org/packages/f3/88/8a61307b04b4da1c576373003e6d857a04dade52ab035151d62cb84d5cb5/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec921769afcb905035d785582b0791d02304a7850fbd6ce924c1a8976380dfc6", size = 346771, upload-time = "2025-12-01T17:29:33.708Z" }, + { url = "https://files.pythonhosted.org/packages/1c/fb/aab2dcf94b991e62aa167457c7825b9b01055b884b888af926562864398c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f3b060330f5899a92d5c723547dc6a95adef42433e9748f14c66859a7396664", size = 474781, upload-time = "2025-12-01T17:29:35.237Z" }, + { url = "https://files.pythonhosted.org/packages/5a/7a/dbd5e49c91d6c86dba57158bbfa0e559e1ddf377bb46dcfd58aea4f0d567/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:908dfef7f0bfcf98d406e5dc570c25d2f2473e49b376de41792b6e96c1d5d291", size = 343685, upload-time = "2025-12-01T17:29:36.677Z" }, + { url = "https://files.pythonhosted.org/packages/1a/19/8c4b1d9f450159733b8be421a4e1fb03533709b80ed3546800102d085572/uuid_utils-0.12.0-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4c6a24148926bd0ca63e8a2dabf4cc9dc329a62325b3ad6578ecd60fbf926506", size = 366482, upload-time = "2025-12-01T17:29:37.979Z" }, + { url = "https://files.pythonhosted.org/packages/82/43/c79a6e45687647f80a159c8ba34346f287b065452cc419d07d2212d38420/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:64a91e632669f059ef605f1771d28490b1d310c26198e46f754e8846dddf12f4", size = 523132, upload-time = "2025-12-01T17:29:39.293Z" }, + { url = "https://files.pythonhosted.org/packages/5a/a2/b2d75a621260a40c438aa88593827dfea596d18316520a99e839f7a5fb9d/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:93c082212470bb4603ca3975916c205a9d7ef1443c0acde8fbd1e0f5b36673c7", size = 614218, upload-time = "2025-12-01T17:29:40.315Z" }, + { url = "https://files.pythonhosted.org/packages/13/6b/ba071101626edd5a6dabf8525c9a1537ff3d885dbc210540574a03901fef/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:431b1fb7283ba974811b22abd365f2726f8f821ab33f0f715be389640e18d039", size = 546241, upload-time = "2025-12-01T17:29:41.656Z" }, + { url = "https://files.pythonhosted.org/packages/01/12/9a942b81c0923268e6d85bf98d8f0a61fcbcd5e432fef94fdf4ce2ef8748/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd7838c40149100299fa37cbd8bab5ee382372e8e65a148002a37d380df7c8", size = 511842, upload-time = "2025-12-01T17:29:43.107Z" }, + { url = "https://files.pythonhosted.org/packages/a9/a7/c326f5163dd48b79368b87d8a05f5da4668dd228a3f5ca9d79d5fee2fc40/uuid_utils-0.12.0-cp39-abi3-win32.whl", hash = "sha256:487f17c0fee6cbc1d8b90fe811874174a9b1b5683bf2251549e302906a50fed3", size = 179088, upload-time = "2025-12-01T17:29:44.492Z" }, + { url = 
"https://files.pythonhosted.org/packages/38/92/41c8734dd97213ee1d5ae435cf4499705dc4f2751e3b957fd12376f61784/uuid_utils-0.12.0-cp39-abi3-win_amd64.whl", hash = "sha256:9598e7c9da40357ae8fffc5d6938b1a7017f09a1acbcc95e14af8c65d48c655a", size = 183003, upload-time = "2025-12-01T17:29:45.47Z" }, + { url = "https://files.pythonhosted.org/packages/c9/f9/52ab0359618987331a1f739af837d26168a4b16281c9c3ab46519940c628/uuid_utils-0.12.0-cp39-abi3-win_arm64.whl", hash = "sha256:c9bea7c5b2aa6f57937ebebeee4d4ef2baad10f86f1b97b58a3f6f34c14b4e84", size = 182975, upload-time = "2025-12-01T17:29:46.444Z" }, + { url = "https://files.pythonhosted.org/packages/ef/f7/6c55b7722cede3b424df02ed5cddb25c19543abda2f95fa4cfc34a892ae5/uuid_utils-0.12.0-pp311-pypy311_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e2209d361f2996966ab7114f49919eb6aaeabc6041672abbbbf4fdbb8ec1acc0", size = 593065, upload-time = "2025-12-01T17:29:47.507Z" }, + { url = "https://files.pythonhosted.org/packages/b8/40/ce5fe8e9137dbd5570e0016c2584fca43ad81b11a1cef809a1a1b4952ab7/uuid_utils-0.12.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d9636bcdbd6cfcad2b549c352b669412d0d1eb09be72044a2f13e498974863cd", size = 300047, upload-time = "2025-12-01T17:29:48.596Z" }, + { url = "https://files.pythonhosted.org/packages/fb/9b/31c5d0736d7b118f302c50214e581f40e904305d8872eb0f0c921d50e138/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cd8543a3419251fb78e703ce3b15fdfafe1b7c542cf40caf0775e01db7e7674", size = 335165, upload-time = "2025-12-01T17:29:49.755Z" }, + { url = "https://files.pythonhosted.org/packages/f6/5c/d80b4d08691c9d7446d0ad58fd41503081a662cfd2c7640faf68c64d8098/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e98db2d8977c052cb307ae1cb5cc37a21715e8d415dbc65863b039397495a013", size = 341437, upload-time = "2025-12-01T17:29:51.112Z" }, + { url = "https://files.pythonhosted.org/packages/f6/b3/9dccdc6f3c22f6ef5bd381ae559173f8a1ae185ae89ed1f39f499d9d8b02/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8f2bdf5e4ffeb259ef6d15edae92aed60a1d6f07cbfab465d836f6b12b48da8", size = 469123, upload-time = "2025-12-01T17:29:52.389Z" }, + { url = "https://files.pythonhosted.org/packages/fd/90/6c35ef65fbc49f8189729839b793a4a74a7dd8c5aa5eb56caa93f8c97732/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c3ec53c0cb15e1835870c139317cc5ec06e35aa22843e3ed7d9c74f23f23898", size = 335892, upload-time = "2025-12-01T17:29:53.44Z" }, + { url = "https://files.pythonhosted.org/packages/6b/c7/e3f3ce05c5af2bf86a0938d22165affe635f4dcbfd5687b1dacc042d3e0e/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:84e5c0eba209356f7f389946a3a47b2cc2effd711b3fc7c7f155ad9f7d45e8a3", size = 360693, upload-time = "2025-12-01T17:29:54.558Z" }, +] + [[package]] name = "vcrpy" version = "7.0.0" @@ -1948,6 +1995,65 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, ] +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/da/6462a9f510c0c49837bbc9345aca92d767a56c1fb2939e1579df1e1cdcf7/websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b", size = 175423, upload-time = "2025-03-05T20:01:35.363Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9f/9d11c1a4eb046a9e106483b9ff69bce7ac880443f00e5ce64261b47b07e7/websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205", size = 173080, upload-time = "2025-03-05T20:01:37.304Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4f/b462242432d93ea45f297b6179c7333dd0402b855a912a04e7fc61c0d71f/websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a", size = 173329, upload-time = "2025-03-05T20:01:39.668Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0c/6afa1f4644d7ed50284ac59cc70ef8abd44ccf7d45850d989ea7310538d0/websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e", size = 182312, upload-time = "2025-03-05T20:01:41.815Z" }, + { url = "https://files.pythonhosted.org/packages/dd/d4/ffc8bd1350b229ca7a4db2a3e1c482cf87cea1baccd0ef3e72bc720caeec/websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf", size = 181319, upload-time = "2025-03-05T20:01:43.967Z" }, + { url = "https://files.pythonhosted.org/packages/97/3a/5323a6bb94917af13bbb34009fac01e55c51dfde354f63692bf2533ffbc2/websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb", size = 181631, upload-time = "2025-03-05T20:01:46.104Z" }, + { url = "https://files.pythonhosted.org/packages/a6/cc/1aeb0f7cee59ef065724041bb7ed667b6ab1eeffe5141696cccec2687b66/websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d", size = 182016, upload-time = "2025-03-05T20:01:47.603Z" }, + { url = "https://files.pythonhosted.org/packages/79/f9/c86f8f7af208e4161a7f7e02774e9d0a81c632ae76db2ff22549e1718a51/websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9", size = 181426, upload-time = "2025-03-05T20:01:48.949Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b9/828b0bc6753db905b91df6ae477c0b14a141090df64fb17f8a9d7e3516cf/websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c", size = 181360, upload-time = "2025-03-05T20:01:50.938Z" }, + { url = "https://files.pythonhosted.org/packages/89/fb/250f5533ec468ba6327055b7d98b9df056fb1ce623b8b6aaafb30b55d02e/websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256", size = 176388, upload-time = 
"2025-03-05T20:01:52.213Z" }, + { url = "https://files.pythonhosted.org/packages/1c/46/aca7082012768bb98e5608f01658ff3ac8437e563eca41cf068bd5849a5e/websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41", size = 176830, upload-time = "2025-03-05T20:01:53.922Z" }, + { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423, upload-time = "2025-03-05T20:01:56.276Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082, upload-time = "2025-03-05T20:01:57.563Z" }, + { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330, upload-time = "2025-03-05T20:01:59.063Z" }, + { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878, upload-time = "2025-03-05T20:02:00.305Z" }, + { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883, upload-time = "2025-03-05T20:02:03.148Z" }, + { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252, upload-time = "2025-03-05T20:02:05.29Z" }, + { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521, upload-time = "2025-03-05T20:02:07.458Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958, upload-time = "2025-03-05T20:02:09.842Z" }, + { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918, upload-time = "2025-03-05T20:02:11.968Z" }, + { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", 
size = 176388, upload-time = "2025-03-05T20:02:13.32Z" }, + { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828, upload-time = "2025-03-05T20:02:14.585Z" }, + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = 
"sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/d40f779fa16f74d3468357197af8d6ad07e7c5a27ea1ca74ceb38986f77a/websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3", size = 173109, upload-time = "2025-03-05T20:03:17.769Z" }, + { url = "https://files.pythonhosted.org/packages/bc/cd/5b887b8585a593073fd92f7c23ecd3985cd2c3175025a91b0d69b0551372/websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1", size = 173343, upload-time = "2025-03-05T20:03:19.094Z" }, + { url = "https://files.pythonhosted.org/packages/fe/ae/d34f7556890341e900a95acf4886833646306269f899d58ad62f588bf410/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475", size = 174599, upload-time = "2025-03-05T20:03:21.1Z" }, + { url = "https://files.pythonhosted.org/packages/71/e6/5fd43993a87db364ec60fc1d608273a1a465c0caba69176dd160e197ce42/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9", size = 174207, upload-time = "2025-03-05T20:03:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/2b/fb/c492d6daa5ec067c2988ac80c61359ace5c4c674c532985ac5a123436cec/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04", size = 174155, upload-time = "2025-03-05T20:03:25.321Z" }, + { url = "https://files.pythonhosted.org/packages/68/a1/dcb68430b1d00b698ae7a7e0194433bce4f07ded185f0ee5fb21e2a2e91e/websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122", size = 176884, upload-time = "2025-03-05T20:03:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, +] + [[package]] name = "wrapt" version = "2.0.1"