diff --git a/AGENTS.md b/AGENTS.md index 7d46ef76f..abe7aaa58 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -7,7 +7,7 @@ This file provides guidance to agents when working with code in this repository. **DataDesigner** is an NVIDIA NeMo project for creating synthetic datasets from scratch. It's a comprehensive framework that generates structured data using multiple generation strategies: - **Sampled data**: Built-in generators (UUID, DateTime, etc.) and Faker integration -- **LLM-generated content**: Text, code, and structured data via LiteLLM +- **LLM-generated content**: Text, code, and structured data via native HTTP adapters - **Expression-based columns**: Derived columns using Jinja2 templates - **Validation & scoring**: Python, SQL, and remote validators; LLM-based judge scoring - **Seed dataset-based generation**: Generate from existing datasets @@ -25,7 +25,7 @@ The project follows a layered architecture: 2. **Engine Layer** ([packages/data-designer-engine/src/data_designer/engine/](packages/data-designer-engine/src/data_designer/engine/)): Internal generation and processing - `column_generators/`: Generates individual columns from configs - `dataset_builders/`: Orchestrates full dataset generation with DAG-based dependency management - - `models/`: LLM integration via LiteLLM with response parsing + - `models/`: LLM integration via native HTTP adapters with response parsing - `validators/`: Column validation (Python, SQL, Code, Remote) - `sampling_gen/`: Sophisticated person/entity sampling diff --git a/docs/concepts/models/model-providers.md b/docs/concepts/models/model-providers.md index 001b03135..9d397a87a 100644 --- a/docs/concepts/models/model-providers.md +++ b/docs/concepts/models/model-providers.md @@ -14,11 +14,24 @@ The `ModelProvider` class has the following fields: |-------|------|----------|-------------| | `name` | `str` | Yes | Unique identifier for the provider (e.g., `"nvidia"`, `"openai"`, `"openrouter"`) | | `endpoint` | `str` | Yes | API 
endpoint URL (e.g., `"https://integrate.api.nvidia.com/v1"`) | -| `provider_type` | `str` | No | Provider type (default: `"openai"`). Uses OpenAI-compatible API format | +| `provider_type` | `str` | No | Provider type: `"openai"` (default) or `"anthropic"`. See [Supported Provider Types](#supported-provider-types) below | | `api_key` | `str` | No | API key or environment variable name (e.g., `"NVIDIA_API_KEY"`) | | `extra_body` | `dict[str, Any]` | No | Additional parameters to include in the request body of all API requests to the provider. | | `extra_headers` | `dict[str, str]` | No | Additional headers to include in all API requests to the provider. | +## Supported Provider Types + +Data Designer supports two provider types: + +| Type | Description | +|------|-------------| +| `"openai"` | OpenAI-compatible chat completion API. This is the default and works with most providers, including NVIDIA NIM, vLLM, TGI, OpenRouter, Together AI, and OpenAI itself. | +| `"anthropic"` | Anthropic's native Messages API for Claude models. Use this when connecting directly to Anthropic's API. | + +Most self-hosted and third-party endpoints expose an OpenAI-compatible API, so `provider_type="openai"` is the right choice in the majority of cases. Only use `"anthropic"` when connecting directly to Anthropic's API at `https://api.anthropic.com`. + +> **Note:** Previous versions of Data Designer supported additional provider types (e.g., `"azure"`, `"bedrock"`, `"vertex_ai"`) via a LiteLLM bridge. These are no longer supported. If you were using one of these types, switch to `provider_type="openai"` and point the `endpoint` to an OpenAI-compatible proxy or gateway for that service. 
+ ## API Key Configuration The `api_key` field can be specified in two ways: diff --git a/packages/data-designer-config/src/data_designer/config/column_configs.py b/packages/data-designer-config/src/data_designer/config/column_configs.py index e08a40240..cde98ea45 100644 --- a/packages/data-designer-config/src/data_designer/config/column_configs.py +++ b/packages/data-designer-config/src/data_designer/config/column_configs.py @@ -115,7 +115,7 @@ def inject_sampler_type_into_params(cls, data: dict) -> dict: class LLMTextColumnConfig(SingleColumnConfig): """Configuration for text generation columns using Large Language Models. - LLM text columns generate free-form text content using language models via LiteLLM. + LLM text columns generate free-form text content using language models. Prompts support Jinja2 templating to reference values from other columns, enabling context-aware generation. The generated text can optionally include message traces capturing the full conversation history. diff --git a/packages/data-designer-config/src/data_designer/lazy_heavy_imports.py b/packages/data-designer-config/src/data_designer/lazy_heavy_imports.py index 6c89c3452..778021f97 100644 --- a/packages/data-designer-config/src/data_designer/lazy_heavy_imports.py +++ b/packages/data-designer-config/src/data_designer/lazy_heavy_imports.py @@ -32,7 +32,6 @@ "pq": "pyarrow.parquet", "pa": "pyarrow", "faker": "faker", - "litellm": "litellm", "sqlfluff": "sqlfluff", "httpx": "httpx", "duckdb": "duckdb", diff --git a/packages/data-designer-engine/README.md b/packages/data-designer-engine/README.md index bc39be851..e701cebc5 100644 --- a/packages/data-designer-engine/README.md +++ b/packages/data-designer-engine/README.md @@ -2,7 +2,7 @@ Generation engine for NeMo Data Designer synthetic data generation framework. -This package contains the execution engine that powers Data Designer. 
It depends on `data-designer-config` and includes heavy dependencies like pandas, numpy, and LLM integration via litellm. +This package contains the execution engine that powers Data Designer. It depends on `data-designer-config` and includes heavy dependencies like pandas, numpy, and native HTTP-based LLM integration. ## Installation diff --git a/packages/data-designer-engine/pyproject.toml b/packages/data-designer-engine/pyproject.toml index 181519d1d..b8f2925fd 100644 --- a/packages/data-designer-engine/pyproject.toml +++ b/packages/data-designer-engine/pyproject.toml @@ -44,7 +44,6 @@ dependencies = [ "json-repair>=0.48.0,<1", "jsonpath-rust-bindings>=1.0,<2", "jsonschema>=4.0.0,<5", - "litellm>=1.77.0,<1.80.12", "lxml>=6.0.2,<7", "marko>=2.1.2,<3", "mcp>=1.26.0,<2", diff --git a/packages/data-designer-engine/src/data_designer/engine/dataset_builders/utils/async_concurrency.py b/packages/data-designer-engine/src/data_designer/engine/dataset_builders/utils/async_concurrency.py index 105278233..9546814fc 100644 --- a/packages/data-designer-engine/src/data_designer/engine/dataset_builders/utils/async_concurrency.py +++ b/packages/data-designer-engine/src/data_designer/engine/dataset_builders/utils/async_concurrency.py @@ -20,11 +20,12 @@ (TaskGroup) Singleton Event Loop: - The background loop is a process-wide singleton. LiteLLM and similar - libraries bind internal async state to a specific event loop, so creating - per-call or per-instance loops breaks connection reuse and triggers - cross-loop errors. ``ensure_async_engine_loop()`` creates one daemon - loop thread and reuses it for all executor instances. + The background loop is a process-wide singleton. Async-stateful + resources (connection pools, semaphores) bind internal state to a + specific event loop, so creating per-call or per-instance loops breaks + connection reuse and triggers cross-loop errors. 
+ ``ensure_async_engine_loop()`` creates one daemon loop thread and + reuses it for all executor instances. Startup Handshake: Loop creation uses a ``threading.Event`` readiness handshake. The @@ -90,8 +91,8 @@ def ensure_async_engine_loop() -> asyncio.AbstractEventLoop: """Get or create a persistent event loop for async engine work. A single event loop is shared across all AsyncConcurrentExecutor instances - to avoid breaking libraries (like LiteLLM) that bind internal async state - to a specific event loop. + to avoid breaking async-stateful resources (connection pools, semaphores) + that bind internal state to a specific event loop. """ global _loop, _thread with _lock: diff --git a/packages/data-designer-engine/src/data_designer/engine/models/clients/adapters/__init__.py b/packages/data-designer-engine/src/data_designer/engine/models/clients/adapters/__init__.py index 9d6f5648b..644252eb9 100644 --- a/packages/data-designer-engine/src/data_designer/engine/models/clients/adapters/__init__.py +++ b/packages/data-designer-engine/src/data_designer/engine/models/clients/adapters/__init__.py @@ -4,7 +4,6 @@ from __future__ import annotations from data_designer.engine.models.clients.adapters.anthropic import AnthropicClient -from data_designer.engine.models.clients.adapters.litellm_bridge import LiteLLMBridgeClient, LiteLLMRouter from data_designer.engine.models.clients.adapters.openai_compatible import OpenAICompatibleClient -__all__ = ["AnthropicClient", "LiteLLMBridgeClient", "LiteLLMRouter", "OpenAICompatibleClient"] +__all__ = ["AnthropicClient", "OpenAICompatibleClient"] diff --git a/packages/data-designer-engine/src/data_designer/engine/models/clients/adapters/litellm_bridge.py b/packages/data-designer-engine/src/data_designer/engine/models/clients/adapters/litellm_bridge.py deleted file mode 100644 index bc8e5069b..000000000 --- a/packages/data-designer-engine/src/data_designer/engine/models/clients/adapters/litellm_bridge.py +++ /dev/null @@ -1,210 +0,0 @@ -# 
SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -from __future__ import annotations - -import contextlib -import logging -from collections.abc import Iterator -from typing import Any, Protocol - -from data_designer.engine.models.clients.base import ModelClient -from data_designer.engine.models.clients.errors import ( - ProviderError, - extract_message_from_exception_string, - infer_error_kind_from_exception, - map_http_status_to_provider_error_kind, -) -from data_designer.engine.models.clients.parsing import ( - aextract_images_from_chat_response, - aextract_images_from_image_response, - aparse_chat_completion_response, - extract_embedding_vector, - extract_images_from_chat_response, - extract_images_from_image_response, - extract_usage, - parse_chat_completion_response, -) -from data_designer.engine.models.clients.types import ( - ChatCompletionRequest, - ChatCompletionResponse, - EmbeddingRequest, - EmbeddingResponse, - ImageGenerationRequest, - ImageGenerationResponse, - TransportKwargs, -) - -logger = logging.getLogger(__name__) - - -class LiteLLMRouter(Protocol): - """Structural type for the LiteLLM router methods the bridge depends on.""" - - def completion(self, *, model: str, messages: list[dict[str, Any]], **kwargs: Any) -> Any: ... - - async def acompletion(self, *, model: str, messages: list[dict[str, Any]], **kwargs: Any) -> Any: ... - - def embedding(self, *, model: str, input: list[str], **kwargs: Any) -> Any: ... - - async def aembedding(self, *, model: str, input: list[str], **kwargs: Any) -> Any: ... - - def image_generation(self, *, prompt: str, model: str, **kwargs: Any) -> Any: ... - - async def aimage_generation(self, *, prompt: str, model: str, **kwargs: Any) -> Any: ... 
- - -class LiteLLMBridgeClient(ModelClient): - """Bridge adapter that wraps the existing LiteLLM router behind canonical client types.""" - - # "messages" (optional, default None) and "prompt" (required) are passed explicitly - # to choose between the chat-completion and diffusion code paths, so exclude them - # from the automatic optional-field forwarding. - _IMAGE_EXCLUDE = frozenset({"messages", "prompt"}) - - def __init__(self, *, provider_name: str, router: LiteLLMRouter) -> None: - self.provider_name = provider_name - self._router = router - - def supports_chat_completion(self) -> bool: - return True - - def supports_embeddings(self) -> bool: - return True - - def supports_image_generation(self) -> bool: - return True - - def completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse: - transport = TransportKwargs.from_request(request, flatten_extra_body=False) - with _handle_non_provider_errors(self.provider_name): - response = self._router.completion( - model=request.model, - messages=request.messages, - extra_headers=transport.headers or None, - **_with_timeout(transport), - ) - return parse_chat_completion_response(response) - - async def acompletion(self, request: ChatCompletionRequest) -> ChatCompletionResponse: - transport = TransportKwargs.from_request(request, flatten_extra_body=False) - with _handle_non_provider_errors(self.provider_name): - response = await self._router.acompletion( - model=request.model, - messages=request.messages, - extra_headers=transport.headers or None, - **_with_timeout(transport), - ) - return await aparse_chat_completion_response(response) - - def embeddings(self, request: EmbeddingRequest) -> EmbeddingResponse: - transport = TransportKwargs.from_request(request, flatten_extra_body=False) - with _handle_non_provider_errors(self.provider_name): - response = self._router.embedding( - model=request.model, - input=request.inputs, - extra_headers=transport.headers or None, - **_with_timeout(transport), - ) - 
vectors = [extract_embedding_vector(item) for item in getattr(response, "data", [])] - return EmbeddingResponse(vectors=vectors, usage=extract_usage(getattr(response, "usage", None)), raw=response) - - async def aembeddings(self, request: EmbeddingRequest) -> EmbeddingResponse: - transport = TransportKwargs.from_request(request, flatten_extra_body=False) - with _handle_non_provider_errors(self.provider_name): - response = await self._router.aembedding( - model=request.model, - input=request.inputs, - extra_headers=transport.headers or None, - **_with_timeout(transport), - ) - vectors = [extract_embedding_vector(item) for item in getattr(response, "data", [])] - return EmbeddingResponse(vectors=vectors, usage=extract_usage(getattr(response, "usage", None)), raw=response) - - def generate_image(self, request: ImageGenerationRequest) -> ImageGenerationResponse: - transport = TransportKwargs.from_request(request, exclude=self._IMAGE_EXCLUDE, flatten_extra_body=False) - with _handle_non_provider_errors(self.provider_name): - if request.messages is not None: - response = self._router.completion( - model=request.model, - messages=request.messages, - extra_headers=transport.headers or None, - **_with_timeout(transport), - ) - else: - response = self._router.image_generation( - prompt=request.prompt, - model=request.model, - extra_headers=transport.headers or None, - **_with_timeout(transport), - ) - - if request.messages is not None: - images = extract_images_from_chat_response(response) - else: - images = extract_images_from_image_response(response) - - usage = extract_usage(getattr(response, "usage", None), generated_images=len(images)) - return ImageGenerationResponse(images=images, usage=usage, raw=response) - - async def agenerate_image(self, request: ImageGenerationRequest) -> ImageGenerationResponse: - transport = TransportKwargs.from_request(request, exclude=self._IMAGE_EXCLUDE, flatten_extra_body=False) - with _handle_non_provider_errors(self.provider_name): - if 
request.messages is not None: - response = await self._router.acompletion( - model=request.model, - messages=request.messages, - extra_headers=transport.headers or None, - **_with_timeout(transport), - ) - else: - response = await self._router.aimage_generation( - prompt=request.prompt, - model=request.model, - extra_headers=transport.headers or None, - **_with_timeout(transport), - ) - - if request.messages is not None: - images = await aextract_images_from_chat_response(response) - else: - images = await aextract_images_from_image_response(response) - - usage = extract_usage(getattr(response, "usage", None), generated_images=len(images)) - return ImageGenerationResponse(images=images, usage=usage, raw=response) - - def close(self) -> None: - return None - - async def aclose(self) -> None: - return None - - -def _with_timeout(transport: TransportKwargs) -> dict[str, Any]: - """Merge ``transport.body`` with the per-request timeout so LiteLLM receives it as a kwarg.""" - if transport.timeout is not None: - return {**transport.body, "timeout": transport.timeout} - return transport.body - - -@contextlib.contextmanager -def _handle_non_provider_errors(provider_name: str) -> Iterator[None]: - """Catch non-ProviderError exceptions from the router and re-raise as ProviderError.""" - try: - yield - except ProviderError: - raise - except Exception as exc: - status_code = getattr(exc, "status_code", None) - if isinstance(status_code, int): - kind = map_http_status_to_provider_error_kind(status_code=status_code, body_text=str(exc)) - else: - kind = infer_error_kind_from_exception(exc) - - raise ProviderError( - kind=kind, - message=extract_message_from_exception_string(str(exc)), - status_code=status_code if isinstance(status_code, int) else None, - provider_name=provider_name, - cause=exc, - ) from exc diff --git a/packages/data-designer-engine/src/data_designer/engine/models/clients/errors.py b/packages/data-designer-engine/src/data_designer/engine/models/clients/errors.py 
index 213f1ac09..d11f6444a 100644 --- a/packages/data-designer-engine/src/data_designer/engine/models/clients/errors.py +++ b/packages/data-designer-engine/src/data_designer/engine/models/clients/errors.py @@ -130,9 +130,9 @@ def map_http_error_to_provider_error( def extract_message_from_exception_string(raw: str) -> str: - """Extract a human-readable message from a stringified LiteLLM exception. + """Extract a human-readable message from a stringified provider exception. - LiteLLM often formats errors as ``"Error code: 400 - {json}"``. This + Some providers format errors as ``"Error code: 400 - {json}"``. This mirrors the structured-key lookup in ``_extract_structured_message`` but operates on a raw string instead of an ``HttpResponse``. """ diff --git a/packages/data-designer-engine/src/data_designer/engine/models/clients/factory.py b/packages/data-designer-engine/src/data_designer/engine/models/clients/factory.py index 4832e9841..458ebfcad 100644 --- a/packages/data-designer-engine/src/data_designer/engine/models/clients/factory.py +++ b/packages/data-designer-engine/src/data_designer/engine/models/clients/factory.py @@ -3,24 +3,20 @@ from __future__ import annotations -import os - -import data_designer.lazy_heavy_imports as lazy from data_designer.config.models import ModelConfig -from data_designer.engine.model_provider import ModelProvider, ModelProviderRegistry +from data_designer.engine.errors import DataDesignerError +from data_designer.engine.model_provider import ModelProviderRegistry from data_designer.engine.models.clients.adapters.anthropic import AnthropicClient from data_designer.engine.models.clients.adapters.http_model_client import ClientConcurrencyMode -from data_designer.engine.models.clients.adapters.litellm_bridge import LiteLLMBridgeClient from data_designer.engine.models.clients.adapters.openai_compatible import OpenAICompatibleClient from data_designer.engine.models.clients.base import ModelClient from 
data_designer.engine.models.clients.retry import RetryConfig from data_designer.engine.models.clients.throttle_manager import ThrottleManager from data_designer.engine.models.clients.throttled import ThrottledModelClient -from data_designer.engine.models.litellm_overrides import CustomRouter, LiteLLMRouterDefaultKwargs +from data_designer.engine.models.errors import FormattedLLMErrorMessage from data_designer.engine.secret_resolver import SecretResolver -_BACKEND_ENV_VAR = "DATA_DESIGNER_MODEL_BACKEND" -_BACKEND_BRIDGE = "litellm_bridge" +_SUPPORTED_PROVIDER_TYPES = ("openai", "anthropic") def create_model_client( @@ -40,12 +36,10 @@ def create_model_client( secret_resolver: Resolver for secrets referenced in provider API key configs. model_provider_registry: Registry of model provider configurations used to look up endpoint, provider type, and API key reference. - retry_config: Optional retry configuration for native HTTP adapters. - Ignored by the ``LiteLLMBridgeClient`` (which has its own retry logic). + retry_config: Optional retry configuration for HTTP adapters. client_concurrency_mode: ``"sync"`` (default) for the sync engine path, ``"async"`` for the async engine path. Native HTTP adapters are - constrained to a single concurrency mode; the ``LiteLLMBridgeClient`` - ignores this parameter. + constrained to a single concurrency mode. throttle_manager: Optional throttle manager for per-request AIMD concurrency control. When provided, the returned client is wrapped with ``ThrottledModelClient``. @@ -61,11 +55,14 @@ def create_model_client( Returns: A ``ModelClient`` instance routed by provider type. + Raises: + DataDesignerError: If ``provider_type`` is not one of the supported + types (``"openai"``, ``"anthropic"``). + Routing logic: - 1. If ``DATA_DESIGNER_MODEL_BACKEND=litellm_bridge`` → always use bridge. - 2. If ``provider_type == "openai"`` → ``OpenAICompatibleClient``. - 3. If ``provider_type == "anthropic"`` → ``AnthropicClient``. - 4. 
Otherwise → ``LiteLLMBridgeClient`` (fallback for unknown providers). + 1. If ``provider_type == "openai"`` → ``OpenAICompatibleClient``. + 2. If ``provider_type == "anthropic"`` → ``AnthropicClient``. + 3. Otherwise → ``DataDesignerError``. """ provider = model_provider_registry.get_provider(model_config.provider) api_key = _resolve_api_key(provider.api_key, secret_resolver) @@ -73,11 +70,8 @@ def create_model_client( raw_timeout = model_config.inference_parameters.timeout timeout_s = float(raw_timeout if raw_timeout is not None else 60) - backend = os.environ.get(_BACKEND_ENV_VAR, "").strip().lower() - if backend == _BACKEND_BRIDGE: - client: ModelClient = _create_bridge_client(model_config, provider, api_key, max_parallel) - elif provider.provider_type == "openai": - client = OpenAICompatibleClient( + if provider.provider_type == "openai": + client: ModelClient = OpenAICompatibleClient( provider_name=provider.name, endpoint=provider.endpoint, api_key=api_key, @@ -97,7 +91,16 @@ def create_model_client( concurrency_mode=client_concurrency_mode, ) else: - client = _create_bridge_client(model_config, provider, api_key, max_parallel) + raise DataDesignerError( + FormattedLLMErrorMessage( + cause=(f"Provider type {provider.provider_type!r} for provider {provider.name!r} is not supported."), + solution=( + f"Change provider_type to one of {', '.join(repr(t) for t in _SUPPORTED_PROVIDER_TYPES)} " + "in your model provider config. Most OpenAI-compatible endpoints " + '(vLLM, TGI, NIM, etc.) work with provider_type="openai".' 
+ ), + ) + ) if throttle_manager is not None: client = ThrottledModelClient( @@ -120,24 +123,3 @@ def _resolve_api_key(api_key_ref: str | None, secret_resolver: SecretResolver) - return None resolved = secret_resolver.resolve(api_key_ref) return resolved or None - - -def _create_bridge_client( - model_config: ModelConfig, - provider: ModelProvider, - api_key: str | None, - max_parallel: int, -) -> LiteLLMBridgeClient: - bridge_key = api_key or "not-used-but-required" - litellm_params = lazy.litellm.LiteLLM_Params( - model=f"{provider.provider_type}/{model_config.model}", - api_base=provider.endpoint, - api_key=bridge_key, - max_parallel_requests=max_parallel, - ) - deployment = { - "model_name": model_config.model, - "litellm_params": litellm_params.model_dump(), - } - router = CustomRouter([deployment], **LiteLLMRouterDefaultKwargs().model_dump()) - return LiteLLMBridgeClient(provider_name=provider.name, router=router) diff --git a/packages/data-designer-engine/src/data_designer/engine/models/clients/parsing.py b/packages/data-designer-engine/src/data_designer/engine/models/clients/parsing.py index 3942242eb..6f1217834 100644 --- a/packages/data-designer-engine/src/data_designer/engine/models/clients/parsing.py +++ b/packages/data-designer-engine/src/data_designer/engine/models/clients/parsing.py @@ -236,8 +236,8 @@ def extract_reasoning_content(message: Any) -> str | None: """Extract reasoning content from a provider response message. vLLM >= 0.16.0 uses ``message.reasoning`` as the canonical field; - ``message.reasoning_content`` is the legacy / LiteLLM-normalized fallback. - Check the canonical field first so reasoning traces survive LiteLLM removal. + ``message.reasoning_content`` is a legacy fallback used by some providers. + Check the canonical field first. 
Ref: https://github.com/NVIDIA-NeMo/DataDesigner/issues/374 """ diff --git a/packages/data-designer-engine/src/data_designer/engine/models/clients/types.py b/packages/data-designer-engine/src/data_designer/engine/models/clients/types.py index 3100e9b77..a8d295fb6 100644 --- a/packages/data-designer-engine/src/data_designer/engine/models/clients/types.py +++ b/packages/data-designer-engine/src/data_designer/engine/models/clients/types.py @@ -118,9 +118,8 @@ class TransportKwargs: Adapters call ``TransportKwargs.from_request(request)`` instead of manually handling ``extra_body`` / ``extra_headers`` on every request type. - - ``body``: API-level keyword arguments. By default ``extra_body`` keys are - merged into the top level; pass ``flatten_extra_body=False`` to preserve - ``extra_body`` as a nested dict (needed by LiteLLM). + - ``body``: API-level keyword arguments. ``extra_body`` keys are merged + into the top level. - ``headers``: Extra HTTP headers to attach to the outgoing request. """ @@ -136,16 +135,11 @@ def from_request( request: Any, *, exclude: frozenset[str] = frozenset(), - # TODO: remove flatten_extra_body after LiteLLMBridgeClient is retired - flatten_extra_body: bool = True, ) -> TransportKwargs: """Build transport-ready kwargs from a canonical request dataclass. 1. Collects all non-None optional fields (respecting *exclude*). - 2. Handles ``extra_body`` based on *flatten_extra_body*: - - ``True`` (default): merges its keys into the top-level body dict. - - ``False``: preserves it as ``extra_body`` in the body dict so - that callers like LiteLLM can forward it without param validation. + 2. Merges ``extra_body`` keys into the top-level body dict. 3. Pops ``extra_headers`` into a separate headers dict. 4. Extracts ``timeout`` as a per-request HTTP timeout override (not forwarded to the API body). 
@@ -156,12 +150,7 @@ def from_request( extra_headers = getattr(request, "extra_headers", None) or {} timeout = getattr(request, "timeout", None) - if flatten_extra_body: - body = {**optional_fields, **extra_body} - else: - body = {**optional_fields} - if extra_body: - body["extra_body"] = extra_body + body = {**optional_fields, **extra_body} return cls(body=body, headers=dict(extra_headers), timeout=timeout) diff --git a/packages/data-designer-engine/src/data_designer/engine/models/errors.py b/packages/data-designer-engine/src/data_designer/engine/models/errors.py index 42a2ba2dd..79d8a15da 100644 --- a/packages/data-designer-engine/src/data_designer/engine/models/errors.py +++ b/packages/data-designer-engine/src/data_designer/engine/models/errors.py @@ -6,17 +6,13 @@ import logging from collections.abc import Callable from functools import wraps -from typing import TYPE_CHECKING, Any, NoReturn +from typing import Any, NoReturn from pydantic import BaseModel -import data_designer.lazy_heavy_imports as lazy from data_designer.engine.errors import DataDesignerError from data_designer.engine.models.clients.errors import ProviderError, ProviderErrorKind -if TYPE_CHECKING: - import litellm - logger = logging.getLogger(__name__) @@ -187,7 +183,6 @@ def handle_llm_exceptions( cause=f"The API key provided for model {model_name!r} was found to be invalid or expired while {purpose}.", solution=f"Verify your API key for model provider and update it in your settings for model provider {model_provider_name!r}.", ) - err_msg_parser = DownstreamLLMExceptionMessageParser(model_name, model_provider_name, purpose) match exception: # Canonical ProviderError from the client adapter layer case ProviderError(): @@ -200,84 +195,6 @@ def handle_llm_exceptions( authentication_error, ) - # LiteLLM-specific errors (safety net during bridge period) - case lazy.litellm.exceptions.APIError(): - raise err_msg_parser.parse_api_error(exception, authentication_error) from None - - case 
lazy.litellm.exceptions.APIConnectionError(): - raise ModelAPIConnectionError( - FormattedLLMErrorMessage( - cause=f"Connection to model {model_name!r} hosted on model provider {model_provider_name!r} failed while {purpose}.", - solution="Check your network/proxy/firewall settings.", - ) - ) from None - - case lazy.litellm.exceptions.AuthenticationError(): - raise ModelAuthenticationError(authentication_error) from None - - case lazy.litellm.exceptions.ContextWindowExceededError(): - raise err_msg_parser.parse_context_window_exceeded_error(exception) from None - - case lazy.litellm.exceptions.UnsupportedParamsError(): - raise ModelUnsupportedParamsError( - FormattedLLMErrorMessage( - cause=f"One or more of the parameters you provided were found to be unsupported by model {model_name!r} while {purpose}.", - solution=f"Review the documentation for model provider {model_provider_name!r} and adjust your request.", - ) - ) from None - - case lazy.litellm.exceptions.BadRequestError(): - raise err_msg_parser.parse_bad_request_error(exception) from None - - case lazy.litellm.exceptions.InternalServerError(): - raise ModelInternalServerError( - FormattedLLMErrorMessage( - cause=f"Model {model_name!r} is currently experiencing internal server issues while {purpose}.", - solution=f"Try again in a few moments. 
Check with your model provider {model_provider_name!r} if the issue persists.", - ) - ) from None - - case lazy.litellm.exceptions.NotFoundError(): - raise ModelNotFoundError( - FormattedLLMErrorMessage( - cause=f"The specified model {model_name!r} could not be found while {purpose}.", - solution=f"Check that the model name is correct and supported by your model provider {model_provider_name!r} and try again.", - ) - ) from None - - case lazy.litellm.exceptions.PermissionDeniedError(): - raise ModelPermissionDeniedError( - FormattedLLMErrorMessage( - cause=f"Your API key was found to lack the necessary permissions to use model {model_name!r} while {purpose}.", - solution=f"Use an API key that has the right permissions for the model or use a model the API key in use has access to in model provider {model_provider_name!r}.", - ) - ) from None - - case lazy.litellm.exceptions.RateLimitError(): - raise ModelRateLimitError( - FormattedLLMErrorMessage( - cause=f"You have exceeded the rate limit for model {model_name!r} while {purpose}.", - solution="Wait and try again in a few moments.", - ) - ) from None - - case lazy.litellm.exceptions.Timeout(): - raise ModelTimeoutError( - FormattedLLMErrorMessage( - cause=f"The request to model {model_name!r} timed out while {purpose}.", - solution="Check your connection and try again. You may need to increase the timeout setting for the model.", - ) - ) from None - - case lazy.litellm.exceptions.UnprocessableEntityError(): - raise ModelUnprocessableEntityError( - FormattedLLMErrorMessage( - cause=f"The request to model {model_name!r} failed despite correct request format while {purpose}.", - solution="This is most likely temporary. Try again in a few moments.", - ) - ) from None - - # Parsing and validation errors case GenerationValidationFailureError(): detail_text = exception.detail.rstrip(".") if exception.detail is not None else None validation_detail = f" Validation detail: {detail_text}." 
if detail_text is not None else "" @@ -359,58 +276,6 @@ async def wrapper(model_facade: Any, *args: Any, **kwargs: Any) -> Any: return wrapper -class DownstreamLLMExceptionMessageParser: - def __init__(self, model_name: str, model_provider_name: str, purpose: str): - self.model_name = model_name - self.model_provider_name = model_provider_name - self.purpose = purpose - - def parse_bad_request_error(self, exception: litellm.exceptions.BadRequestError) -> DataDesignerError: - err_msg = FormattedLLMErrorMessage( - cause=f"The request for model {self.model_name!r} was found to be malformed or missing required parameters while {self.purpose}.", - solution="Check your request parameters and try again.", - ) - if "is not a multimodal model" in str(exception): - err_msg = FormattedLLMErrorMessage( - cause=f"Model {self.model_name!r} is not a multimodal model, but it looks like you are trying to provide multimodal context while {self.purpose}.", - solution="Check your request parameters and try again.", - ) - return ModelBadRequestError(err_msg) - - def parse_context_window_exceeded_error( - self, exception: litellm.exceptions.ContextWindowExceededError - ) -> DataDesignerError: - cause = f"The input data for model '{self.model_name}' was found to exceed its supported context width while {self.purpose}." - try: - if "OpenAIException - This model's maximum context length is " in str(exception): - openai_exception_cause = ( - str(exception).split("OpenAIException - ")[1].split("\n")[0].split(" Please reduce ")[0] - ) - cause = f"{cause} {openai_exception_cause}" - except Exception: - pass - finally: - return ModelContextWindowExceededError( - FormattedLLMErrorMessage( - cause=cause, - solution="Check the model's supported max context width. 
Adjust the length of your input along with completions and try again.", - ) - ) - - def parse_api_error( - self, exception: litellm.exceptions.APIError, auth_error_msg: FormattedLLMErrorMessage - ) -> DataDesignerError: - if "Error code: 403" in str(exception): - return ModelAuthenticationError(auth_error_msg) - - return ModelAPIError( - FormattedLLMErrorMessage( - cause=f"An unexpected API error occurred with model {self.model_name!r} while {self.purpose}.", - solution=f"Try again in a few moments. Check with your model provider {self.model_provider_name!r} if the issue persists.", - ) - ) - - def _raise_from_provider_error( exception: ProviderError, kind: ProviderErrorKind, @@ -471,9 +336,15 @@ def _raise_from_provider_error( raise ModelAuthenticationError(authentication_error) from None if kind == ProviderErrorKind.CONTEXT_WINDOW_EXCEEDED: + cause = ( + f"The input data for model '{model_name}' was found to exceed its supported context width while {purpose}." + ) + context_detail = _extract_context_window_detail(str(exception)) + if context_detail: + cause = f"{cause} {context_detail}" raise ModelContextWindowExceededError( FormattedLLMErrorMessage( - cause=f"The input data for model '{model_name}' was found to exceed its supported context width while {purpose}.", + cause=cause, solution="Check the model's supported max context width. 
Adjust the length of your input along with completions and try again.", ) ) from None @@ -532,3 +403,14 @@ def _raise_from_provider_error( exception, ) ) from None + + +def _extract_context_window_detail(error_text: str) -> str | None: + """Extract the specific token-count detail from an OpenAI-style context window error.""" + marker = "this model's maximum context length is " + lower_text = error_text.lower() + if marker in lower_text: + start = lower_text.index(marker) + detail = error_text[start + len(marker) :].split("\n")[0].split(" Please reduce ")[0] + return f"This model's maximum context length is {detail}" + return None diff --git a/packages/data-designer-engine/src/data_designer/engine/models/factory.py b/packages/data-designer-engine/src/data_designer/engine/models/factory.py index dff83ca19..6ef2b2727 100644 --- a/packages/data-designer-engine/src/data_designer/engine/models/factory.py +++ b/packages/data-designer-engine/src/data_designer/engine/models/factory.py @@ -27,7 +27,7 @@ def create_model_registry( ) -> ModelRegistry: """Factory function for creating a ModelRegistry instance. - Heavy dependencies (litellm, httpx) are deferred until this function is called. + Heavy dependencies (httpx, etc.) are deferred until this function is called. This is a factory function pattern - imports inside factories are idiomatic Python for lazy initialization. 
@@ -52,11 +52,8 @@ def create_model_registry( from data_designer.engine.models.clients.retry import RetryConfig from data_designer.engine.models.clients.throttle_manager import ThrottleManager from data_designer.engine.models.facade import ModelFacade - from data_designer.engine.models.litellm_overrides import apply_litellm_patches from data_designer.engine.models.registry import ModelRegistry - apply_litellm_patches() - throttle_manager = ThrottleManager((run_config or RunConfig()).throttle) def model_facade_factory( diff --git a/packages/data-designer-engine/src/data_designer/engine/models/litellm_overrides.py b/packages/data-designer-engine/src/data_designer/engine/models/litellm_overrides.py deleted file mode 100644 index b60fea081..000000000 --- a/packages/data-designer-engine/src/data_designer/engine/models/litellm_overrides.py +++ /dev/null @@ -1,199 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - - -""" -LiteLLM overrides and customizations. - -Note on imports: This module uses direct (eager) imports for litellm rather than lazy loading. -This is intentional because: - -1. Class inheritance requires base classes to be resolved at class definition time, - making lazy imports incompatible with our ThreadSafeCache and CustomRouter classes. - -2. This module is already lazily loaded at the application level - it's only imported - by facade.py, which itself is imported inside the create_model_registry() factory - function. So litellm is only loaded when models are actually needed. - -3. Attempting to use lazy imports here causes intermittent ImportErrors. 
-""" - -from __future__ import annotations - -import random -import threading - -import httpx -import litellm -from litellm import RetryPolicy -from litellm.caching.in_memory_cache import InMemoryCache -from litellm.litellm_core_utils.logging_callback_manager import LoggingCallbackManager -from litellm.router import Router -from litellm.types.llms.openai import ImageURLListItem -from pydantic import BaseModel, Field -from typing_extensions import NotRequired, override - -from data_designer.logging import quiet_noisy_logger - -DEFAULT_MAX_CALLBACKS = 1000 - - -class LiteLLMRouterDefaultKwargs(BaseModel): - ## Number of seconds to wait initially after a connection - ## failure. - initial_retry_after_s: float = 2.0 - - ## Jitter percentage added during exponential backoff to - ## smooth repeated retries over time. - jitter_pct: float = 0.2 - - ## Maximum number of seconds to wait for an API request - ## before letting it die. Will trigger a retry. - timeout: float = 60.0 - - ## Sets the default retry policy, including the number - ## of retries to use in particular scenarios. 
- retry_policy: RetryPolicy = Field( - default_factory=lambda: RetryPolicy( - RateLimitErrorRetries=3, - TimeoutErrorRetries=3, - ) - ) - - -class ThreadSafeCache(InMemoryCache): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._lock = threading.RLock() - - def get_cache(self, key, **kwargs): - with self._lock: - return super().get_cache(key, **kwargs) - - def set_cache(self, key, value, **kwargs): - with self._lock: - super().set_cache(key, value, **kwargs) - - def batch_get_cache(self, keys: list, **kwargs): - with self._lock: - return super().batch_get_cache(keys, **kwargs) - - def delete_cache(self, key): - with self._lock: - super().delete_cache(key) - - def evict_cache(self): - with self._lock: - super().evict_cache() - - def increment_cache(self, key, value: int, **kwargs) -> int: - with self._lock: - return super().increment_cache(key, value, **kwargs) - - def flush_cache(self): - with self._lock: - super().flush_cache() - - -class CustomRouter(Router): - def __init__( - self, - *args, - initial_retry_after_s: float, - jitter_pct: float, - **kwargs, - ): - super().__init__(*args, **kwargs) - self._initial_retry_after_s = initial_retry_after_s - self._jitter_pct = jitter_pct - - def _extract_retry_delay_from_headers(self, e: Exception) -> int | float | None: - """ - Most of this code logic was extracted directly from the parent - `Router`'s `_time_to_sleep_before_retry` function. Our override - of that method below should only affect requests where the server - didn't explicitly return a desired retry-delay. If the server did - return this info, we'll simply use that retry value returned here. 
- """ - - response_headers: httpx.Headers | None = None - if hasattr(e, "response") and hasattr(e.response, "headers"): # type: ignore - response_headers = e.response.headers # type: ignore - if hasattr(e, "litellm_response_headers"): - response_headers = e.litellm_response_headers # type: ignore - - retry_after = litellm.utils._get_retry_after_from_exception_header(response_headers) - - # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. - if retry_after is not None and 0 < retry_after <= 60: - return retry_after - else: - return None - - @override - def _time_to_sleep_before_retry( - self, - e: Exception, - remaining_retries: int, - num_retries: int, - healthy_deployments: list | None = None, - all_deployments: list | None = None, - ) -> int | float: - """ - Implements exponential backoff for retries. - - Technically, litellm's `Router` already implements some - form of exponential backoff. However, that backoff - is not customizable w.r.t jitter and initial delay - timing. For that reason, we override this method to - utilize our own custom instance variables, deferring - to the existing implementation wherever we can. - """ - - # If the response headers indicated how long we should wait, - # use that information. - if retry_after := self._extract_retry_delay_from_headers(e): - return retry_after - - return self.calculate_exponential_backoff( - initial_retry_after_s=self._initial_retry_after_s, - current_retry=num_retries - remaining_retries, - jitter_pct=self._jitter_pct, - ) - - @staticmethod - def calculate_exponential_backoff(initial_retry_after_s: float, current_retry: int, jitter_pct: float) -> float: - sleep_s = initial_retry_after_s * (pow(2.0, current_retry)) - jitter = 1.0 + random.uniform(-jitter_pct, jitter_pct) - return sleep_s * jitter - - -def patch_image_url_list_item(): - """Make ImageURLListItem.index optional. - - Some providers (e.g. 
OpenRouter) return image objects without the - ``index`` field. LiteLLM's TypedDict marks it as required, causing - a Pydantic validation error when constructing ``Message``. - """ - ImageURLListItem.__annotations__["index"] = NotRequired[int] - ImageURLListItem.__required_keys__ = ImageURLListItem.__required_keys__ - {"index"} - ImageURLListItem.__optional_keys__ = ImageURLListItem.__optional_keys__ | {"index"} - - # Pydantic v2 compiles TypedDict schemas at class definition time, - # so we must rebuild the Message model to pick up the annotation change. - litellm.Message.model_rebuild(force=True) - - -def apply_litellm_patches(): - litellm.in_memory_llm_clients_cache = ThreadSafeCache() - - # Workaround for the litellm issue described in https://github.com/BerriAI/litellm/issues/9792 - LoggingCallbackManager.MAX_CALLBACKS = DEFAULT_MAX_CALLBACKS - - # Workaround for missing 'index' field in image responses from some providers - patch_image_url_list_item() - - quiet_noisy_logger("httpx") - quiet_noisy_logger("LiteLLM") - quiet_noisy_logger("LiteLLM Router") diff --git a/packages/data-designer-engine/src/data_designer/engine/resources/resource_provider.py b/packages/data-designer-engine/src/data_designer/engine/resources/resource_provider.py index 5bfcf9ecc..28199bc71 100644 --- a/packages/data-designer-engine/src/data_designer/engine/resources/resource_provider.py +++ b/packages/data-designer-engine/src/data_designer/engine/resources/resource_provider.py @@ -93,7 +93,7 @@ def create_resource_provider( ) -> ResourceProvider: """Factory function for creating a ResourceProvider instance. - This function triggers lazy loading of heavy dependencies like litellm. + This function triggers lazy loading of heavy dependencies like httpx. The creation order is: 1. MCPProviderRegistry (can be empty) 2. 
MCPRegistry with tool_configs diff --git a/packages/data-designer-engine/tests/engine/models/clients/conftest.py b/packages/data-designer-engine/tests/engine/models/clients/conftest.py index e3415f6f6..d06f99777 100644 --- a/packages/data-designer-engine/tests/engine/models/clients/conftest.py +++ b/packages/data-designer-engine/tests/engine/models/clients/conftest.py @@ -7,21 +7,6 @@ from typing import Any from unittest.mock import AsyncMock, MagicMock -import pytest - -from data_designer.engine.models.clients.adapters.litellm_bridge import LiteLLMBridgeClient - - -@pytest.fixture -def mock_router() -> MagicMock: - return MagicMock() - - -@pytest.fixture -def bridge_client(mock_router: MagicMock) -> LiteLLMBridgeClient: - return LiteLLMBridgeClient(provider_name="stub-provider", router=mock_router) - - # --------------------------------------------------------------------------- # Shared mock helpers for native HTTP adapter tests # --------------------------------------------------------------------------- diff --git a/packages/data-designer-engine/tests/engine/models/clients/test_factory.py b/packages/data-designer-engine/tests/engine/models/clients/test_factory.py index 9cff50d9c..ffdad291f 100644 --- a/packages/data-designer-engine/tests/engine/models/clients/test_factory.py +++ b/packages/data-designer-engine/tests/engine/models/clients/test_factory.py @@ -3,7 +3,7 @@ from __future__ import annotations -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock import pytest @@ -12,10 +12,10 @@ ModelConfig, ModelProvider, ) +from data_designer.engine.errors import DataDesignerError from data_designer.engine.model_provider import ModelProviderRegistry from data_designer.engine.models.clients.adapters.anthropic import AnthropicClient from data_designer.engine.models.clients.adapters.http_model_client import ClientConcurrencyMode -from data_designer.engine.models.clients.adapters.litellm_bridge import LiteLLMBridgeClient from 
data_designer.engine.models.clients.adapters.openai_compatible import OpenAICompatibleClient from data_designer.engine.models.clients.factory import create_model_client from data_designer.engine.models.clients.retry import RetryConfig @@ -113,14 +113,9 @@ def test_anthropic_provider_type_case_insensitive( assert isinstance(client, AnthropicClient), f"Failed for provider_type={variant!r}" -@patch("data_designer.engine.models.clients.factory.CustomRouter") -@patch("data_designer.engine.models.clients.factory.LiteLLMRouterDefaultKwargs") -def test_unknown_provider_creates_bridge_client( - mock_kwargs: MagicMock, - mock_router: MagicMock, +def test_unknown_provider_type_raises_data_designer_error( secret_resolver: SecretResolver, ) -> None: - mock_kwargs.return_value.model_dump.return_value = {} provider = ModelProvider(name="custom-provider", endpoint="https://custom.example.com", provider_type="custom") registry = ModelProviderRegistry(providers=[provider]) config = ModelConfig( @@ -129,41 +124,8 @@ def test_unknown_provider_creates_bridge_client( inference_parameters=ChatCompletionInferenceParams(), provider="custom-provider", ) - client = create_model_client(config, secret_resolver, registry) - assert isinstance(client, LiteLLMBridgeClient) - - -# --- Backend env var override --- - - -@patch("data_designer.engine.models.clients.factory.CustomRouter") -@patch("data_designer.engine.models.clients.factory.LiteLLMRouterDefaultKwargs") -def test_bridge_env_override_forces_bridge_for_openai_provider( - mock_kwargs: MagicMock, - mock_router: MagicMock, - openai_model_config: ModelConfig, - secret_resolver: SecretResolver, - openai_registry: ModelProviderRegistry, -) -> None: - mock_kwargs.return_value.model_dump.return_value = {} - with patch.dict("os.environ", {"DATA_DESIGNER_MODEL_BACKEND": "litellm_bridge"}): - client = create_model_client(openai_model_config, secret_resolver, openai_registry) - assert isinstance(client, LiteLLMBridgeClient) - - 
-@patch("data_designer.engine.models.clients.factory.CustomRouter") -@patch("data_designer.engine.models.clients.factory.LiteLLMRouterDefaultKwargs") -def test_bridge_env_override_forces_bridge_for_anthropic_provider( - mock_kwargs: MagicMock, - mock_router: MagicMock, - anthropic_model_config: ModelConfig, - secret_resolver: SecretResolver, - anthropic_registry: ModelProviderRegistry, -) -> None: - mock_kwargs.return_value.model_dump.return_value = {} - with patch.dict("os.environ", {"DATA_DESIGNER_MODEL_BACKEND": "litellm_bridge"}): - client = create_model_client(anthropic_model_config, secret_resolver, anthropic_registry) - assert isinstance(client, LiteLLMBridgeClient) + with pytest.raises(DataDesignerError, match="Provider type 'custom'.*is not supported"): + create_model_client(config, secret_resolver, registry) def test_openai_provider_type_case_insensitive( @@ -179,20 +141,6 @@ def test_openai_provider_type_case_insensitive( assert isinstance(client, OpenAICompatibleClient), f"Failed for provider_type={variant!r}" -def test_native_env_var_still_uses_native_for_openai_provider( - openai_model_config: ModelConfig, - secret_resolver: SecretResolver, - openai_registry: ModelProviderRegistry, -) -> None: - with patch.dict("os.environ", {"DATA_DESIGNER_MODEL_BACKEND": "native"}): - client = create_model_client( - openai_model_config, - secret_resolver, - openai_registry, - ) - assert isinstance(client, OpenAICompatibleClient) - - # --- Mode parameter forwarding --- @@ -259,31 +207,6 @@ def test_throttle_manager_wraps_anthropic_client( assert isinstance(client._inner, AnthropicClient) -@patch("data_designer.engine.models.clients.factory.CustomRouter") -@patch("data_designer.engine.models.clients.factory.LiteLLMRouterDefaultKwargs") -def test_bridge_client_is_wrapped_with_throttle_manager( - mock_kwargs: MagicMock, - mock_router: MagicMock, - secret_resolver: SecretResolver, -) -> None: - """LiteLLMBridgeClient is wrapped, but AIMD accuracy is best-effort - because 
the bridge's internal router may retry 429s before the - wrapper sees them. See architecture notes for scope.""" - mock_kwargs.return_value.model_dump.return_value = {} - provider = ModelProvider(name="custom-provider", endpoint="https://custom.example.com", provider_type="custom") - registry = ModelProviderRegistry(providers=[provider]) - config = ModelConfig( - alias="test-custom", - model="custom-model", - inference_parameters=ChatCompletionInferenceParams(), - provider="custom-provider", - ) - tm = ThrottleManager() - client = create_model_client(config, secret_resolver, registry, throttle_manager=tm) - assert isinstance(client, ThrottledModelClient) - assert isinstance(client._inner, LiteLLMBridgeClient) - - def test_no_throttle_manager_returns_inner_client_directly( openai_model_config: ModelConfig, secret_resolver: SecretResolver, diff --git a/packages/data-designer-engine/tests/engine/models/clients/test_litellm_bridge.py b/packages/data-designer-engine/tests/engine/models/clients/test_litellm_bridge.py deleted file mode 100644 index 34e73e6b6..000000000 --- a/packages/data-designer-engine/tests/engine/models/clients/test_litellm_bridge.py +++ /dev/null @@ -1,452 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-# SPDX-License-Identifier: Apache-2.0 - -from __future__ import annotations - -from types import SimpleNamespace -from typing import Any -from unittest.mock import AsyncMock, MagicMock - -import pytest - -from data_designer.engine.models.clients.adapters.litellm_bridge import LiteLLMBridgeClient -from data_designer.engine.models.clients.errors import ProviderError, ProviderErrorKind -from data_designer.engine.models.clients.types import ( - ChatCompletionRequest, - EmbeddingRequest, - ImageGenerationRequest, -) - - -def test_completion_maps_canonical_fields_from_litellm_response( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = _build_chat_response( - content="final answer", - reasoning_content="reasoning trace", - tool_calls=[{"id": "call-1", "function": {"name": "lookup", "arguments": '{"query":"foo"}'}}], - usage=SimpleNamespace(prompt_tokens=11, completion_tokens=13, total_tokens=24), - ) - mock_router.completion.return_value = response - - request = ChatCompletionRequest( - model="stub-model", - messages=[{"role": "user", "content": "hello"}], - tools=[{"type": "function", "function": {"name": "lookup"}}], - temperature=0.2, - top_p=0.8, - max_tokens=256, - extra_body={"foo": "bar"}, - extra_headers={"x-trace": "1"}, - ) - result = bridge_client.completion(request) - - assert result.message.content == "final answer" - assert result.message.reasoning_content == "reasoning trace" - assert len(result.message.tool_calls) == 1 - assert result.message.tool_calls[0].id == "call-1" - assert result.message.tool_calls[0].name == "lookup" - assert result.message.tool_calls[0].arguments_json == '{"query":"foo"}' - assert result.usage is not None - assert result.usage.input_tokens == 11 - assert result.usage.output_tokens == 13 - assert result.usage.total_tokens == 24 - assert result.raw is response - - mock_router.completion.assert_called_once_with( - model="stub-model", - messages=[{"role": "user", "content": "hello"}], - 
tools=[{"type": "function", "function": {"name": "lookup"}}], - temperature=0.2, - top_p=0.8, - max_tokens=256, - extra_body={"foo": "bar"}, - extra_headers={"x-trace": "1"}, - ) - - -@pytest.mark.asyncio -async def test_acompletion_maps_canonical_fields_from_litellm_response( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = _build_chat_response(content="async result", reasoning_content=None, tool_calls=[], usage=None) - mock_router.acompletion = AsyncMock(return_value=response) - - request = ChatCompletionRequest(model="stub-model", messages=[{"role": "user", "content": "hello"}]) - result = await bridge_client.acompletion(request) - - assert result.message.content == "async result" - assert result.usage is None - mock_router.acompletion.assert_awaited_once_with( - model="stub-model", - messages=[{"role": "user", "content": "hello"}], - extra_headers=None, - ) - - -def test_completion_passes_extra_body_as_distinct_kwarg( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = _build_chat_response(content="ok", reasoning_content=None, tool_calls=[], usage=None) - mock_router.completion.return_value = response - - request = ChatCompletionRequest( - model="stub-model", - messages=[{"role": "user", "content": "hello"}], - temperature=0.5, - extra_body={"reasoning_effort": "high"}, - ) - bridge_client.completion(request) - - mock_router.completion.assert_called_once_with( - model="stub-model", - messages=[{"role": "user", "content": "hello"}], - temperature=0.5, - extra_body={"reasoning_effort": "high"}, - extra_headers=None, - ) - - -def test_embeddings_maps_vectors_and_usage( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = SimpleNamespace( - data=[{"embedding": [1, 2]}, SimpleNamespace(embedding=[3.5, 4.5])], - usage=SimpleNamespace(prompt_tokens=4, total_tokens=4), - ) - mock_router.embedding.return_value = response - - request = 
EmbeddingRequest(model="stub-model", inputs=["a", "b"], dimensions=32, encoding_format="float") - result = bridge_client.embeddings(request) - - assert result.vectors == [[1.0, 2.0], [3.5, 4.5]] - assert result.usage is not None - assert result.usage.input_tokens == 4 - assert result.usage.output_tokens is None - assert result.raw is response - mock_router.embedding.assert_called_once_with( - model="stub-model", - input=["a", "b"], - encoding_format="float", - dimensions=32, - extra_headers=None, - ) - - -@pytest.mark.parametrize( - "messages", - [ - [{"role": "user", "content": "generate image"}], - [], - ], - ids=["with-content", "empty-list"], -) -def test_generate_image_uses_chat_completion_path_when_messages_provided( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, - messages: list[dict[str, Any]], -) -> None: - response = _build_chat_response( - content=None, - reasoning_content=None, - tool_calls=None, - images=[{"image_url": {"url": "data:image/png;base64,aGVsbG8="}}], - usage=None, - ) - mock_router.completion.return_value = response - - request = ImageGenerationRequest( - model="stub-model", - prompt="unused because messages are supplied", - messages=messages, - extra_body={"n": 1}, - ) - result = bridge_client.generate_image(request) - - assert len(result.images) == 1 - assert result.images[0].b64_data == "aGVsbG8=" - mock_router.completion.assert_called_once_with( - model="stub-model", - messages=messages, - extra_body={"n": 1}, - extra_headers=None, - ) - mock_router.image_generation.assert_not_called() - - -def test_generate_image_uses_diffusion_path_without_messages( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = SimpleNamespace( - data=[ - SimpleNamespace(b64_json="Zmlyc3Q="), - {"url": "data:image/jpeg;base64,c2Vjb25k"}, - ], - usage=SimpleNamespace(input_tokens=9, output_tokens=12), - ) - mock_router.image_generation.return_value = response - - request = 
ImageGenerationRequest(model="stub-model", prompt="make an image", extra_body={"n": 2}) - result = bridge_client.generate_image(request) - - assert [image.b64_data for image in result.images] == ["Zmlyc3Q=", "c2Vjb25k"] - assert [image.mime_type for image in result.images] == [None, "image/jpeg"] - assert result.usage is not None - assert result.usage.input_tokens == 9 - assert result.usage.output_tokens == 12 - assert result.usage.total_tokens == 21 - assert result.usage.generated_images == 2 - mock_router.image_generation.assert_called_once_with( - prompt="make an image", model="stub-model", extra_body={"n": 2}, extra_headers=None - ) - - -@pytest.mark.asyncio -async def test_aembeddings_maps_vectors_and_usage( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = SimpleNamespace( - data=[{"embedding": [0.1, 0.2]}, SimpleNamespace(embedding=[0.3, 0.4])], - usage=SimpleNamespace(prompt_tokens=5, total_tokens=5), - ) - mock_router.aembedding = AsyncMock(return_value=response) - - request = EmbeddingRequest(model="stub-model", inputs=["x", "y"]) - result = await bridge_client.aembeddings(request) - - assert result.vectors == [[0.1, 0.2], [0.3, 0.4]] - assert result.usage is not None - assert result.usage.input_tokens == 5 - assert result.raw is response - mock_router.aembedding.assert_awaited_once_with(model="stub-model", input=["x", "y"], extra_headers=None) - - -def test_completion_coerces_list_content_blocks_to_string( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = _build_chat_response( - content=[{"type": "text", "text": "first"}, {"type": "text", "text": "second"}], - reasoning_content=None, - tool_calls=[], - usage=None, - ) - mock_router.completion.return_value = response - - request = ChatCompletionRequest(model="stub-model", messages=[{"role": "user", "content": "hello"}]) - result = bridge_client.completion(request) - - assert result.message.content == "first\nsecond" - - -def 
test_close_and_aclose_are_callable(bridge_client: LiteLLMBridgeClient) -> None: - bridge_client.close() - - -@pytest.mark.asyncio -async def test_aclose_is_callable(bridge_client: LiteLLMBridgeClient) -> None: - await bridge_client.aclose() - - -@pytest.mark.asyncio -async def test_agenerate_image_uses_diffusion_path_without_messages( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = SimpleNamespace( - data=[SimpleNamespace(b64_json="YXN5bmM=")], - usage=SimpleNamespace(input_tokens=3, output_tokens=7), - ) - mock_router.aimage_generation = AsyncMock(return_value=response) - - request = ImageGenerationRequest(model="stub-model", prompt="async image", extra_body={"n": 1}) - result = await bridge_client.agenerate_image(request) - - assert len(result.images) == 1 - assert result.images[0].b64_data == "YXN5bmM=" - assert result.usage is not None - assert result.usage.generated_images == 1 - mock_router.aimage_generation.assert_awaited_once_with( - prompt="async image", model="stub-model", extra_body={"n": 1}, extra_headers=None - ) - - -def test_completion_with_empty_choices_returns_empty_message( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = SimpleNamespace(choices=[], usage=None) - mock_router.completion.return_value = response - - request = ChatCompletionRequest(model="stub-model", messages=[{"role": "user", "content": "hello"}]) - result = bridge_client.completion(request) - - assert result.message.content is None - assert result.message.tool_calls == [] - assert result.message.images == [] - - -def test_completion_with_tool_call_dict_arguments( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - response = _build_chat_response( - content=None, - reasoning_content=None, - tool_calls=[{"id": "call-2", "function": {"name": "search", "arguments": {"q": "test"}}}], - usage=None, - ) - mock_router.completion.return_value = response - - request = 
ChatCompletionRequest(model="stub-model", messages=[{"role": "user", "content": "hello"}]) - result = bridge_client.completion(request) - - assert len(result.message.tool_calls) == 1 - assert result.message.tool_calls[0].arguments_json == '{"q": "test"}' - - -# --- Exception wrapping tests --- - - -def test_completion_wraps_router_exception_with_status_code( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - exc = Exception("Rate limit exceeded") - exc.status_code = 429 # type: ignore[attr-defined] - mock_router.completion.side_effect = exc - - request = ChatCompletionRequest(model="stub-model", messages=[{"role": "user", "content": "hello"}]) - with pytest.raises(ProviderError) as exc_info: - bridge_client.completion(request) - - assert exc_info.value.kind == ProviderErrorKind.RATE_LIMIT - assert exc_info.value.status_code == 429 - assert exc_info.value.provider_name == "stub-provider" - assert exc_info.value.__cause__ is exc - - -def test_completion_wraps_generic_router_exception( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - mock_router.completion.side_effect = RuntimeError("something broke") - - request = ChatCompletionRequest(model="stub-model", messages=[{"role": "user", "content": "hello"}]) - with pytest.raises(ProviderError) as exc_info: - bridge_client.completion(request) - - assert exc_info.value.kind == ProviderErrorKind.API_ERROR - assert "something broke" in exc_info.value.message - assert exc_info.value.status_code is None - - -def test_completion_passes_through_provider_error( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - original = ProviderError(kind=ProviderErrorKind.AUTHENTICATION, message="bad key") - mock_router.completion.side_effect = original - - request = ChatCompletionRequest(model="stub-model", messages=[{"role": "user", "content": "hello"}]) - with pytest.raises(ProviderError) as exc_info: - bridge_client.completion(request) - - assert 
exc_info.value is original - - -@pytest.mark.asyncio -async def test_acompletion_wraps_router_exception( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - mock_router.acompletion = AsyncMock(side_effect=ConnectionError("connection refused")) - - request = ChatCompletionRequest(model="stub-model", messages=[{"role": "user", "content": "hello"}]) - with pytest.raises(ProviderError) as exc_info: - await bridge_client.acompletion(request) - - assert exc_info.value.kind == ProviderErrorKind.API_CONNECTION - - -def test_embeddings_wraps_router_exception( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - exc = Exception("server error") - exc.status_code = 500 # type: ignore[attr-defined] - mock_router.embedding.side_effect = exc - - request = EmbeddingRequest(model="stub-model", inputs=["a"]) - with pytest.raises(ProviderError) as exc_info: - bridge_client.embeddings(request) - - assert exc_info.value.kind == ProviderErrorKind.INTERNAL_SERVER - - -def test_generate_image_wraps_router_exception( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - mock_router.image_generation.side_effect = TimeoutError("timed out") - - request = ImageGenerationRequest(model="stub-model", prompt="make an image") - with pytest.raises(ProviderError) as exc_info: - bridge_client.generate_image(request) - - assert exc_info.value.kind == ProviderErrorKind.TIMEOUT - - -@pytest.mark.asyncio -async def test_agenerate_image_wraps_router_exception( - mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - mock_router.aimage_generation = AsyncMock(side_effect=RuntimeError("boom")) - - request = ImageGenerationRequest(model="stub-model", prompt="async image") - with pytest.raises(ProviderError) as exc_info: - await bridge_client.agenerate_image(request) - - assert exc_info.value.kind == ProviderErrorKind.API_ERROR - - -@pytest.mark.asyncio -async def test_aembeddings_wraps_router_exception( - 
mock_router: MagicMock, - bridge_client: LiteLLMBridgeClient, -) -> None: - mock_router.aembedding = AsyncMock(side_effect=RuntimeError("network error")) - - request = EmbeddingRequest(model="stub-model", inputs=["a"]) - with pytest.raises(ProviderError) as exc_info: - await bridge_client.aembeddings(request) - - assert exc_info.value.kind == ProviderErrorKind.API_ERROR - - -# --- Helpers --- - - -def _build_chat_response( - *, - content: Any, - reasoning_content: str | None, - tool_calls: list[dict[str, Any]] | None, - usage: Any, - images: list[dict[str, Any]] | None = None, -) -> Any: - message = SimpleNamespace( - content=content, - reasoning_content=reasoning_content, - tool_calls=tool_calls, - images=images, - ) - choice = SimpleNamespace(message=message) - return SimpleNamespace(choices=[choice], usage=usage) diff --git a/packages/data-designer-engine/tests/engine/models/clients/test_parsing.py b/packages/data-designer-engine/tests/engine/models/clients/test_parsing.py index 76ff2b2a3..262ca35c7 100644 --- a/packages/data-designer-engine/tests/engine/models/clients/test_parsing.py +++ b/packages/data-designer-engine/tests/engine/models/clients/test_parsing.py @@ -46,31 +46,6 @@ def test_extra_body_empty_dict_produces_no_extra_keys() -> None: assert "extra_body" not in transport.body -# --- TransportKwargs.from_request: extra_body preserved (opt-in) --- - - -def test_extra_body_preserved_when_flatten_disabled() -> None: - request = ChatCompletionRequest( - model="m", - messages=[], - temperature=0.7, - extra_body={"reasoning_effort": "high", "seed": 42}, - ) - transport = TransportKwargs.from_request(request, flatten_extra_body=False) - - assert transport.body["temperature"] == 0.7 - assert transport.body["extra_body"] == {"reasoning_effort": "high", "seed": 42} - assert "reasoning_effort" not in transport.body - assert "seed" not in transport.body - - -def test_extra_body_empty_dict_not_injected_when_flatten_disabled() -> None: - request = 
ChatCompletionRequest(model="m", messages=[], extra_body={}) - transport = TransportKwargs.from_request(request, flatten_extra_body=False) - - assert "extra_body" not in transport.body - - # --- TransportKwargs.from_request: extra_headers separation --- diff --git a/packages/data-designer-engine/tests/engine/models/conftest.py b/packages/data-designer-engine/tests/engine/models/conftest.py index 9fa9231aa..a4aa1f129 100644 --- a/packages/data-designer-engine/tests/engine/models/conftest.py +++ b/packages/data-designer-engine/tests/engine/models/conftest.py @@ -93,7 +93,7 @@ def stub_model_registry( @pytest.fixture def stub_model_client() -> MagicMock: - """Mock ModelClient for testing ModelFacade without a real LiteLLM router.""" + """Mock ModelClient for testing ModelFacade without a real HTTP adapter.""" return MagicMock(spec=ModelClient) diff --git a/packages/data-designer-engine/tests/engine/models/test_litellm_overrides.py b/packages/data-designer-engine/tests/engine/models/test_litellm_overrides.py deleted file mode 100644 index aa547e187..000000000 --- a/packages/data-designer-engine/tests/engine/models/test_litellm_overrides.py +++ /dev/null @@ -1,184 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-# SPDX-License-Identifier: Apache-2.0 - -from __future__ import annotations - -from unittest.mock import patch - -import litellm -import pytest -from pydantic import ValidationError - -from data_designer.engine.models import litellm_overrides -from data_designer.engine.models.litellm_overrides import ( - DEFAULT_MAX_CALLBACKS, - CustomRouter, - ImageURLListItem, - ThreadSafeCache, - apply_litellm_patches, - patch_image_url_list_item, -) - - -@pytest.fixture -def stub_thread_safe_cache(): - return ThreadSafeCache() - - -@pytest.fixture -def stub_custom_router(): - return CustomRouter([], initial_retry_after_s=1.0, jitter_pct=0.0) - - -@pytest.mark.parametrize( - "retry_count,jitter,expected_sleep_s", - [ - (0, 0.0, 1.0), - (1, 0.0, 2.0), - (2, 0.0, 4.0), - (3, 0.0, 8.0), - (0, 0.2, 1.2), - (1, 0.2, 2.4), - (2, 0.2, 4.8), - (3, 0.2, 9.6), - ], -) -def test_custom_router_calculate_exponential_backoff(retry_count: int, jitter: float, expected_sleep_s: float): - with patch("random.uniform", return_value=jitter): - assert ( - CustomRouter.calculate_exponential_backoff( - initial_retry_after_s=1, current_retry=retry_count, jitter_pct=jitter - ) - == expected_sleep_s - ) - - -def test_apply_litellm_patches_no_exceptions(): - try: - apply_litellm_patches() - except Exception as e: - pytest.fail(f"apply_litellm_patches() raised an unexpected exception: {e}") - - -@patch.object(litellm_overrides, "quiet_noisy_logger", autospec=True) -def test_apply_litellm_patches(mock_quiet_noisy_logger: object) -> None: - litellm_overrides.apply_litellm_patches() - assert isinstance(litellm.in_memory_llm_clients_cache, ThreadSafeCache) - assert ( - litellm.litellm_core_utils.logging_callback_manager.LoggingCallbackManager.MAX_CALLBACKS - == DEFAULT_MAX_CALLBACKS - ) - assert mock_quiet_noisy_logger.call_count == 3 - assert mock_quiet_noisy_logger.call_args_list[0][0][0] == "httpx" - assert mock_quiet_noisy_logger.call_args_list[1][0][0] == "LiteLLM" - assert 
mock_quiet_noisy_logger.call_args_list[2][0][0] == "LiteLLM Router" - - -@pytest.mark.parametrize( - "test_case,key,value,expected_result", - [ - ("get_cache", "test_key", "test_value", "test_value"), - ("set_cache", "test_key", "test_value", "test_value"), - ], -) -def test_thread_safe_cache_basic_operations(stub_thread_safe_cache, test_case, key, value, expected_result): - stub_thread_safe_cache.set_cache(key, value) - result = stub_thread_safe_cache.get_cache(key) - assert result == expected_result - - -def test_thread_safe_cache_batch_get_cache(stub_thread_safe_cache): - stub_thread_safe_cache.set_cache("key1", "value1") - stub_thread_safe_cache.set_cache("key2", "value2") - - result = stub_thread_safe_cache.batch_get_cache(["key1", "key2"]) - assert result == ["value1", "value2"] - - -def test_thread_safe_cache_delete_cache(stub_thread_safe_cache): - stub_thread_safe_cache.set_cache("test_key", "test_value") - stub_thread_safe_cache.delete_cache("test_key") - - result = stub_thread_safe_cache.get_cache("test_key") - assert result is None - - -def test_thread_safe_cache_evict_cache(stub_thread_safe_cache): - stub_thread_safe_cache.set_cache("test_key", "test_value") - stub_thread_safe_cache.evict_cache() - stub_thread_safe_cache.get_cache("test_key") - assert True - - -def test_thread_safe_cache_increment_cache(stub_thread_safe_cache): - stub_thread_safe_cache.set_cache("counter", 5) - - result = stub_thread_safe_cache.increment_cache("counter", 3) - assert result == 8 - - final_value = stub_thread_safe_cache.get_cache("counter") - assert final_value == 8 - - -def test_thread_safe_cache_flush_cache(stub_thread_safe_cache): - stub_thread_safe_cache.set_cache("key1", "value1") - stub_thread_safe_cache.set_cache("key2", "value2") - stub_thread_safe_cache.flush_cache() - - assert stub_thread_safe_cache.get_cache("key1") is None - assert stub_thread_safe_cache.get_cache("key2") is None - - -def test_custom_router_initialization(): - router = CustomRouter([], 
initial_retry_after_s=2.0, jitter_pct=0.1) - - assert router._initial_retry_after_s == 2.0 - assert router._jitter_pct == 0.1 - - -@patch("random.uniform", return_value=0.1, autospec=True) -def test_custom_router_calculate_exponential_backoff_with_jitter(mock_uniform): - result = CustomRouter.calculate_exponential_backoff(initial_retry_after_s=1.0, current_retry=2, jitter_pct=0.2) - assert result >= 4.0 - assert result <= 4.4 - mock_uniform.assert_called_once_with(-0.2, 0.2) - - -def test_patch_image_url_list_item_makes_index_optional() -> None: - original_annotation = ImageURLListItem.__annotations__["index"] - original_required = ImageURLListItem.__required_keys__ - original_optional = ImageURLListItem.__optional_keys__ - try: - # Restore to unpatched state in case prior tests already applied the patch - ImageURLListItem.__annotations__["index"] = int - ImageURLListItem.__required_keys__ = original_required | {"index"} - ImageURLListItem.__optional_keys__ = original_optional - {"index"} - litellm.Message.model_rebuild(force=True) - - assert "index" in ImageURLListItem.__required_keys__ - - with pytest.raises(ValidationError): - litellm.Message( - content=None, - role="assistant", - images=[{"type": "image_url", "image_url": {"url": "data:image/png;base64,abc"}}], - ) - - patch_image_url_list_item() - - assert "index" not in ImageURLListItem.__required_keys__ - assert "index" in ImageURLListItem.__optional_keys__ - - message = litellm.Message( - content=None, - role="assistant", - images=[{"type": "image_url", "image_url": {"url": "data:image/png;base64,abc"}}], - ) - assert message.images is not None - assert len(message.images) == 1 - assert message.images[0]["type"] == "image_url" - finally: - ImageURLListItem.__annotations__["index"] = original_annotation - ImageURLListItem.__required_keys__ = original_required - ImageURLListItem.__optional_keys__ = original_optional - litellm.Message.model_rebuild(force=True) diff --git 
a/packages/data-designer-engine/tests/engine/models/test_model_errors.py b/packages/data-designer-engine/tests/engine/models/test_model_errors.py index 4712f0de5..b72b26003 100644 --- a/packages/data-designer-engine/tests/engine/models/test_model_errors.py +++ b/packages/data-designer-engine/tests/engine/models/test_model_errors.py @@ -6,26 +6,10 @@ from unittest.mock import MagicMock import pytest -from litellm.exceptions import ( - APIConnectionError, - APIError, - AuthenticationError, - BadRequestError, - ContextWindowExceededError, - InternalServerError, - NotFoundError, - PermissionDeniedError, - RateLimitError, - Timeout, - UnprocessableEntityError, - UnsupportedParamsError, -) from data_designer.engine.models.clients.errors import ProviderError, ProviderErrorKind from data_designer.engine.models.errors import ( DataDesignerError, - DownstreamLLMExceptionMessageParser, - FormattedLLMErrorMessage, GenerationValidationFailureError, ModelAPIConnectionError, ModelAPIError, @@ -55,53 +39,6 @@ @pytest.mark.parametrize( "exception,expected_exception,expected_error_msg", [ - ( - APIConnectionError("Connection error", "openai", stub_model_name), - ModelAPIConnectionError, - f"Cause: Connection to model '{stub_model_name}' hosted on model provider '{stub_model_provider_name}' failed while {stub_purpose}.", - ), - ( - APIError(500, "Some litellm error", "openai", stub_model_name), - ModelAPIError, - f"Cause: An unexpected API error occurred with model '{stub_model_name}' while {stub_purpose}.", - ), - ( - AuthenticationError("Authentication error", "openai", stub_model_name), - ModelAuthenticationError, - f"Cause: The API key provided for model '{stub_model_name}' was found to be invalid or expired while {stub_purpose}.", - ), - ( - BadRequestError("Bad request", "openai", stub_model_name), - ModelBadRequestError, - f"Cause: The request for model '{stub_model_name}' was found to be malformed or missing required parameters while {stub_purpose}.", - ), - ( - 
ContextWindowExceededError("Context window exceeded", "openai", stub_model_name), - ModelContextWindowExceededError, - f"Cause: The input data for model '{stub_model_name}' was found to exceed its supported context width while {stub_purpose}.", - ), - ( - InternalServerError("Internal server error", "openai", stub_model_name), - ModelInternalServerError, - f"Cause: Model '{stub_model_name}' is currently experiencing internal server issues while {stub_purpose}.", - ), - ( - NotFoundError("Not found", "openai", stub_model_name), - ModelNotFoundError, - f"Cause: The specified model '{stub_model_name}' could not be found while {stub_purpose}.", - ), - ( - PermissionDeniedError( - "Permission denied", "openai", stub_model_name, MagicMock(status_code=403, text="Permission denied") - ), - ModelPermissionDeniedError, - f"Cause: Your API key was found to lack the necessary permissions to use model '{stub_model_name}' while {stub_purpose}.", - ), - ( - RateLimitError("Rate limit exceeded", "openai", stub_model_name), - ModelRateLimitError, - f"Cause: You have exceeded the rate limit for model '{stub_model_name}' while {stub_purpose}.", - ), ( ProviderError( kind=ProviderErrorKind.BAD_REQUEST, @@ -157,19 +94,84 @@ f"Cause: Your API key was found to lack the necessary permissions to use model '{stub_model_name}' while {stub_purpose}.", ), ( - Timeout("Request timed out", "openai", stub_model_name), + ProviderError( + kind=ProviderErrorKind.RATE_LIMIT, + message="Rate limit exceeded", + status_code=429, + ), + ModelRateLimitError, + f"Cause: You have exceeded the rate limit for model '{stub_model_name}' while {stub_purpose}.", + ), + ( + ProviderError( + kind=ProviderErrorKind.AUTHENTICATION, + message="Invalid API key", + status_code=401, + ), + ModelAuthenticationError, + f"Cause: The API key provided for model '{stub_model_name}' was found to be invalid or expired while {stub_purpose}.", + ), + ( + ProviderError( + kind=ProviderErrorKind.API_CONNECTION, + message="Connection 
refused", + ), + ModelAPIConnectionError, + f"Cause: Connection to model '{stub_model_name}' hosted on model provider '{stub_model_provider_name}' failed while {stub_purpose}.", + ), + ( + ProviderError( + kind=ProviderErrorKind.TIMEOUT, + message="Request timed out", + status_code=408, + ), ModelTimeoutError, f"Cause: The request to model '{stub_model_name}' timed out while {stub_purpose}.", ), ( - UnprocessableEntityError("Unprocessable entity", "openai", stub_model_name, response=MagicMock()), + ProviderError( + kind=ProviderErrorKind.NOT_FOUND, + message="Model not found", + status_code=404, + ), + ModelNotFoundError, + f"Cause: The specified model '{stub_model_name}' could not be found while {stub_purpose}.", + ), + ( + ProviderError( + kind=ProviderErrorKind.INTERNAL_SERVER, + message="Internal server error", + status_code=500, + ), + ModelInternalServerError, + f"Cause: Model '{stub_model_name}' is currently experiencing internal server issues while {stub_purpose}.", + ), + ( + ProviderError( + kind=ProviderErrorKind.UNPROCESSABLE_ENTITY, + message="Unprocessable entity", + status_code=422, + ), ModelUnprocessableEntityError, f"Cause: The request to model '{stub_model_name}' failed despite correct request format while {stub_purpose}.", ), ( - UnsupportedParamsError("Unsupported parameters", "openai", stub_model_name), - ModelUnsupportedParamsError, - f"Cause: One or more of the parameters you provided were found to be unsupported by model '{stub_model_name}' while {stub_purpose}.", + ProviderError( + kind=ProviderErrorKind.API_ERROR, + message="Unknown API error", + status_code=418, + ), + ModelAPIError, + f"Cause: An unexpected API error occurred with model '{stub_model_name}' while {stub_purpose}.", + ), + ( + ProviderError( + kind=ProviderErrorKind.BAD_REQUEST, + message=f"{stub_model_name} is not a multimodal model", + status_code=400, + ), + ModelBadRequestError, + f"Cause: Model '{stub_model_name}' is not a multimodal model, but it looks like you are 
trying to provide multimodal context while {stub_purpose}.", ), ( GenerationValidationFailureError( @@ -190,8 +192,29 @@ ), (DataDesignerError("Some NemoDataDesigner error"), DataDesignerError, "Some NemoDataDesigner error"), ], + ids=[ + "bad_request", + "unsupported_params", + "quota_exceeded", + "unsupported_capability", + "permission_denied", + "rate_limit", + "authentication", + "api_connection", + "timeout", + "not_found", + "internal_server", + "unprocessable_entity", + "api_error", + "bad_request_multimodal", + "generation_validation_failure", + "unexpected_exception", + "data_designer_error_passthrough", + ], ) -def test_handle_llm_exceptions(exception, expected_exception, expected_error_msg): +def test_handle_llm_exceptions( + exception: Exception, expected_exception: type[Exception], expected_error_msg: str +) -> None: with pytest.raises(expected_exception, match=re.escape(expected_error_msg)): handle_llm_exceptions(exception, stub_model_name, stub_model_provider_name, stub_purpose) @@ -230,60 +253,16 @@ def test_handle_llm_exceptions_strips_duplicate_period_from_validation_detail() assert exc_info.value.detail == "Field required." 
-def test_catch_llm_exceptions(): +def test_catch_llm_exceptions() -> None: @catch_llm_exceptions - def stub_function(model_facade: Any, *args, **kwargs): - raise RateLimitError("Rate limit exceeded", "openai", stub_model_name) + def stub_function(model_facade: Any, *args: Any, **kwargs: Any) -> None: + raise ProviderError(kind=ProviderErrorKind.RATE_LIMIT, message="Rate limit exceeded", status_code=429) with pytest.raises(ModelRateLimitError, match="Cause: You have exceeded the rate limit for model"): - stub_function(MagicMock(model_name=stub_model_name)) - - -def test_openai_exception_message_parser(): - parser = DownstreamLLMExceptionMessageParser(stub_model_name, stub_model_provider_name, stub_purpose) + stub_function(MagicMock(model_name=stub_model_name, model_provider_name=stub_model_provider_name)) - with pytest.raises( - ModelBadRequestError, - match="Cause: The request for model 'test-model' was found to be malformed or missing required parameters", - ): - raise parser.parse_bad_request_error(BadRequestError("Bad request", "openai", stub_model_name)) - with pytest.raises( - ModelBadRequestError, - match="Cause: Model 'test-model' is not a multimodal model, but it looks like you are trying to provide multimodal context", - ): - raise parser.parse_bad_request_error( - BadRequestError(f"Bad request. {stub_model_name} is not a multimodal model", "openai", stub_model_name) - ) - - with pytest.raises( - ModelContextWindowExceededError, - match="Cause: The input data for model 'test-model' was found to exceed its supported context width", - ): - raise parser.parse_context_window_exceeded_error( - ContextWindowExceededError("Context window exceeded", "openai", stub_model_name) - ) - - with pytest.raises(ModelContextWindowExceededError, match="This model's maximum context length is 32768 tokens."): - detailed_error_from_upstream = "OpenAIException - This model's maximum context length is 32768 tokens. 
However, you requested 32778 tokens (10 in the messages, 32768 in the completion). Please reduce the length of the messages or completion" - raise parser.parse_context_window_exceeded_error( - ContextWindowExceededError(detailed_error_from_upstream, "openai", stub_model_name) - ) - - authentication_error = FormattedLLMErrorMessage(cause="Test auth error cause", solution="Test auth errorsolution") - with pytest.raises( - ModelAPIError, - match="Cause: An unexpected API error occurred with model 'test-model' while running generation for column 'test'.", - ): - raise parser.parse_api_error(APIError(500, "Some api error", "openai", stub_model_name), authentication_error) - - with pytest.raises(ModelAuthenticationError, match="Cause: Test auth error cause"): - raise parser.parse_api_error( - APIError(403, "Some obtuse error. Error code: 403", "openai", stub_model_name), authentication_error - ) - - -def test_get_exception_primary_cause_with_cause(): +def test_get_exception_primary_cause_with_cause() -> None: root_cause = ValueError("Root cause") try: raise root_cause @@ -298,33 +277,35 @@ def test_get_exception_primary_cause_with_cause(): assert result == root_cause -def test_get_exception_primary_cause_without_cause(): +def test_get_exception_primary_cause_without_cause() -> None: exception = ValueError("No cause") result = get_exception_primary_cause(exception) assert result == exception -def test_handle_llm_exceptions_context_window_with_openai_exception(): - exception = ContextWindowExceededError( - "OpenAIException - The model's context window was exceeded. Please reduce the length of your prompt or try a different model. Please reduce the prompt length.", - "openai", - stub_model_name, +def test_handle_llm_exceptions_context_window_with_openai_detail() -> None: + exception = ProviderError( + kind=ProviderErrorKind.CONTEXT_WINDOW_EXCEEDED, + message="This model's maximum context length is 32768 tokens. 
However, you requested 32778 tokens (10 in the messages, 32768 in the completion). Please reduce the length of the messages or completion", + status_code=400, ) with pytest.raises(ModelContextWindowExceededError) as exc_info: handle_llm_exceptions( exception, model_name=stub_model_name, model_provider_name=stub_model_provider_name, purpose=stub_purpose ) assert "exceed its supported context width" in str(exc_info.value) + assert "maximum context length is 32768 tokens" in str(exc_info.value) -def test_handle_llm_exceptions_context_window_with_openai_exception_parsing_error(): - class MockException(ContextWindowExceededError): - def __str__(self): - raise Exception("Parsing error") - - exception = MockException("Context window exceeded", "openai", stub_model_name) +def test_handle_llm_exceptions_context_window_without_openai_detail() -> None: + exception = ProviderError( + kind=ProviderErrorKind.CONTEXT_WINDOW_EXCEEDED, + message="context length exceeded", + status_code=400, + ) with pytest.raises(ModelContextWindowExceededError) as exc_info: handle_llm_exceptions( exception, model_name=stub_model_name, model_provider_name=stub_model_provider_name, purpose=stub_purpose ) assert "exceed its supported context width" in str(exc_info.value) + assert "maximum context length" not in str(exc_info.value) diff --git a/packages/data-designer-engine/tests/engine/models/test_model_registry.py b/packages/data-designer-engine/tests/engine/models/test_model_registry.py index 50d7ff107..1974f5310 100644 --- a/packages/data-designer-engine/tests/engine/models/test_model_registry.py +++ b/packages/data-designer-engine/tests/engine/models/test_model_registry.py @@ -6,7 +6,6 @@ import pytest from data_designer.config.models import ChatCompletionInferenceParams, ModelConfig -from data_designer.engine.models import litellm_overrides from data_designer.engine.models.errors import ModelAuthenticationError from data_designer.engine.models.facade import ModelFacade from 
data_designer.engine.models.factory import create_model_registry @@ -42,9 +41,7 @@ def stub_no_usage_config(): ) -@patch.object(litellm_overrides, "apply_litellm_patches", autospec=True) def test_create_model_registry( - mock_apply_litellm_patches: object, stub_model_configs: list[ModelConfig], stub_secrets_resolver: object, stub_model_provider_registry: object, @@ -55,7 +52,6 @@ def test_create_model_registry( model_provider_registry=stub_model_provider_registry, ) assert isinstance(model_registry, ModelRegistry) - mock_apply_litellm_patches.assert_called_once() def test_public_props(stub_model_configs, stub_model_registry): diff --git a/plans/343/model-facade-overhaul-plan-step-1.md b/plans/343/model-facade-overhaul-plan-step-1.md index 2f3dc035b..b23d4927d 100644 --- a/plans/343/model-facade-overhaul-plan-step-1.md +++ b/plans/343/model-facade-overhaul-plan-step-1.md @@ -208,18 +208,17 @@ Updated files (Step 1): - Repurposed from original "Config/CLI auth schema rollout" scope. PR #426 review revealed that the dual-mode sync/async `HttpModelClient` creates intractable lifecycle bugs (transport leaks, cross-mode teardown). This PR constrains each `HttpModelClient` instance to a single mode (`sync` or `async`) via a constructor flag, simplifies `close()`/`aclose()` to single-mode teardown, and adds `ModelRegistry.arun_health_check()` so async-engine health checks use the async path consistently. - files: `clients/adapters/http_model_client.py`, `clients/factory.py`, `models/factory.py`, `models/registry.py`, `dataset_builders/column_wise_builder.py` - docs: `plans/343/model-facade-overhaul-pr-5-architecture-notes.md` -6. PR-6 (in progress): Dual-layer ThrottleManager integration (client wrapper + scheduler submission slot management). +6. PR-6 (merged): Dual-layer ThrottleManager integration (client wrapper + scheduler submission slot management). - Repurposed from original "Config/CLI auth schema rollout" scope. 
The ThrottleManager (PR-3) is instantiated and models register into it (PR-4), but no execution path acquires or releases throttle permits. This PR adds a `ThrottledModelClient` wrapper that acquires/releases throttle permits around every HTTP call (per-request AIMD accuracy), and updates the `AsyncTaskScheduler` to release submission slots for LLM-bound tasks (cross-key starvation prevention). The `ModelFacade` is untouched — throttling is a transport concern below it. PR-6 also narrows the HTTP-layer retry boundary: `429` is removed from transport-level retryable statuses so raw rate-limit responses reach `ThrottleManager.release_rate_limited()` on the first throttled attempt, while non-rate-limit transient failures (`502`/`503`/`504`, transport errors) remain retried in the shared HTTP layer. AIMD tuning parameters are exposed on `RunConfig` (`throttle_reduce_factor`, `throttle_additive_increase`, `throttle_success_window`, `throttle_block_seconds`) and forwarded through the factory chain to `ThrottleManager`. The submission pool is sized dynamically from aggregate `max_parallel_requests` via `ModelRegistry.get_aggregate_max_parallel_requests()`. Design rationale in `plans/343/dual-layer-throttle-exploration.md`. - files: `models/clients/throttled.py` (new), `models/clients/retry.py`, `models/clients/factory.py`, `models/factory.py`, `models/registry.py`, `config/run_config.py`, `resources/resource_provider.py`, `dataset_builders/async_scheduler.py`, `dataset_builders/column_wise_builder.py` - docs: `plans/343/model-facade-overhaul-pr-6-architecture-notes.md`, `plans/343/dual-layer-throttle-exploration.md` -7. PR-7: Config/CLI auth schema rollout + migration guards + docs. +7. PR-7: Remove LiteLLM dependency and bridge path. + - Native adapters are now the default for all predefined providers (PR-6). No soak window needed — drop the bridge entirely. 
+ - files: remove `clients/adapters/litellm_bridge.py`, `models/litellm_overrides.py`; remove `apply_litellm_patches()` call from `models/factory.py`; remove LiteLLM fallback branch and `_create_bridge_client` from `clients/factory.py`; remove `DATA_DESIGNER_MODEL_BACKEND` env-var support; remove LiteLLM match arms from `models/errors.py`; remove `litellm` from `lazy_heavy_imports.py` and `pyproject.toml` runtime deps. + - docs: remove LiteLLM references and close out migration notes. +8. PR-8: Config/CLI auth schema rollout + migration guards + docs. - files: `config/models.py`, `cli/forms/provider_builder.py` - docs: publish auth schema migration guide (legacy `api_key` fallback + typed `auth` objects) and CLI examples. -8. PR-8: Cutover flag default flip to native while retaining bridge path. - - docs: update rollout runbook and env-flag guidance (`DATA_DESIGNER_MODEL_BACKEND`) for operators. -9. PR-9: Remove LiteLLM dependency/path after soak window. - - files: `lazy_heavy_imports.py` and removal of legacy LiteLLM runtime path - - docs: remove LiteLLM references and close out migration notes. ### PR coverage check (Step 1) @@ -232,7 +231,7 @@ Every file listed in `File-level change map` must map to exactly one PR above. I 3. Are sync and async paths symmetric in behavior? 4. Does adaptive throttling honor global cap and domain key rules? 5. Is any secret material exposed in logs or reprs? -6. Is rollback possible via feature flag with bridge path retained during soak? +6. Is the LiteLLM bridge path fully removed with no residual imports or runtime references? 7. Are adapter lifecycle teardown hooks wired (`ModelRegistry`/`ResourceProvider`) with no leaked clients in tests? ## Why This Plan @@ -1016,8 +1015,8 @@ During mixed bridge/native rollout: 1. `apply_litellm_patches()` must run if any configured model resolves to `LiteLLMBridgeClient`. 2. Patch application must be idempotent and safe when called multiple times. -3. 
`ThreadSafeCache` + LiteLLM patch behavior remains in place until PR-7 removes bridge/LiteLLM path. -4. PR-7 is the cleanup point for removing `litellm_overrides.py` patch side effects. +3. `ThreadSafeCache` + LiteLLM patch behavior is removed in PR-7 along with the bridge/LiteLLM path. +4. PR-7 is the cleanup point for removing `litellm_overrides.py` and all patch side effects. ## Error Model and Mapping diff --git a/plans/343/model-facade-overhaul-pr-6-architecture-notes.md b/plans/343/model-facade-overhaul-pr-6-architecture-notes.md index b5dcfdc0e..40a9746c0 100644 --- a/plans/343/model-facade-overhaul-pr-6-architecture-notes.md +++ b/plans/343/model-facade-overhaul-pr-6-architecture-notes.md @@ -878,11 +878,11 @@ Key points: with a `get_effective_throttle_capacity()` method that deduplicates by `(provider_name, model_id)` and uses the real `min()` cap. -PR-7 picks up the config/CLI auth schema rollout that was originally +PR-7 removes the LiteLLM dependency and bridge path entirely — native +adapters are the default for all predefined providers and no soak +window is needed. + +PR-8 picks up the config/CLI auth schema rollout that was originally scoped for PR-6, adding typed provider-specific auth objects (`AnthropicAuth`, `OpenAIApiKeyAuth`) to `ModelProvider` with backward-compatible `api_key` fallback. - -PR-8 flips the default backend to native while retaining the bridge path. - -PR-9 removes the LiteLLM dependency after the soak window. diff --git a/plans/343/model-facade-overhaul-pr-7-architecture-notes.md b/plans/343/model-facade-overhaul-pr-7-architecture-notes.md new file mode 100644 index 000000000..922c33b91 --- /dev/null +++ b/plans/343/model-facade-overhaul-pr-7-architecture-notes.md @@ -0,0 +1,287 @@ +--- +date: 2026-03-24 +authors: + - nmulepati +--- + +# Model Facade Overhaul PR-7 Architecture Notes + +This document captures the architecture intent and implementation plan +for PR-7 from `plans/343/model-facade-overhaul-plan-step-1.md`.
+ +PR-7 removes the LiteLLM dependency and bridge path entirely. With +PR-6 merged, all three predefined providers (NVIDIA, OpenAI, +OpenRouter) route to native HTTP adapters (`OpenAICompatibleClient`, +`AnthropicClient`) by default. The LiteLLM bridge was retained as a +fallback for unknown `provider_type` values and as an opt-in via the +`DATA_DESIGNER_MODEL_BACKEND=litellm_bridge` environment variable. +Neither path is needed: users with custom providers should configure +`provider_type` as `"openai"` or `"anthropic"` (the two API formats +the industry has converged on), and the env-var escape hatch was a +migration aid that is no longer necessary. + +## Goal + +Remove all LiteLLM runtime code, test code, and the `litellm` package +dependency. After this PR, `litellm` is not imported, installed, or +referenced anywhere in the runtime or test codebase. + +## Problem + +LiteLLM remains in the dependency tree and is eagerly imported at +startup via `apply_litellm_patches()` in `models/factory.py`, even +though no default code path uses it. This causes: + +- **Startup cost:** `litellm` is a heavy import (~300ms+) that loads + on every `create_model_registry()` call regardless of backend. +- **Dependency surface:** `litellm` pins a narrow version range + (`>=1.77.0,<1.80.12`) that constrains upgrades and introduces + transitive dependency conflicts. +- **Dead code:** The bridge adapter, LiteLLM overrides module, + `CustomRouter`, `ThreadSafeCache`, and LiteLLM exception match arms + in `errors.py` are unreachable in the default flow. +- **Test maintenance:** Bridge-specific tests and LiteLLM override + tests exercise code that no production path uses. + +## What Changes + +### 1. Delete `litellm_bridge.py` + +Remove `clients/adapters/litellm_bridge.py` — the bridge adapter that +wraps LiteLLM's router behind the `ModelClient` protocol. + +### 2. 
Delete `litellm_overrides.py` + +Remove `models/litellm_overrides.py` — the module containing +`ThreadSafeCache`, `CustomRouter`, `LiteLLMRouterDefaultKwargs`, +`patch_image_url_list_item`, and `apply_litellm_patches`. These were +LiteLLM-specific workarounds (thread-safe cache, exponential backoff +override, image URL schema patch) that are no longer needed. + +### 3. Remove `apply_litellm_patches()` from `models/factory.py` + +The `create_model_registry` factory unconditionally calls +`apply_litellm_patches()`. This import and call are removed. The +factory retains its existing structure — it just no longer has a +LiteLLM initialization step. + +### 4. Remove LiteLLM fallback from `clients/factory.py` + +The `create_model_client` function currently has four routing paths: + +1. `DATA_DESIGNER_MODEL_BACKEND=litellm_bridge` → bridge (removed) +2. `provider_type == "openai"` → `OpenAICompatibleClient` (kept) +3. `provider_type == "anthropic"` → `AnthropicClient` (kept) +4. Unknown `provider_type` → bridge fallback (removed) + +After this PR, unknown `provider_type` values raise a +`ValueError` with a clear message listing supported types. The +`DATA_DESIGNER_MODEL_BACKEND` env-var, `_BACKEND_ENV_VAR`, +`_BACKEND_BRIDGE` constants, and `_create_bridge_client` helper are +all removed. + +The imports of `LiteLLMBridgeClient`, `CustomRouter`, +`LiteLLMRouterDefaultKwargs`, and `lazy` (for `lazy.litellm`) are +removed from the factory module. + +### 5. Remove LiteLLM match arms from `models/errors.py` + +The `handle_llm_exceptions` function has a `match` statement with +two groups of cases: + +1. `ProviderError` — canonical errors from native adapters (kept) +2. `lazy.litellm.exceptions.*` — LiteLLM-specific errors labeled + "safety net during bridge period" (removed) + +After removal, the match statement handles: `ProviderError`, +`GenerationValidationFailureError`, `DataDesignerError`, and the +generic `case _:` fallback. 
The `DownstreamLLMExceptionMessageParser` +class is also removed — it only parsed LiteLLM exception types +(`BadRequestError`, `ContextWindowExceededError`, `APIError`). + +The `import litellm` in the `TYPE_CHECKING` block and the +`import data_designer.lazy_heavy_imports as lazy` are removed from +this module. + +**Ported behavior:** `DownstreamLLMExceptionMessageParser` had one +piece of logic that the native `_raise_from_provider_error` path +lacked: extracting the specific token-count detail from OpenAI-style +context window errors (e.g. "This model's maximum context length is +32768 tokens"). This is ported to a new private helper +`_extract_context_window_detail` called from the +`CONTEXT_WINDOW_EXCEEDED` branch of `_raise_from_provider_error`. +The `ProviderError.message` field carries the provider's response +body, which contains the same text the old LiteLLM exception did. + +**403 behavioral delta:** The old LiteLLM path treated +`"Error code: 403"` in the exception string as +`ModelAuthenticationError`. The native path maps HTTP 403 to +`ProviderErrorKind.PERMISSION_DENIED` → `ModelPermissionDeniedError`. +This is more correct — 403 means "forbidden/permission denied", not +"bad credentials" (that's 401). The old behavior was a LiteLLM-era +workaround. No change is needed. + +### 6. Remove `litellm` from `lazy_heavy_imports.py` + +The `"litellm": "litellm"` entry is removed from `_LAZY_IMPORTS`. + +### 7. Remove `litellm` from `pyproject.toml` + +The `litellm>=1.77.0,<1.80.12` line is removed from the engine +package's runtime dependencies. + +### 8. Clean up `adapters/__init__.py` + +Remove `LiteLLMBridgeClient` and `LiteLLMRouter` from the +`__init__.py` exports. + +### 9. Update `async_concurrency.py` docstring + +The module docstring references LiteLLM as the reason for the +singleton event loop. The rationale is updated to reference +`httpx.AsyncClient` (the actual async-stateful dependency now). + +### 10. 
Update `README.md` and docstrings + +- `packages/data-designer-engine/README.md`: Remove "LLM integration + via litellm" reference. +- `column_configs.py`: Update LLMTextColumnConfig docstring to remove + "via LiteLLM" reference. + +### 11. Remove `flatten_extra_body` from `TransportKwargs.from_request` + +The `flatten_extra_body` parameter on `TransportKwargs.from_request` +existed solely for the LiteLLM bridge, which needed `extra_body` +preserved as a nested dict rather than merged into the top-level body. +All native adapters use the default (`True` — merge into top level). +The parameter, its `False` branch, and two tests exercising the +non-flatten path are removed. + +### 12. Clean up stale LiteLLM references in docstrings and comments + +All remaining LiteLLM references in docstrings and comments are +updated across the codebase: + +- `models/factory.py`: "Heavy dependencies (litellm, httpx)" → httpx +- `resources/resource_provider.py`: "heavy dependencies like litellm" + → httpx +- `clients/errors.py`: "stringified LiteLLM exception" → "stringified + provider exception" +- `clients/types.py`: Remove LiteLLM references from + `TransportKwargs` docstring +- `clients/parsing.py`: "LiteLLM-normalized fallback" → "legacy + fallback" +- `async_concurrency.py`: "libraries (like LiteLLM)" → + "httpx.AsyncClient" +- `AGENTS.md`: "via LiteLLM" → "via native HTTP adapters" + +### 13. Update benchmark script + +`scripts/benchmarks/benchmark_engine_v2.py` patches +`CustomRouter.completion` / `acompletion` for simulated LLM +responses. This is updated to patch the native +`OpenAICompatibleClient` instead. + +### 14. Delete test files + +- `tests/engine/models/clients/test_litellm_bridge.py` — bridge + adapter tests +- `tests/engine/models/test_litellm_overrides.py` — override/patch + tests + +### 15. 
Update test files + +- `tests/engine/models/clients/conftest.py` — remove `mock_router` + and `bridge_client` fixtures (only used by bridge tests); retain + shared HTTP mock helpers. +- `tests/engine/models/clients/test_factory.py` — remove bridge + fallback tests (`test_unknown_provider_creates_bridge_client`, + `test_bridge_env_override_forces_bridge_for_openai_provider`, + `test_bridge_env_override_forces_bridge_for_anthropic_provider`, + `test_bridge_client_is_wrapped_with_throttle_manager`); add test + for unknown provider raising `ValueError`. +- `tests/engine/models/test_model_registry.py` — remove + `apply_litellm_patches` mock from `test_create_model_registry`. +- `tests/engine/models/test_model_errors.py` — remove all test cases + that construct LiteLLM exception types. Add parametrized test cases + for every `ProviderErrorKind` value (`AUTHENTICATION`, + `API_CONNECTION`, `TIMEOUT`, `NOT_FOUND`, `INTERNAL_SERVER`, + `UNPROCESSABLE_ENTITY`, `API_ERROR`, multimodal `BAD_REQUEST` + variant) to ensure full coverage of the native error path. Add + dedicated tests for `_extract_context_window_detail` (with and + without OpenAI-style detail in the error message). +- `tests/engine/models/clients/test_parsing.py` — remove two + `flatten_extra_body=False` tests that exercised the removed + parameter. +- `tests/engine/models/conftest.py` — update `stub_model_client` + fixture docstring to remove "without a real LiteLLM router" + reference. + +## What Does NOT Change + +1. **`ModelFacade`** — untouched. The facade delegates to + `ModelClient` and is unaware of which adapter backs it. +2. **`ModelClient` protocol** — unchanged. Native adapters already + implement it. +3. **`ProviderError` / `ProviderErrorKind`** — the canonical error + model introduced in PR-2 is unchanged. It was always the target + error type; the LiteLLM match arms were a bridge-period safety net. +4. **`ThrottledModelClient`** — unchanged. It wraps any `ModelClient`. +5. 
**`RetryTransport`** — unchanged. +6. **`ThrottleManager`** — unchanged. +7. **Native adapters** (`OpenAICompatibleClient`, `AnthropicClient`) + — unchanged. +8. **`SecretResolver`** — unchanged. +9. **`RunConfig` / `ThrottleConfig`** — unchanged. + +## Files Touched + +| File | Change | +| --- | --- | +| `clients/adapters/litellm_bridge.py` | **Deleted** | +| `models/litellm_overrides.py` | **Deleted** | +| `clients/adapters/__init__.py` | Remove `LiteLLMBridgeClient`, `LiteLLMRouter` exports | +| `clients/factory.py` | Remove bridge fallback, env-var support, `_create_bridge_client`; raise `ValueError` for unknown `provider_type` | +| `models/factory.py` | Remove `apply_litellm_patches()` call and import | +| `models/errors.py` | Remove LiteLLM exception match arms and `DownstreamLLMExceptionMessageParser`; port context window detail extraction to `_extract_context_window_detail` | +| `clients/types.py` | Remove `flatten_extra_body` parameter from `TransportKwargs.from_request` | +| `clients/parsing.py` | Update "LiteLLM-normalized fallback" → "legacy fallback" in docstring | +| `clients/errors.py` | Update "stringified LiteLLM exception" → "stringified provider exception" in docstring | +| `resources/resource_provider.py` | Remove LiteLLM reference from docstring | +| `lazy_heavy_imports.py` | Remove `"litellm"` entry | +| `engine/pyproject.toml` | Remove `litellm` runtime dependency | +| `engine/README.md` | Remove LiteLLM reference | +| `config/column_configs.py` | Update docstring | +| `dataset_builders/utils/async_concurrency.py` | Update docstring | +| `scripts/benchmarks/benchmark_engine_v2.py` | Patch native client instead of `CustomRouter` | +| `tests/.../test_litellm_bridge.py` | **Deleted** | +| `tests/.../test_litellm_overrides.py` | **Deleted** | +| `tests/.../clients/conftest.py` | Remove bridge fixtures | +| `tests/.../clients/test_factory.py` | Remove bridge tests; add unknown-provider `ValueError` test | +| `tests/.../test_model_registry.py` | 
Remove `apply_litellm_patches` mock |
+| `tests/.../test_model_errors.py` | Remove LiteLLM exception test cases; add full `ProviderErrorKind` coverage and context window detail tests |
+| `tests/.../clients/test_parsing.py` | Remove `flatten_extra_body=False` tests |
+| `tests/.../conftest.py` | Update docstring |
+| `AGENTS.md` | Remove "via LiteLLM" references |
+
+## Risk Assessment
+
+**Low risk.** This PR only removes dead code paths. The native
+adapters have been the default since PR-3/PR-4 and have been
+exercised in production through PR-5 and PR-6. The bridge path was
+a safety net that is no longer needed.
+
+The only behavioral change is that unknown `provider_type` values
+now raise a `ValueError` instead of silently falling back to the
+LiteLLM bridge. This is intentional — users should explicitly
+configure their provider type.
+
+## Migration Impact
+
+Users who were explicitly setting
+`DATA_DESIGNER_MODEL_BACKEND=litellm_bridge` will find the env var
+silently ignored — native adapters are the only path, so the fix
+is simply to remove it. Users with custom `provider_type` values that aren't
+`"openai"` or `"anthropic"` will see a `ValueError` at client
+creation time. The fix is to set `provider_type` to whichever API
+format their provider uses (almost always `"openai"`).
diff --git a/scripts/benchmarks/benchmark_engine_v2.py b/scripts/benchmarks/benchmark_engine_v2.py index 5a3bb073a..286fe48f4 100644 --- a/scripts/benchmarks/benchmark_engine_v2.py +++ b/scripts/benchmarks/benchmark_engine_v2.py @@ -31,6 +31,7 @@ from data_designer.config.sampler_params import SamplerType, UniformSamplerParams from data_designer.config.validator_params import LocalCallableValidatorParams, ValidatorType from data_designer.engine.mcp.registry import MCPToolDefinition, MCPToolResult +from data_designer.engine.models.clients.types import AssistantMessage, ChatCompletionResponse, ToolCall from data_designer.lazy_heavy_imports import np, pd if TYPE_CHECKING: @@ -198,22 +199,10 @@ class ResponseProfile: @dataclass(frozen=True) -class FakeMessage: - content: str - tool_calls: list[dict[str, Any]] | None = None - reasoning_content: str | None = None +class FakeCompletionResponse: + """Wraps a ChatCompletionResponse with benchmark-specific latency metadata.""" - -@dataclass(frozen=True) -class FakeChoice: - message: FakeMessage - - -@dataclass(frozen=True) -class FakeResponse: - choices: list[FakeChoice] - usage: Any | None = None - model: str | None = None + response: ChatCompletionResponse latency_ms: float = 0.0 @@ -389,61 +378,62 @@ def _mock_tool_result(tool_name: str, arguments: dict[str, Any], provider_name: return MCPToolResult(content=json.dumps(payload)) -def _fake_response(model: str, messages: list[dict[str, Any]], **kwargs: Any) -> FakeResponse: +def _fake_response(model: str, messages: list[dict[str, Any]], **kwargs: Any) -> FakeCompletionResponse: if kwargs.get("tools") and _should_request_tool(messages): - tool_call = _build_tool_call(model, messages) - # Compute latency for tool-call responses using the same profile/seed mechanism. 
+ raw_call = _build_tool_call(model, messages) profile = _profile_for_model(model) rng = random.Random(_stable_seed(model, messages)) latency_ms = float(int(rng.betavariate(profile.latency_alpha, profile.latency_beta) * 900.0)) - return FakeResponse( - choices=[FakeChoice(message=FakeMessage(content="Using tool.", tool_calls=[tool_call]))], - model=model, + return FakeCompletionResponse( + response=ChatCompletionResponse( + message=AssistantMessage( + content="Using tool.", + tool_calls=[ + ToolCall( + id=raw_call["id"], + name=raw_call["function"]["name"], + arguments_json=raw_call["function"]["arguments"], + ) + ], + ), + ), latency_ms=latency_ms, ) response_text, latency_ms = _mock_response_text(model, messages) - return FakeResponse( - choices=[FakeChoice(message=FakeMessage(content=response_text))], - model=model, + return FakeCompletionResponse( + response=ChatCompletionResponse(message=AssistantMessage(content=response_text)), latency_ms=latency_ms, ) @contextlib.contextmanager def _patch_llm_responses(*, simulated_latency: bool = False) -> Iterator[None]: - # Imports are deferred so engine selection respects DATA_DESIGNER_ASYNC_ENGINE. 
- from data_designer.engine.models.litellm_overrides import CustomRouter + from data_designer.engine.models.clients.adapters.openai_compatible import OpenAICompatibleClient - original_completion = CustomRouter.completion - original_acompletion = getattr(CustomRouter, "acompletion", None) + original_completion = OpenAICompatibleClient.completion + original_acompletion = OpenAICompatibleClient.acompletion - def fake_completion(self: Any, model: str, messages: list[dict[str, Any]], **kwargs: Any) -> FakeResponse: + def fake_completion(self: Any, request: Any) -> ChatCompletionResponse: _ = self - response = _fake_response(model, messages, **kwargs) - if simulated_latency and response.latency_ms > 0: - time.sleep(response.latency_ms / 1000.0) - return response + fake = _fake_response(request.model, request.messages, tools=request.tools) + if simulated_latency and fake.latency_ms > 0: + time.sleep(fake.latency_ms / 1000.0) + return fake.response - async def fake_acompletion(self: Any, model: str, messages: list[dict[str, Any]], **kwargs: Any) -> FakeResponse: + async def fake_acompletion(self: Any, request: Any) -> ChatCompletionResponse: _ = self - response = _fake_response(model, messages, **kwargs) - if simulated_latency and response.latency_ms > 0: - await asyncio.sleep(response.latency_ms / 1000.0) - return response + fake = _fake_response(request.model, request.messages, tools=request.tools) + if simulated_latency and fake.latency_ms > 0: + await asyncio.sleep(fake.latency_ms / 1000.0) + return fake.response - CustomRouter.completion = fake_completion - CustomRouter.acompletion = fake_acompletion + OpenAICompatibleClient.completion = fake_completion # type: ignore[assignment] + OpenAICompatibleClient.acompletion = fake_acompletion # type: ignore[assignment] try: yield finally: - CustomRouter.completion = original_completion - if original_acompletion is not None: - CustomRouter.acompletion = original_acompletion - else: - try: - delattr(CustomRouter, "acompletion") 
- except AttributeError: - pass + OpenAICompatibleClient.completion = original_completion # type: ignore[assignment] + OpenAICompatibleClient.acompletion = original_acompletion # type: ignore[assignment] @contextlib.contextmanager diff --git a/uv.lock b/uv.lock index 9e4dbb49b..3ee940806 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 3 +revision = 2 requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.14'", @@ -856,7 +856,6 @@ dependencies = [ { name = "json-repair" }, { name = "jsonpath-rust-bindings" }, { name = "jsonschema" }, - { name = "litellm" }, { name = "lxml" }, { name = "marko" }, { name = "mcp" }, @@ -883,7 +882,6 @@ requires-dist = [ { name = "json-repair", specifier = ">=0.48.0,<1" }, { name = "jsonpath-rust-bindings", specifier = ">=1.0,<2" }, { name = "jsonschema", specifier = ">=4.0.0,<5" }, - { name = "litellm", specifier = ">=1.77.0,<1.80.12" }, { name = "lxml", specifier = ">=6.0.2,<7" }, { name = "marko", specifier = ">=2.1.2,<3" }, { name = "mcp", specifier = ">=1.26.0,<2" }, @@ -1068,15 +1066,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, ] -[[package]] -name = "distro" -version = "1.9.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = 
"sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, -] - [[package]] name = "dnspython" version = "2.8.0" @@ -1183,69 +1172,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/a8/20d0723294217e47de6d9e2e40fd4a9d2f7c4b6ef974babd482a59743694/fastjsonschema-2.21.2-py3-none-any.whl", hash = "sha256:1c797122d0a86c5cace2e54bf4e819c36223b552017172f32c5c024a6b77e463", size = 24024, upload-time = "2025-08-14T18:49:34.776Z" }, ] -[[package]] -name = "fastuuid" -version = "0.14.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c3/7d/d9daedf0f2ebcacd20d599928f8913e9d2aea1d56d2d355a93bfa2b611d7/fastuuid-0.14.0.tar.gz", hash = "sha256:178947fc2f995b38497a74172adee64fdeb8b7ec18f2a5934d037641ba265d26", size = 18232, upload-time = "2025-10-19T22:19:22.402Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ad/b2/731a6696e37cd20eed353f69a09f37a984a43c9713764ee3f7ad5f57f7f9/fastuuid-0.14.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:6e6243d40f6c793c3e2ee14c13769e341b90be5ef0c23c82fa6515a96145181a", size = 516760, upload-time = "2025-10-19T22:25:21.509Z" }, - { url = "https://files.pythonhosted.org/packages/c5/79/c73c47be2a3b8734d16e628982653517f80bbe0570e27185d91af6096507/fastuuid-0.14.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:13ec4f2c3b04271f62be2e1ce7e95ad2dd1cf97e94503a3760db739afbd48f00", size = 264748, upload-time = "2025-10-19T22:41:52.873Z" }, - { url = "https://files.pythonhosted.org/packages/24/c5/84c1eea05977c8ba5173555b0133e3558dc628bcf868d6bf1689ff14aedc/fastuuid-0.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b2fdd48b5e4236df145a149d7125badb28e0a383372add3fbaac9a6b7a394470", size = 254537, upload-time = "2025-10-19T22:33:55.603Z" }, - { url = 
"https://files.pythonhosted.org/packages/0e/23/4e362367b7fa17dbed646922f216b9921efb486e7abe02147e4b917359f8/fastuuid-0.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f74631b8322d2780ebcf2d2d75d58045c3e9378625ec51865fe0b5620800c39d", size = 278994, upload-time = "2025-10-19T22:26:17.631Z" }, - { url = "https://files.pythonhosted.org/packages/b2/72/3985be633b5a428e9eaec4287ed4b873b7c4c53a9639a8b416637223c4cd/fastuuid-0.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83cffc144dc93eb604b87b179837f2ce2af44871a7b323f2bfed40e8acb40ba8", size = 280003, upload-time = "2025-10-19T22:23:45.415Z" }, - { url = "https://files.pythonhosted.org/packages/b3/6d/6ef192a6df34e2266d5c9deb39cd3eea986df650cbcfeaf171aa52a059c3/fastuuid-0.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a771f135ab4523eb786e95493803942a5d1fc1610915f131b363f55af53b219", size = 303583, upload-time = "2025-10-19T22:26:00.756Z" }, - { url = "https://files.pythonhosted.org/packages/9d/11/8a2ea753c68d4fece29d5d7c6f3f903948cc6e82d1823bc9f7f7c0355db3/fastuuid-0.14.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4edc56b877d960b4eda2c4232f953a61490c3134da94f3c28af129fb9c62a4f6", size = 460955, upload-time = "2025-10-19T22:36:25.196Z" }, - { url = "https://files.pythonhosted.org/packages/23/42/7a32c93b6ce12642d9a152ee4753a078f372c9ebb893bc489d838dd4afd5/fastuuid-0.14.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bcc96ee819c282e7c09b2eed2b9bd13084e3b749fdb2faf58c318d498df2efbe", size = 480763, upload-time = "2025-10-19T22:24:28.451Z" }, - { url = "https://files.pythonhosted.org/packages/b9/e9/a5f6f686b46e3ed4ed3b93770111c233baac87dd6586a411b4988018ef1d/fastuuid-0.14.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7a3c0bca61eacc1843ea97b288d6789fbad7400d16db24e36a66c28c268cfe3d", size = 452613, upload-time = "2025-10-19T22:25:06.827Z" }, - { url = 
"https://files.pythonhosted.org/packages/b4/c9/18abc73c9c5b7fc0e476c1733b678783b2e8a35b0be9babd423571d44e98/fastuuid-0.14.0-cp310-cp310-win32.whl", hash = "sha256:7f2f3efade4937fae4e77efae1af571902263de7b78a0aee1a1653795a093b2a", size = 155045, upload-time = "2025-10-19T22:28:32.732Z" }, - { url = "https://files.pythonhosted.org/packages/5e/8a/d9e33f4eb4d4f6d9f2c5c7d7e96b5cdbb535c93f3b1ad6acce97ee9d4bf8/fastuuid-0.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:ae64ba730d179f439b0736208b4c279b8bc9c089b102aec23f86512ea458c8a4", size = 156122, upload-time = "2025-10-19T22:23:15.59Z" }, - { url = "https://files.pythonhosted.org/packages/98/f3/12481bda4e5b6d3e698fbf525df4443cc7dce746f246b86b6fcb2fba1844/fastuuid-0.14.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:73946cb950c8caf65127d4e9a325e2b6be0442a224fd51ba3b6ac44e1912ce34", size = 516386, upload-time = "2025-10-19T22:42:40.176Z" }, - { url = "https://files.pythonhosted.org/packages/59/19/2fc58a1446e4d72b655648eb0879b04e88ed6fa70d474efcf550f640f6ec/fastuuid-0.14.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:12ac85024637586a5b69645e7ed986f7535106ed3013640a393a03e461740cb7", size = 264569, upload-time = "2025-10-19T22:25:50.977Z" }, - { url = "https://files.pythonhosted.org/packages/78/29/3c74756e5b02c40cfcc8b1d8b5bac4edbd532b55917a6bcc9113550e99d1/fastuuid-0.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:05a8dde1f395e0c9b4be515b7a521403d1e8349443e7641761af07c7ad1624b1", size = 254366, upload-time = "2025-10-19T22:29:49.166Z" }, - { url = "https://files.pythonhosted.org/packages/52/96/d761da3fccfa84f0f353ce6e3eb8b7f76b3aa21fd25e1b00a19f9c80a063/fastuuid-0.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09378a05020e3e4883dfdab438926f31fea15fd17604908f3d39cbeb22a0b4dc", size = 278978, upload-time = "2025-10-19T22:35:41.306Z" }, - { url = 
"https://files.pythonhosted.org/packages/fc/c2/f84c90167cc7765cb82b3ff7808057608b21c14a38531845d933a4637307/fastuuid-0.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbb0c4b15d66b435d2538f3827f05e44e2baafcc003dd7d8472dc67807ab8fd8", size = 279692, upload-time = "2025-10-19T22:25:36.997Z" }, - { url = "https://files.pythonhosted.org/packages/af/7b/4bacd03897b88c12348e7bd77943bac32ccf80ff98100598fcff74f75f2e/fastuuid-0.14.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cd5a7f648d4365b41dbf0e38fe8da4884e57bed4e77c83598e076ac0c93995e7", size = 303384, upload-time = "2025-10-19T22:29:46.578Z" }, - { url = "https://files.pythonhosted.org/packages/c0/a2/584f2c29641df8bd810d00c1f21d408c12e9ad0c0dafdb8b7b29e5ddf787/fastuuid-0.14.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c0a94245afae4d7af8c43b3159d5e3934c53f47140be0be624b96acd672ceb73", size = 460921, upload-time = "2025-10-19T22:36:42.006Z" }, - { url = "https://files.pythonhosted.org/packages/24/68/c6b77443bb7764c760e211002c8638c0c7cce11cb584927e723215ba1398/fastuuid-0.14.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2b29e23c97e77c3a9514d70ce343571e469098ac7f5a269320a0f0b3e193ab36", size = 480575, upload-time = "2025-10-19T22:28:18.975Z" }, - { url = "https://files.pythonhosted.org/packages/5a/87/93f553111b33f9bb83145be12868c3c475bf8ea87c107063d01377cc0e8e/fastuuid-0.14.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1e690d48f923c253f28151b3a6b4e335f2b06bf669c68a02665bc150b7839e94", size = 452317, upload-time = "2025-10-19T22:25:32.75Z" }, - { url = "https://files.pythonhosted.org/packages/9e/8c/a04d486ca55b5abb7eaa65b39df8d891b7b1635b22db2163734dc273579a/fastuuid-0.14.0-cp311-cp311-win32.whl", hash = "sha256:a6f46790d59ab38c6aa0e35c681c0484b50dc0acf9e2679c005d61e019313c24", size = 154804, upload-time = "2025-10-19T22:24:15.615Z" }, - { url = 
"https://files.pythonhosted.org/packages/9c/b2/2d40bf00820de94b9280366a122cbaa60090c8cf59e89ac3938cf5d75895/fastuuid-0.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:e150eab56c95dc9e3fefc234a0eedb342fac433dacc273cd4d150a5b0871e1fa", size = 156099, upload-time = "2025-10-19T22:24:31.646Z" }, - { url = "https://files.pythonhosted.org/packages/02/a2/e78fcc5df65467f0d207661b7ef86c5b7ac62eea337c0c0fcedbeee6fb13/fastuuid-0.14.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:77e94728324b63660ebf8adb27055e92d2e4611645bf12ed9d88d30486471d0a", size = 510164, upload-time = "2025-10-19T22:31:45.635Z" }, - { url = "https://files.pythonhosted.org/packages/2b/b3/c846f933f22f581f558ee63f81f29fa924acd971ce903dab1a9b6701816e/fastuuid-0.14.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:caa1f14d2102cb8d353096bc6ef6c13b2c81f347e6ab9d6fbd48b9dea41c153d", size = 261837, upload-time = "2025-10-19T22:38:38.53Z" }, - { url = "https://files.pythonhosted.org/packages/54/ea/682551030f8c4fa9a769d9825570ad28c0c71e30cf34020b85c1f7ee7382/fastuuid-0.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d23ef06f9e67163be38cece704170486715b177f6baae338110983f99a72c070", size = 251370, upload-time = "2025-10-19T22:40:26.07Z" }, - { url = "https://files.pythonhosted.org/packages/14/dd/5927f0a523d8e6a76b70968e6004966ee7df30322f5fc9b6cdfb0276646a/fastuuid-0.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c9ec605ace243b6dbe3bd27ebdd5d33b00d8d1d3f580b39fdd15cd96fd71796", size = 277766, upload-time = "2025-10-19T22:37:23.779Z" }, - { url = "https://files.pythonhosted.org/packages/16/6e/c0fb547eef61293153348f12e0f75a06abb322664b34a1573a7760501336/fastuuid-0.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:808527f2407f58a76c916d6aa15d58692a4a019fdf8d4c32ac7ff303b7d7af09", size = 278105, upload-time = "2025-10-19T22:26:56.821Z" }, - { url = 
"https://files.pythonhosted.org/packages/2d/b1/b9c75e03b768f61cf2e84ee193dc18601aeaf89a4684b20f2f0e9f52b62c/fastuuid-0.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fb3c0d7fef6674bbeacdd6dbd386924a7b60b26de849266d1ff6602937675c8", size = 301564, upload-time = "2025-10-19T22:30:31.604Z" }, - { url = "https://files.pythonhosted.org/packages/fc/fa/f7395fdac07c7a54f18f801744573707321ca0cee082e638e36452355a9d/fastuuid-0.14.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab3f5d36e4393e628a4df337c2c039069344db5f4b9d2a3c9cea48284f1dd741", size = 459659, upload-time = "2025-10-19T22:31:32.341Z" }, - { url = "https://files.pythonhosted.org/packages/66/49/c9fd06a4a0b1f0f048aacb6599e7d96e5d6bc6fa680ed0d46bf111929d1b/fastuuid-0.14.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b9a0ca4f03b7e0b01425281ffd44e99d360e15c895f1907ca105854ed85e2057", size = 478430, upload-time = "2025-10-19T22:26:22.962Z" }, - { url = "https://files.pythonhosted.org/packages/be/9c/909e8c95b494e8e140e8be6165d5fc3f61fdc46198c1554df7b3e1764471/fastuuid-0.14.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3acdf655684cc09e60fb7e4cf524e8f42ea760031945aa8086c7eae2eeeabeb8", size = 450894, upload-time = "2025-10-19T22:27:01.647Z" }, - { url = "https://files.pythonhosted.org/packages/90/eb/d29d17521976e673c55ef7f210d4cdd72091a9ec6755d0fd4710d9b3c871/fastuuid-0.14.0-cp312-cp312-win32.whl", hash = "sha256:9579618be6280700ae36ac42c3efd157049fe4dd40ca49b021280481c78c3176", size = 154374, upload-time = "2025-10-19T22:29:19.879Z" }, - { url = "https://files.pythonhosted.org/packages/cc/fc/f5c799a6ea6d877faec0472d0b27c079b47c86b1cdc577720a5386483b36/fastuuid-0.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:d9e4332dc4ba054434a9594cbfaf7823b57993d7d8e7267831c3e059857cf397", size = 156550, upload-time = "2025-10-19T22:27:49.658Z" }, - { url = 
"https://files.pythonhosted.org/packages/a5/83/ae12dd39b9a39b55d7f90abb8971f1a5f3c321fd72d5aa83f90dc67fe9ed/fastuuid-0.14.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:77a09cb7427e7af74c594e409f7731a0cf887221de2f698e1ca0ebf0f3139021", size = 510720, upload-time = "2025-10-19T22:42:34.633Z" }, - { url = "https://files.pythonhosted.org/packages/53/b0/a4b03ff5d00f563cc7546b933c28cb3f2a07344b2aec5834e874f7d44143/fastuuid-0.14.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:9bd57289daf7b153bfa3e8013446aa144ce5e8c825e9e366d455155ede5ea2dc", size = 262024, upload-time = "2025-10-19T22:30:25.482Z" }, - { url = "https://files.pythonhosted.org/packages/9c/6d/64aee0a0f6a58eeabadd582e55d0d7d70258ffdd01d093b30c53d668303b/fastuuid-0.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ac60fc860cdf3c3f327374db87ab8e064c86566ca8c49d2e30df15eda1b0c2d5", size = 251679, upload-time = "2025-10-19T22:36:14.096Z" }, - { url = "https://files.pythonhosted.org/packages/60/f5/a7e9cda8369e4f7919d36552db9b2ae21db7915083bc6336f1b0082c8b2e/fastuuid-0.14.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab32f74bd56565b186f036e33129da77db8be09178cd2f5206a5d4035fb2a23f", size = 277862, upload-time = "2025-10-19T22:36:23.302Z" }, - { url = "https://files.pythonhosted.org/packages/f0/d3/8ce11827c783affffd5bd4d6378b28eb6cc6d2ddf41474006b8d62e7448e/fastuuid-0.14.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33e678459cf4addaedd9936bbb038e35b3f6b2061330fd8f2f6a1d80414c0f87", size = 278278, upload-time = "2025-10-19T22:29:43.809Z" }, - { url = "https://files.pythonhosted.org/packages/a2/51/680fb6352d0bbade04036da46264a8001f74b7484e2fd1f4da9e3db1c666/fastuuid-0.14.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1e3cc56742f76cd25ecb98e4b82a25f978ccffba02e4bdce8aba857b6d85d87b", size = 301788, upload-time = "2025-10-19T22:36:06.825Z" }, - { url = 
"https://files.pythonhosted.org/packages/fa/7c/2014b5785bd8ebdab04ec857635ebd84d5ee4950186a577db9eff0fb8ff6/fastuuid-0.14.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:cb9a030f609194b679e1660f7e32733b7a0f332d519c5d5a6a0a580991290022", size = 459819, upload-time = "2025-10-19T22:35:31.623Z" }, - { url = "https://files.pythonhosted.org/packages/01/d2/524d4ceeba9160e7a9bc2ea3e8f4ccf1ad78f3bde34090ca0c51f09a5e91/fastuuid-0.14.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:09098762aad4f8da3a888eb9ae01c84430c907a297b97166b8abc07b640f2995", size = 478546, upload-time = "2025-10-19T22:26:03.023Z" }, - { url = "https://files.pythonhosted.org/packages/bc/17/354d04951ce114bf4afc78e27a18cfbd6ee319ab1829c2d5fb5e94063ac6/fastuuid-0.14.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1383fff584fa249b16329a059c68ad45d030d5a4b70fb7c73a08d98fd53bcdab", size = 450921, upload-time = "2025-10-19T22:31:02.151Z" }, - { url = "https://files.pythonhosted.org/packages/fb/be/d7be8670151d16d88f15bb121c5b66cdb5ea6a0c2a362d0dcf30276ade53/fastuuid-0.14.0-cp313-cp313-win32.whl", hash = "sha256:a0809f8cc5731c066c909047f9a314d5f536c871a7a22e815cc4967c110ac9ad", size = 154559, upload-time = "2025-10-19T22:36:36.011Z" }, - { url = "https://files.pythonhosted.org/packages/22/1d/5573ef3624ceb7abf4a46073d3554e37191c868abc3aecd5289a72f9810a/fastuuid-0.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:0df14e92e7ad3276327631c9e7cec09e32572ce82089c55cb1bb8df71cf394ed", size = 156539, upload-time = "2025-10-19T22:33:35.898Z" }, - { url = "https://files.pythonhosted.org/packages/16/c9/8c7660d1fe3862e3f8acabd9be7fc9ad71eb270f1c65cce9a2b7a31329ab/fastuuid-0.14.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:b852a870a61cfc26c884af205d502881a2e59cc07076b60ab4a951cc0c94d1ad", size = 510600, upload-time = "2025-10-19T22:43:44.17Z" }, - { url = 
"https://files.pythonhosted.org/packages/4c/f4/a989c82f9a90d0ad995aa957b3e572ebef163c5299823b4027986f133dfb/fastuuid-0.14.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c7502d6f54cd08024c3ea9b3514e2d6f190feb2f46e6dbcd3747882264bb5f7b", size = 262069, upload-time = "2025-10-19T22:43:38.38Z" }, - { url = "https://files.pythonhosted.org/packages/da/6c/a1a24f73574ac995482b1326cf7ab41301af0fabaa3e37eeb6b3df00e6e2/fastuuid-0.14.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1ca61b592120cf314cfd66e662a5b54a578c5a15b26305e1b8b618a6f22df714", size = 251543, upload-time = "2025-10-19T22:32:22.537Z" }, - { url = "https://files.pythonhosted.org/packages/1a/20/2a9b59185ba7a6c7b37808431477c2d739fcbdabbf63e00243e37bd6bf49/fastuuid-0.14.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa75b6657ec129d0abded3bec745e6f7ab642e6dba3a5272a68247e85f5f316f", size = 277798, upload-time = "2025-10-19T22:33:53.821Z" }, - { url = "https://files.pythonhosted.org/packages/ef/33/4105ca574f6ded0af6a797d39add041bcfb468a1255fbbe82fcb6f592da2/fastuuid-0.14.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8a0dfea3972200f72d4c7df02c8ac70bad1bb4c58d7e0ec1e6f341679073a7f", size = 278283, upload-time = "2025-10-19T22:29:02.812Z" }, - { url = "https://files.pythonhosted.org/packages/fe/8c/fca59f8e21c4deb013f574eae05723737ddb1d2937ce87cb2a5d20992dc3/fastuuid-0.14.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1bf539a7a95f35b419f9ad105d5a8a35036df35fdafae48fb2fd2e5f318f0d75", size = 301627, upload-time = "2025-10-19T22:35:54.985Z" }, - { url = "https://files.pythonhosted.org/packages/cb/e2/f78c271b909c034d429218f2798ca4e89eeda7983f4257d7865976ddbb6c/fastuuid-0.14.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:9a133bf9cc78fdbd1179cb58a59ad0100aa32d8675508150f3658814aeefeaa4", size = 459778, upload-time = "2025-10-19T22:28:00.999Z" }, - { url = 
"https://files.pythonhosted.org/packages/1e/f0/5ff209d865897667a2ff3e7a572267a9ced8f7313919f6d6043aed8b1caa/fastuuid-0.14.0-cp314-cp314-musllinux_1_1_i686.whl", hash = "sha256:f54d5b36c56a2d5e1a31e73b950b28a0d83eb0c37b91d10408875a5a29494bad", size = 478605, upload-time = "2025-10-19T22:36:21.764Z" }, - { url = "https://files.pythonhosted.org/packages/e0/c8/2ce1c78f983a2c4987ea865d9516dbdfb141a120fd3abb977ae6f02ba7ca/fastuuid-0.14.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:ec27778c6ca3393ef662e2762dba8af13f4ec1aaa32d08d77f71f2a70ae9feb8", size = 450837, upload-time = "2025-10-19T22:34:37.178Z" }, - { url = "https://files.pythonhosted.org/packages/df/60/dad662ec9a33b4a5fe44f60699258da64172c39bd041da2994422cdc40fe/fastuuid-0.14.0-cp314-cp314-win32.whl", hash = "sha256:e23fc6a83f112de4be0cc1990e5b127c27663ae43f866353166f87df58e73d06", size = 154532, upload-time = "2025-10-19T22:35:18.217Z" }, - { url = "https://files.pythonhosted.org/packages/1f/f6/da4db31001e854025ffd26bc9ba0740a9cbba2c3259695f7c5834908b336/fastuuid-0.14.0-cp314-cp314-win_amd64.whl", hash = "sha256:df61342889d0f5e7a32f7284e55ef95103f2110fee433c2ae7c2c0956d76ac8a", size = 156457, upload-time = "2025-10-19T22:33:44.579Z" }, -] - [[package]] name = "filelock" version = "3.25.2" @@ -1419,6 +1345,7 @@ dependencies = [ { name = "griffecli" }, { name = "griffelib" }, ] +sdist = { url = "https://files.pythonhosted.org/packages/04/56/28a0accac339c164b52a92c6cfc45a903acc0c174caa5c1713803467b533/griffe-2.0.0.tar.gz", hash = "sha256:c68979cd8395422083a51ea7cf02f9c119d889646d99b7b656ee43725de1b80f", size = 293906, upload-time = "2026-03-23T21:06:53.402Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/8b/94/ee21d41e7eb4f823b94603b9d40f86d3c7fde80eacc2c3c71845476dddaa/griffe-2.0.0-py3-none-any.whl", hash = "sha256:5418081135a391c3e6e757a7f3f156f1a1a746cc7b4023868ff7d5e2f9a980aa", size = 5214, upload-time = "2026-02-09T19:09:44.105Z" }, ] @@ -1431,6 +1358,7 @@ dependencies = [ { name = 
"colorama" }, { name = "griffelib" }, ] +sdist = { url = "https://files.pythonhosted.org/packages/a4/f8/2e129fd4a86e52e58eefe664de05e7d502decf766e7316cc9e70fdec3e18/griffecli-2.0.0.tar.gz", hash = "sha256:312fa5ebb4ce6afc786356e2d0ce85b06c1c20d45abc42d74f0cda65e159f6ef", size = 56213, upload-time = "2026-03-23T21:06:54.8Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/e6/ed/d93f7a447bbf7a935d8868e9617cbe1cadf9ee9ee6bd275d3040fbf93d60/griffecli-2.0.0-py3-none-any.whl", hash = "sha256:9f7cd9ee9b21d55e91689358978d2385ae65c22f307a63fb3269acf3f21e643d", size = 9345, upload-time = "2026-02-09T19:09:42.554Z" }, ] @@ -1439,123 +1367,11 @@ wheels = [ name = "griffelib" version = "2.0.0" source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ad/06/eccbd311c9e2b3ca45dbc063b93134c57a1ccc7607c5e545264ad092c4a9/griffelib-2.0.0.tar.gz", hash = "sha256:e504d637a089f5cab9b5daf18f7645970509bf4f53eda8d79ed71cce8bd97934", size = 166312, upload-time = "2026-03-23T21:06:55.954Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/4d/51/c936033e16d12b627ea334aaaaf42229c37620d0f15593456ab69ab48161/griffelib-2.0.0-py3-none-any.whl", hash = "sha256:01284878c966508b6d6f1dbff9b6fa607bc062d8261c5c7253cb285b06422a7f", size = 142004, upload-time = "2026-02-09T19:09:40.561Z" }, ] -[[package]] -name = "grpcio" -version = "1.67.1" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.12' and python_full_version < '3.14'", - "python_full_version == '3.11.*'", - "python_full_version < '3.11'", -] -sdist = { url = "https://files.pythonhosted.org/packages/20/53/d9282a66a5db45981499190b77790570617a604a38f3d103d0400974aeb5/grpcio-1.67.1.tar.gz", hash = "sha256:3dc2ed4cabea4dc14d5e708c2b426205956077cc5de419b4d4079315017e9732", size = 12580022, upload-time = "2024-10-29T06:30:07.787Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/4e/cd/f6ca5c49aa0ae7bc6d0757f7dae6f789569e9490a635eaabe02bc02de7dc/grpcio-1.67.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:8b0341d66a57f8a3119b77ab32207072be60c9bf79760fa609c5609f2deb1f3f", size = 5112450, upload-time = "2024-10-29T06:23:38.202Z" }, - { url = "https://files.pythonhosted.org/packages/d4/f0/d9bbb4a83cbee22f738ee7a74aa41e09ccfb2dcea2cc30ebe8dab5b21771/grpcio-1.67.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:f5a27dddefe0e2357d3e617b9079b4bfdc91341a91565111a21ed6ebbc51b22d", size = 10937518, upload-time = "2024-10-29T06:23:43.535Z" }, - { url = "https://files.pythonhosted.org/packages/5b/17/0c5dbae3af548eb76669887642b5f24b232b021afe77eb42e22bc8951d9c/grpcio-1.67.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:43112046864317498a33bdc4797ae6a268c36345a910de9b9c17159d8346602f", size = 5633610, upload-time = "2024-10-29T06:23:47.168Z" }, - { url = "https://files.pythonhosted.org/packages/17/48/e000614e00153d7b2760dcd9526b95d72f5cfe473b988e78f0ff3b472f6c/grpcio-1.67.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9b929f13677b10f63124c1a410994a401cdd85214ad83ab67cc077fc7e480f0", size = 6240678, upload-time = "2024-10-29T06:23:49.352Z" }, - { url = "https://files.pythonhosted.org/packages/64/19/a16762a70eeb8ddfe43283ce434d1499c1c409ceec0c646f783883084478/grpcio-1.67.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7d1797a8a3845437d327145959a2c0c47c05947c9eef5ff1a4c80e499dcc6fa", size = 5884528, upload-time = "2024-10-29T06:23:52.345Z" }, - { url = "https://files.pythonhosted.org/packages/6b/dc/bd016aa3684914acd2c0c7fa4953b2a11583c2b844f3d7bae91fa9b98fbb/grpcio-1.67.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0489063974d1452436139501bf6b180f63d4977223ee87488fe36858c5725292", size = 6583680, upload-time = "2024-10-29T06:23:55.074Z" }, - { url = 
"https://files.pythonhosted.org/packages/1a/93/1441cb14c874f11aa798a816d582f9da82194b6677f0f134ea53d2d5dbeb/grpcio-1.67.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9fd042de4a82e3e7aca44008ee2fb5da01b3e5adb316348c21980f7f58adc311", size = 6162967, upload-time = "2024-10-29T06:23:57.286Z" }, - { url = "https://files.pythonhosted.org/packages/29/e9/9295090380fb4339b7e935b9d005fa9936dd573a22d147c9e5bb2df1b8d4/grpcio-1.67.1-cp310-cp310-win32.whl", hash = "sha256:638354e698fd0c6c76b04540a850bf1db27b4d2515a19fcd5cf645c48d3eb1ed", size = 3616336, upload-time = "2024-10-29T06:23:59.69Z" }, - { url = "https://files.pythonhosted.org/packages/ce/de/7c783b8cb8f02c667ca075c49680c4aeb8b054bc69784bcb3e7c1bbf4985/grpcio-1.67.1-cp310-cp310-win_amd64.whl", hash = "sha256:608d87d1bdabf9e2868b12338cd38a79969eaf920c89d698ead08f48de9c0f9e", size = 4352071, upload-time = "2024-10-29T06:24:02.477Z" }, - { url = "https://files.pythonhosted.org/packages/59/2c/b60d6ea1f63a20a8d09c6db95c4f9a16497913fb3048ce0990ed81aeeca0/grpcio-1.67.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:7818c0454027ae3384235a65210bbf5464bd715450e30a3d40385453a85a70cb", size = 5119075, upload-time = "2024-10-29T06:24:04.696Z" }, - { url = "https://files.pythonhosted.org/packages/b3/9a/e1956f7ca582a22dd1f17b9e26fcb8229051b0ce6d33b47227824772feec/grpcio-1.67.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ea33986b70f83844cd00814cee4451055cd8cab36f00ac64a31f5bb09b31919e", size = 11009159, upload-time = "2024-10-29T06:24:07.781Z" }, - { url = "https://files.pythonhosted.org/packages/43/a8/35fbbba580c4adb1d40d12e244cf9f7c74a379073c0a0ca9d1b5338675a1/grpcio-1.67.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:c7a01337407dd89005527623a4a72c5c8e2894d22bead0895306b23c6695698f", size = 5629476, upload-time = "2024-10-29T06:24:11.444Z" }, - { url = 
"https://files.pythonhosted.org/packages/77/c9/864d336e167263d14dfccb4dbfa7fce634d45775609895287189a03f1fc3/grpcio-1.67.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80b866f73224b0634f4312a4674c1be21b2b4afa73cb20953cbbb73a6b36c3cc", size = 6239901, upload-time = "2024-10-29T06:24:14.2Z" }, - { url = "https://files.pythonhosted.org/packages/f7/1e/0011408ebabf9bd69f4f87cc1515cbfe2094e5a32316f8714a75fd8ddfcb/grpcio-1.67.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9fff78ba10d4250bfc07a01bd6254a6d87dc67f9627adece85c0b2ed754fa96", size = 5881010, upload-time = "2024-10-29T06:24:17.451Z" }, - { url = "https://files.pythonhosted.org/packages/b4/7d/fbca85ee9123fb296d4eff8df566f458d738186d0067dec6f0aa2fd79d71/grpcio-1.67.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8a23cbcc5bb11ea7dc6163078be36c065db68d915c24f5faa4f872c573bb400f", size = 6580706, upload-time = "2024-10-29T06:24:20.038Z" }, - { url = "https://files.pythonhosted.org/packages/75/7a/766149dcfa2dfa81835bf7df623944c1f636a15fcb9b6138ebe29baf0bc6/grpcio-1.67.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1a65b503d008f066e994f34f456e0647e5ceb34cfcec5ad180b1b44020ad4970", size = 6161799, upload-time = "2024-10-29T06:24:22.604Z" }, - { url = "https://files.pythonhosted.org/packages/09/13/5b75ae88810aaea19e846f5380611837de411181df51fd7a7d10cb178dcb/grpcio-1.67.1-cp311-cp311-win32.whl", hash = "sha256:e29ca27bec8e163dca0c98084040edec3bc49afd10f18b412f483cc68c712744", size = 3616330, upload-time = "2024-10-29T06:24:25.775Z" }, - { url = "https://files.pythonhosted.org/packages/aa/39/38117259613f68f072778c9638a61579c0cfa5678c2558706b10dd1d11d3/grpcio-1.67.1-cp311-cp311-win_amd64.whl", hash = "sha256:786a5b18544622bfb1e25cc08402bd44ea83edfb04b93798d85dca4d1a0b5be5", size = 4354535, upload-time = "2024-10-29T06:24:28.614Z" }, - { url = 
"https://files.pythonhosted.org/packages/6e/25/6f95bd18d5f506364379eabc0d5874873cc7dbdaf0757df8d1e82bc07a88/grpcio-1.67.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:267d1745894200e4c604958da5f856da6293f063327cb049a51fe67348e4f953", size = 5089809, upload-time = "2024-10-29T06:24:31.24Z" }, - { url = "https://files.pythonhosted.org/packages/10/3f/d79e32e5d0354be33a12db2267c66d3cfeff700dd5ccdd09fd44a3ff4fb6/grpcio-1.67.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:85f69fdc1d28ce7cff8de3f9c67db2b0ca9ba4449644488c1e0303c146135ddb", size = 10981985, upload-time = "2024-10-29T06:24:34.942Z" }, - { url = "https://files.pythonhosted.org/packages/21/f2/36fbc14b3542e3a1c20fb98bd60c4732c55a44e374a4eb68f91f28f14aab/grpcio-1.67.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f26b0b547eb8d00e195274cdfc63ce64c8fc2d3e2d00b12bf468ece41a0423a0", size = 5588770, upload-time = "2024-10-29T06:24:38.145Z" }, - { url = "https://files.pythonhosted.org/packages/0d/af/bbc1305df60c4e65de8c12820a942b5e37f9cf684ef5e49a63fbb1476a73/grpcio-1.67.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4422581cdc628f77302270ff839a44f4c24fdc57887dc2a45b7e53d8fc2376af", size = 6214476, upload-time = "2024-10-29T06:24:41.006Z" }, - { url = "https://files.pythonhosted.org/packages/92/cf/1d4c3e93efa93223e06a5c83ac27e32935f998bc368e276ef858b8883154/grpcio-1.67.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7616d2ded471231c701489190379e0c311ee0a6c756f3c03e6a62b95a7146e", size = 5850129, upload-time = "2024-10-29T06:24:43.553Z" }, - { url = "https://files.pythonhosted.org/packages/ae/ca/26195b66cb253ac4d5ef59846e354d335c9581dba891624011da0e95d67b/grpcio-1.67.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8a00efecde9d6fcc3ab00c13f816313c040a28450e5e25739c24f432fc6d3c75", size = 6568489, upload-time = "2024-10-29T06:24:46.453Z" }, - { url = 
"https://files.pythonhosted.org/packages/d1/94/16550ad6b3f13b96f0856ee5dfc2554efac28539ee84a51d7b14526da985/grpcio-1.67.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:699e964923b70f3101393710793289e42845791ea07565654ada0969522d0a38", size = 6149369, upload-time = "2024-10-29T06:24:49.112Z" }, - { url = "https://files.pythonhosted.org/packages/33/0d/4c3b2587e8ad7f121b597329e6c2620374fccbc2e4e1aa3c73ccc670fde4/grpcio-1.67.1-cp312-cp312-win32.whl", hash = "sha256:4e7b904484a634a0fff132958dabdb10d63e0927398273917da3ee103e8d1f78", size = 3599176, upload-time = "2024-10-29T06:24:51.443Z" }, - { url = "https://files.pythonhosted.org/packages/7d/36/0c03e2d80db69e2472cf81c6123aa7d14741de7cf790117291a703ae6ae1/grpcio-1.67.1-cp312-cp312-win_amd64.whl", hash = "sha256:5721e66a594a6c4204458004852719b38f3d5522082be9061d6510b455c90afc", size = 4346574, upload-time = "2024-10-29T06:24:54.587Z" }, - { url = "https://files.pythonhosted.org/packages/12/d2/2f032b7a153c7723ea3dea08bffa4bcaca9e0e5bdf643ce565b76da87461/grpcio-1.67.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa0162e56fd10a5547fac8774c4899fc3e18c1aa4a4759d0ce2cd00d3696ea6b", size = 5091487, upload-time = "2024-10-29T06:24:57.416Z" }, - { url = "https://files.pythonhosted.org/packages/d0/ae/ea2ff6bd2475a082eb97db1104a903cf5fc57c88c87c10b3c3f41a184fc0/grpcio-1.67.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:beee96c8c0b1a75d556fe57b92b58b4347c77a65781ee2ac749d550f2a365dc1", size = 10943530, upload-time = "2024-10-29T06:25:01.062Z" }, - { url = "https://files.pythonhosted.org/packages/07/62/646be83d1a78edf8d69b56647327c9afc223e3140a744c59b25fbb279c3b/grpcio-1.67.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:a93deda571a1bf94ec1f6fcda2872dad3ae538700d94dc283c672a3b508ba3af", size = 5589079, upload-time = "2024-10-29T06:25:04.254Z" }, - { url = 
"https://files.pythonhosted.org/packages/d0/25/71513d0a1b2072ce80d7f5909a93596b7ed10348b2ea4fdcbad23f6017bf/grpcio-1.67.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6f255980afef598a9e64a24efce87b625e3e3c80a45162d111a461a9f92955", size = 6213542, upload-time = "2024-10-29T06:25:06.824Z" }, - { url = "https://files.pythonhosted.org/packages/76/9a/d21236297111052dcb5dc85cd77dc7bf25ba67a0f55ae028b2af19a704bc/grpcio-1.67.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e838cad2176ebd5d4a8bb03955138d6589ce9e2ce5d51c3ada34396dbd2dba8", size = 5850211, upload-time = "2024-10-29T06:25:10.149Z" }, - { url = "https://files.pythonhosted.org/packages/2d/fe/70b1da9037f5055be14f359026c238821b9bcf6ca38a8d760f59a589aacd/grpcio-1.67.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a6703916c43b1d468d0756c8077b12017a9fcb6a1ef13faf49e67d20d7ebda62", size = 6572129, upload-time = "2024-10-29T06:25:12.853Z" }, - { url = "https://files.pythonhosted.org/packages/74/0d/7df509a2cd2a54814598caf2fb759f3e0b93764431ff410f2175a6efb9e4/grpcio-1.67.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:917e8d8994eed1d86b907ba2a61b9f0aef27a2155bca6cbb322430fc7135b7bb", size = 6149819, upload-time = "2024-10-29T06:25:15.803Z" }, - { url = "https://files.pythonhosted.org/packages/0a/08/bc3b0155600898fd10f16b79054e1cca6cb644fa3c250c0fe59385df5e6f/grpcio-1.67.1-cp313-cp313-win32.whl", hash = "sha256:e279330bef1744040db8fc432becc8a727b84f456ab62b744d3fdb83f327e121", size = 3596561, upload-time = "2024-10-29T06:25:19.348Z" }, - { url = "https://files.pythonhosted.org/packages/5a/96/44759eca966720d0f3e1b105c43f8ad4590c97bf8eb3cd489656e9590baa/grpcio-1.67.1-cp313-cp313-win_amd64.whl", hash = "sha256:fa0c739ad8b1996bd24823950e3cb5152ae91fca1c09cc791190bf1627ffefba", size = 4346042, upload-time = "2024-10-29T06:25:21.939Z" }, -] - -[[package]] -name = "grpcio" -version = "1.78.0" -source = { registry = "https://pypi.org/simple" } 
-resolution-markers = [ - "python_full_version >= '3.14'", -] -dependencies = [ - { name = "typing-extensions", marker = "python_full_version >= '3.14'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/06/8a/3d098f35c143a89520e568e6539cc098fcd294495910e359889ce8741c84/grpcio-1.78.0.tar.gz", hash = "sha256:7382b95189546f375c174f53a5fa873cef91c4b8005faa05cc5b3beea9c4f1c5", size = 12852416, upload-time = "2026-02-06T09:57:18.093Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/a8/690a085b4d1fe066130de97a87de32c45062cf2ecd218df9675add895550/grpcio-1.78.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:7cc47943d524ee0096f973e1081cb8f4f17a4615f2116882a5f1416e4cfe92b5", size = 5946986, upload-time = "2026-02-06T09:54:34.043Z" }, - { url = "https://files.pythonhosted.org/packages/c7/1b/e5213c5c0ced9d2d92778d30529ad5bb2dcfb6c48c4e2d01b1f302d33d64/grpcio-1.78.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:c3f293fdc675ccba4db5a561048cca627b5e7bd1c8a6973ffedabe7d116e22e2", size = 11816533, upload-time = "2026-02-06T09:54:37.04Z" }, - { url = "https://files.pythonhosted.org/packages/18/37/1ba32dccf0a324cc5ace744c44331e300b000a924bf14840f948c559ede7/grpcio-1.78.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:10a9a644b5dd5aec3b82b5b0b90d41c0fa94c85ef42cb42cf78a23291ddb5e7d", size = 6519964, upload-time = "2026-02-06T09:54:40.268Z" }, - { url = "https://files.pythonhosted.org/packages/ed/f5/c0e178721b818072f2e8b6fde13faaba942406c634009caf065121ce246b/grpcio-1.78.0-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:4c5533d03a6cbd7f56acfc9cfb44ea64f63d29091e40e44010d34178d392d7eb", size = 7198058, upload-time = "2026-02-06T09:54:42.389Z" }, - { url = "https://files.pythonhosted.org/packages/5b/b2/40d43c91ae9cd667edc960135f9f08e58faa1576dc95af29f66ec912985f/grpcio-1.78.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:ff870aebe9a93a85283837801d35cd5f8814fe2ad01e606861a7fb47c762a2b7", size = 6727212, upload-time = "2026-02-06T09:54:44.91Z" }, - { url = "https://files.pythonhosted.org/packages/ed/88/9da42eed498f0efcfcd9156e48ae63c0cde3bea398a16c99fb5198c885b6/grpcio-1.78.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:391e93548644e6b2726f1bb84ed60048d4bcc424ce5e4af0843d28ca0b754fec", size = 7300845, upload-time = "2026-02-06T09:54:47.562Z" }, - { url = "https://files.pythonhosted.org/packages/23/3f/1c66b7b1b19a8828890e37868411a6e6925df5a9030bfa87ab318f34095d/grpcio-1.78.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:df2c8f3141f7cbd112a6ebbd760290b5849cda01884554f7c67acc14e7b1758a", size = 8284605, upload-time = "2026-02-06T09:54:50.475Z" }, - { url = "https://files.pythonhosted.org/packages/94/c4/ca1bd87394f7b033e88525384b4d1e269e8424ab441ea2fba1a0c5b50986/grpcio-1.78.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd8cb8026e5f5b50498a3c4f196f57f9db344dad829ffae16b82e4fdbaea2813", size = 7726672, upload-time = "2026-02-06T09:54:53.11Z" }, - { url = "https://files.pythonhosted.org/packages/41/09/f16e487d4cc65ccaf670f6ebdd1a17566b965c74fc3d93999d3b2821e052/grpcio-1.78.0-cp310-cp310-win32.whl", hash = "sha256:f8dff3d9777e5d2703a962ee5c286c239bf0ba173877cc68dc02c17d042e29de", size = 4076715, upload-time = "2026-02-06T09:54:55.549Z" }, - { url = "https://files.pythonhosted.org/packages/2a/32/4ce60d94e242725fd3bcc5673c04502c82a8e87b21ea411a63992dc39f8f/grpcio-1.78.0-cp310-cp310-win_amd64.whl", hash = "sha256:94f95cf5d532d0e717eed4fc1810e8e6eded04621342ec54c89a7c2f14b581bf", size = 4799157, upload-time = "2026-02-06T09:54:59.838Z" }, - { url = "https://files.pythonhosted.org/packages/86/c7/d0b780a29b0837bf4ca9580904dfb275c1fc321ded7897d620af7047ec57/grpcio-1.78.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:2777b783f6c13b92bd7b716667452c329eefd646bfb3f2e9dabea2e05dbd34f6", size = 5951525, upload-time = "2026-02-06T09:55:01.989Z" }, - { url = 
"https://files.pythonhosted.org/packages/c5/b1/96920bf2ee61df85a9503cb6f733fe711c0ff321a5a697d791b075673281/grpcio-1.78.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:9dca934f24c732750389ce49d638069c3892ad065df86cb465b3fa3012b70c9e", size = 11830418, upload-time = "2026-02-06T09:55:04.462Z" }, - { url = "https://files.pythonhosted.org/packages/83/0c/7c1528f098aeb75a97de2bae18c530f56959fb7ad6c882db45d9884d6edc/grpcio-1.78.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:459ab414b35f4496138d0ecd735fed26f1318af5e52cb1efbc82a09f0d5aa911", size = 6524477, upload-time = "2026-02-06T09:55:07.111Z" }, - { url = "https://files.pythonhosted.org/packages/8d/52/e7c1f3688f949058e19a011c4e0dec973da3d0ae5e033909677f967ae1f4/grpcio-1.78.0-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:082653eecbdf290e6e3e2c276ab2c54b9e7c299e07f4221872380312d8cf395e", size = 7198266, upload-time = "2026-02-06T09:55:10.016Z" }, - { url = "https://files.pythonhosted.org/packages/e5/61/8ac32517c1e856677282c34f2e7812d6c328fa02b8f4067ab80e77fdc9c9/grpcio-1.78.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:85f93781028ec63f383f6bc90db785a016319c561cc11151fbb7b34e0d012303", size = 6730552, upload-time = "2026-02-06T09:55:12.207Z" }, - { url = "https://files.pythonhosted.org/packages/bd/98/b8ee0158199250220734f620b12e4a345955ac7329cfd908d0bf0fda77f0/grpcio-1.78.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f12857d24d98441af6a1d5c87442d624411db486f7ba12550b07788f74b67b04", size = 7304296, upload-time = "2026-02-06T09:55:15.044Z" }, - { url = "https://files.pythonhosted.org/packages/bd/0f/7b72762e0d8840b58032a56fdbd02b78fc645b9fa993d71abf04edbc54f4/grpcio-1.78.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5397fff416b79e4b284959642a4e95ac4b0f1ece82c9993658e0e477d40551ec", size = 8288298, upload-time = "2026-02-06T09:55:17.276Z" }, - { url = 
"https://files.pythonhosted.org/packages/24/ae/ae4ce56bc5bb5caa3a486d60f5f6083ac3469228faa734362487176c15c5/grpcio-1.78.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fbe6e89c7ffb48518384068321621b2a69cab509f58e40e4399fdd378fa6d074", size = 7730953, upload-time = "2026-02-06T09:55:19.545Z" }, - { url = "https://files.pythonhosted.org/packages/b5/6e/8052e3a28eb6a820c372b2eb4b5e32d195c661e137d3eca94d534a4cfd8a/grpcio-1.78.0-cp311-cp311-win32.whl", hash = "sha256:6092beabe1966a3229f599d7088b38dfc8ffa1608b5b5cdda31e591e6500f856", size = 4076503, upload-time = "2026-02-06T09:55:21.521Z" }, - { url = "https://files.pythonhosted.org/packages/08/62/f22c98c5265dfad327251fa2f840b591b1df5f5e15d88b19c18c86965b27/grpcio-1.78.0-cp311-cp311-win_amd64.whl", hash = "sha256:1afa62af6e23f88629f2b29ec9e52ec7c65a7176c1e0a83292b93c76ca882558", size = 4799767, upload-time = "2026-02-06T09:55:24.107Z" }, - { url = "https://files.pythonhosted.org/packages/4e/f4/7384ed0178203d6074446b3c4f46c90a22ddf7ae0b3aee521627f54cfc2a/grpcio-1.78.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:f9ab915a267fc47c7e88c387a3a28325b58c898e23d4995f765728f4e3dedb97", size = 5913985, upload-time = "2026-02-06T09:55:26.832Z" }, - { url = "https://files.pythonhosted.org/packages/81/ed/be1caa25f06594463f685b3790b320f18aea49b33166f4141bfdc2bfb236/grpcio-1.78.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3f8904a8165ab21e07e58bf3e30a73f4dffc7a1e0dbc32d51c61b5360d26f43e", size = 11811853, upload-time = "2026-02-06T09:55:29.224Z" }, - { url = "https://files.pythonhosted.org/packages/24/a7/f06d151afc4e64b7e3cc3e872d331d011c279aaab02831e40a81c691fb65/grpcio-1.78.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:859b13906ce098c0b493af92142ad051bf64c7870fa58a123911c88606714996", size = 6475766, upload-time = "2026-02-06T09:55:31.825Z" }, - { url = 
"https://files.pythonhosted.org/packages/8a/a8/4482922da832ec0082d0f2cc3a10976d84a7424707f25780b82814aafc0a/grpcio-1.78.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b2342d87af32790f934a79c3112641e7b27d63c261b8b4395350dad43eff1dc7", size = 7170027, upload-time = "2026-02-06T09:55:34.7Z" }, - { url = "https://files.pythonhosted.org/packages/54/bf/f4a3b9693e35d25b24b0b39fa46d7d8a3c439e0a3036c3451764678fec20/grpcio-1.78.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:12a771591ae40bc65ba67048fa52ef4f0e6db8279e595fd349f9dfddeef571f9", size = 6690766, upload-time = "2026-02-06T09:55:36.902Z" }, - { url = "https://files.pythonhosted.org/packages/c7/b9/521875265cc99fe5ad4c5a17010018085cae2810a928bf15ebe7d8bcd9cc/grpcio-1.78.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:185dea0d5260cbb2d224c507bf2a5444d5abbb1fa3594c1ed7e4c709d5eb8383", size = 7266161, upload-time = "2026-02-06T09:55:39.824Z" }, - { url = "https://files.pythonhosted.org/packages/05/86/296a82844fd40a4ad4a95f100b55044b4f817dece732bf686aea1a284147/grpcio-1.78.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:51b13f9aed9d59ee389ad666b8c2214cc87b5de258fa712f9ab05f922e3896c6", size = 8253303, upload-time = "2026-02-06T09:55:42.353Z" }, - { url = "https://files.pythonhosted.org/packages/f3/e4/ea3c0caf5468537f27ad5aab92b681ed7cc0ef5f8c9196d3fd42c8c2286b/grpcio-1.78.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fd5f135b1bd58ab088930b3c613455796dfa0393626a6972663ccdda5b4ac6ce", size = 7698222, upload-time = "2026-02-06T09:55:44.629Z" }, - { url = "https://files.pythonhosted.org/packages/d7/47/7f05f81e4bb6b831e93271fb12fd52ba7b319b5402cbc101d588f435df00/grpcio-1.78.0-cp312-cp312-win32.whl", hash = "sha256:94309f498bcc07e5a7d16089ab984d42ad96af1d94b5a4eb966a266d9fcabf68", size = 4066123, upload-time = "2026-02-06T09:55:47.644Z" }, - { url = 
"https://files.pythonhosted.org/packages/ad/e7/d6914822c88aa2974dbbd10903d801a28a19ce9cd8bad7e694cbbcf61528/grpcio-1.78.0-cp312-cp312-win_amd64.whl", hash = "sha256:9566fe4ababbb2610c39190791e5b829869351d14369603702e890ef3ad2d06e", size = 4797657, upload-time = "2026-02-06T09:55:49.86Z" }, - { url = "https://files.pythonhosted.org/packages/05/a9/8f75894993895f361ed8636cd9237f4ab39ef87fd30db17467235ed1c045/grpcio-1.78.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:ce3a90455492bf8bfa38e56fbbe1dbd4f872a3d8eeaf7337dc3b1c8aa28c271b", size = 5920143, upload-time = "2026-02-06T09:55:52.035Z" }, - { url = "https://files.pythonhosted.org/packages/55/06/0b78408e938ac424100100fd081189451b472236e8a3a1f6500390dc4954/grpcio-1.78.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:2bf5e2e163b356978b23652c4818ce4759d40f4712ee9ec5a83c4be6f8c23a3a", size = 11803926, upload-time = "2026-02-06T09:55:55.494Z" }, - { url = "https://files.pythonhosted.org/packages/88/93/b59fe7832ff6ae3c78b813ea43dac60e295fa03606d14d89d2e0ec29f4f3/grpcio-1.78.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8f2ac84905d12918e4e55a16da17939eb63e433dc11b677267c35568aa63fc84", size = 6478628, upload-time = "2026-02-06T09:55:58.533Z" }, - { url = "https://files.pythonhosted.org/packages/ed/df/e67e3734527f9926b7d9c0dde6cd998d1d26850c3ed8eeec81297967ac67/grpcio-1.78.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b58f37edab4a3881bc6c9bca52670610e0c9ca14e2ea3cf9debf185b870457fb", size = 7173574, upload-time = "2026-02-06T09:56:01.786Z" }, - { url = "https://files.pythonhosted.org/packages/a6/62/cc03fffb07bfba982a9ec097b164e8835546980aec25ecfa5f9c1a47e022/grpcio-1.78.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:735e38e176a88ce41840c21bb49098ab66177c64c82426e24e0082500cc68af5", size = 6692639, upload-time = "2026-02-06T09:56:04.529Z" }, - { url = 
"https://files.pythonhosted.org/packages/bf/9a/289c32e301b85bdb67d7ec68b752155e674ee3ba2173a1858f118e399ef3/grpcio-1.78.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2045397e63a7a0ee7957c25f7dbb36ddc110e0cfb418403d110c0a7a68a844e9", size = 7268838, upload-time = "2026-02-06T09:56:08.397Z" }, - { url = "https://files.pythonhosted.org/packages/0e/79/1be93f32add280461fa4773880196572563e9c8510861ac2da0ea0f892b6/grpcio-1.78.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9f136fbafe7ccf4ac7e8e0c28b31066e810be52d6e344ef954a3a70234e1702", size = 8251878, upload-time = "2026-02-06T09:56:10.914Z" }, - { url = "https://files.pythonhosted.org/packages/65/65/793f8e95296ab92e4164593674ae6291b204bb5f67f9d4a711489cd30ffa/grpcio-1.78.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:748b6138585379c737adc08aeffd21222abbda1a86a0dca2a39682feb9196c20", size = 7695412, upload-time = "2026-02-06T09:56:13.593Z" }, - { url = "https://files.pythonhosted.org/packages/1c/9f/1e233fe697ecc82845942c2822ed06bb522e70d6771c28d5528e4c50f6a4/grpcio-1.78.0-cp313-cp313-win32.whl", hash = "sha256:271c73e6e5676afe4fc52907686670c7cea22ab2310b76a59b678403ed40d670", size = 4064899, upload-time = "2026-02-06T09:56:15.601Z" }, - { url = "https://files.pythonhosted.org/packages/4d/27/d86b89e36de8a951501fb06a0f38df19853210f341d0b28f83f4aa0ffa08/grpcio-1.78.0-cp313-cp313-win_amd64.whl", hash = "sha256:f2d4e43ee362adfc05994ed479334d5a451ab7bc3f3fee1b796b8ca66895acb4", size = 4797393, upload-time = "2026-02-06T09:56:17.882Z" }, - { url = "https://files.pythonhosted.org/packages/29/f2/b56e43e3c968bfe822fa6ce5bca10d5c723aa40875b48791ce1029bb78c7/grpcio-1.78.0-cp314-cp314-linux_armv7l.whl", hash = "sha256:e87cbc002b6f440482b3519e36e1313eb5443e9e9e73d6a52d43bd2004fcfd8e", size = 5920591, upload-time = "2026-02-06T09:56:20.758Z" }, - { url = 
"https://files.pythonhosted.org/packages/5d/81/1f3b65bd30c334167bfa8b0d23300a44e2725ce39bba5b76a2460d85f745/grpcio-1.78.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:c41bc64626db62e72afec66b0c8a0da76491510015417c127bfc53b2fe6d7f7f", size = 11813685, upload-time = "2026-02-06T09:56:24.315Z" }, - { url = "https://files.pythonhosted.org/packages/0e/1c/bbe2f8216a5bd3036119c544d63c2e592bdf4a8ec6e4a1867592f4586b26/grpcio-1.78.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8dfffba826efcf366b1e3ccc37e67afe676f290e13a3b48d31a46739f80a8724", size = 6487803, upload-time = "2026-02-06T09:56:27.367Z" }, - { url = "https://files.pythonhosted.org/packages/16/5c/a6b2419723ea7ddce6308259a55e8e7593d88464ce8db9f4aa857aba96fa/grpcio-1.78.0-cp314-cp314-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:74be1268d1439eaaf552c698cdb11cd594f0c49295ae6bb72c34ee31abbe611b", size = 7173206, upload-time = "2026-02-06T09:56:29.876Z" }, - { url = "https://files.pythonhosted.org/packages/df/1e/b8801345629a415ea7e26c83d75eb5dbe91b07ffe5210cc517348a8d4218/grpcio-1.78.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:be63c88b32e6c0f1429f1398ca5c09bc64b0d80950c8bb7807d7d7fb36fb84c7", size = 6693826, upload-time = "2026-02-06T09:56:32.305Z" }, - { url = "https://files.pythonhosted.org/packages/34/84/0de28eac0377742679a510784f049738a80424b17287739fc47d63c2439e/grpcio-1.78.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:3c586ac70e855c721bda8f548d38c3ca66ac791dc49b66a8281a1f99db85e452", size = 7277897, upload-time = "2026-02-06T09:56:34.915Z" }, - { url = "https://files.pythonhosted.org/packages/ca/9c/ad8685cfe20559a9edb66f735afdcb2b7d3de69b13666fdfc542e1916ebd/grpcio-1.78.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:35eb275bf1751d2ffbd8f57cdbc46058e857cf3971041521b78b7db94bdaf127", size = 8252404, upload-time = "2026-02-06T09:56:37.553Z" }, - { url = 
"https://files.pythonhosted.org/packages/3c/05/33a7a4985586f27e1de4803887c417ec7ced145ebd069bc38a9607059e2b/grpcio-1.78.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:207db540302c884b8848036b80db352a832b99dfdf41db1eb554c2c2c7800f65", size = 7696837, upload-time = "2026-02-06T09:56:40.173Z" }, - { url = "https://files.pythonhosted.org/packages/73/77/7382241caf88729b106e49e7d18e3116216c778e6a7e833826eb96de22f7/grpcio-1.78.0-cp314-cp314-win32.whl", hash = "sha256:57bab6deef2f4f1ca76cc04565df38dc5713ae6c17de690721bdf30cb1e0545c", size = 4142439, upload-time = "2026-02-06T09:56:43.258Z" }, - { url = "https://files.pythonhosted.org/packages/48/b2/b096ccce418882fbfda4f7496f9357aaa9a5af1896a9a7f60d9f2b275a06/grpcio-1.78.0-cp314-cp314-win_amd64.whl", hash = "sha256:dce09d6116df20a96acfdbf85e4866258c3758180e8c49845d6ba8248b6d0bbb", size = 4929852, upload-time = "2026-02-06T09:56:45.885Z" }, -] - [[package]] name = "h11" version = "0.16.0" @@ -1684,18 +1500,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, ] -[[package]] -name = "importlib-metadata" -version = "8.7.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "zipp" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, 
upload-time = "2025-12-21T10:00:18.329Z" }, -] - [[package]] name = "iniconfig" version = "2.3.0" @@ -1872,103 +1676,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, ] -[[package]] -name = "jiter" -version = "0.13.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0d/5e/4ec91646aee381d01cdb9974e30882c9cd3b8c5d1079d6b5ff4af522439a/jiter-0.13.0.tar.gz", hash = "sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4", size = 164847, upload-time = "2026-02-02T12:37:56.441Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/5a/41da76c5ea07bec1b0472b6b2fdb1b651074d504b19374d7e130e0cdfb25/jiter-0.13.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2ffc63785fd6c7977defe49b9824ae6ce2b2e2b77ce539bdaf006c26da06342e", size = 311164, upload-time = "2026-02-02T12:35:17.688Z" }, - { url = "https://files.pythonhosted.org/packages/40/cb/4a1bf994a3e869f0d39d10e11efb471b76d0ad70ecbfb591427a46c880c2/jiter-0.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a638816427006c1e3f0013eb66d391d7a3acda99a7b0cf091eff4497ccea33a", size = 320296, upload-time = "2026-02-02T12:35:19.828Z" }, - { url = "https://files.pythonhosted.org/packages/09/82/acd71ca9b50ecebadc3979c541cd717cce2fe2bc86236f4fa597565d8f1a/jiter-0.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19928b5d1ce0ff8c1ee1b9bdef3b5bfc19e8304f1b904e436caf30bc15dc6cf5", size = 352742, upload-time = "2026-02-02T12:35:21.258Z" }, - { url = "https://files.pythonhosted.org/packages/71/03/d1fc996f3aecfd42eb70922edecfb6dd26421c874503e241153ad41df94f/jiter-0.13.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:309549b778b949d731a2f0e1594a3f805716be704a73bf3ad9a807eed5eb5721", size = 363145, upload-time = "2026-02-02T12:35:24.653Z" }, - { url = "https://files.pythonhosted.org/packages/f1/61/a30492366378cc7a93088858f8991acd7d959759fe6138c12a4644e58e81/jiter-0.13.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcdabaea26cb04e25df3103ce47f97466627999260290349a88c8136ecae0060", size = 487683, upload-time = "2026-02-02T12:35:26.162Z" }, - { url = "https://files.pythonhosted.org/packages/20/4e/4223cffa9dbbbc96ed821c5aeb6bca510848c72c02086d1ed3f1da3d58a7/jiter-0.13.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a3a377af27b236abbf665a69b2bdd680e3b5a0bd2af825cd3b81245279a7606c", size = 373579, upload-time = "2026-02-02T12:35:27.582Z" }, - { url = "https://files.pythonhosted.org/packages/fe/c9/b0489a01329ab07a83812d9ebcffe7820a38163c6d9e7da644f926ff877c/jiter-0.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe49d3ff6db74321f144dff9addd4a5874d3105ac5ba7c5b77fac099cfae31ae", size = 362904, upload-time = "2026-02-02T12:35:28.925Z" }, - { url = "https://files.pythonhosted.org/packages/05/af/53e561352a44afcba9a9bc67ee1d320b05a370aed8df54eafe714c4e454d/jiter-0.13.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2113c17c9a67071b0f820733c0893ed1d467b5fcf4414068169e5c2cabddb1e2", size = 392380, upload-time = "2026-02-02T12:35:30.385Z" }, - { url = "https://files.pythonhosted.org/packages/76/2a/dd805c3afb8ed5b326c5ae49e725d1b1255b9754b1b77dbecdc621b20773/jiter-0.13.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ab1185ca5c8b9491b55ebf6c1e8866b8f68258612899693e24a92c5fdb9455d5", size = 517939, upload-time = "2026-02-02T12:35:31.865Z" }, - { url = "https://files.pythonhosted.org/packages/20/2a/7b67d76f55b8fe14c937e7640389612f05f9a4145fc28ae128aaa5e62257/jiter-0.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:9621ca242547edc16400981ca3231e0c91c0c4c1ab8573a596cd9bb3575d5c2b", size = 551696, upload-time = "2026-02-02T12:35:33.306Z" }, - { url = "https://files.pythonhosted.org/packages/85/9c/57cdd64dac8f4c6ab8f994fe0eb04dc9fd1db102856a4458fcf8a99dfa62/jiter-0.13.0-cp310-cp310-win32.whl", hash = "sha256:a7637d92b1c9d7a771e8c56f445c7f84396d48f2e756e5978840ecba2fac0894", size = 204592, upload-time = "2026-02-02T12:35:34.58Z" }, - { url = "https://files.pythonhosted.org/packages/a7/38/f4f3ea5788b8a5bae7510a678cdc747eda0c45ffe534f9878ff37e7cf3b3/jiter-0.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c1b609e5cbd2f52bb74fb721515745b407df26d7b800458bd97cb3b972c29e7d", size = 206016, upload-time = "2026-02-02T12:35:36.435Z" }, - { url = "https://files.pythonhosted.org/packages/71/29/499f8c9eaa8a16751b1c0e45e6f5f1761d180da873d417996cc7bddc8eef/jiter-0.13.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ea026e70a9a28ebbdddcbcf0f1323128a8db66898a06eaad3a4e62d2f554d096", size = 311157, upload-time = "2026-02-02T12:35:37.758Z" }, - { url = "https://files.pythonhosted.org/packages/50/f6/566364c777d2ab450b92100bea11333c64c38d32caf8dc378b48e5b20c46/jiter-0.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66aa3e663840152d18cc8ff1e4faad3dd181373491b9cfdc6004b92198d67911", size = 319729, upload-time = "2026-02-02T12:35:39.246Z" }, - { url = "https://files.pythonhosted.org/packages/73/dd/560f13ec5e4f116d8ad2658781646cca91b617ae3b8758d4a5076b278f70/jiter-0.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3524798e70655ff19aec58c7d05adb1f074fecff62da857ea9be2b908b6d701", size = 354766, upload-time = "2026-02-02T12:35:40.662Z" }, - { url = "https://files.pythonhosted.org/packages/7c/0d/061faffcfe94608cbc28a0d42a77a74222bdf5055ccdbe5fd2292b94f510/jiter-0.13.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec7e287d7fbd02cb6e22f9a00dd9c9cd504c40a61f2c61e7e1f9690a82726b4c", size = 362587, upload-time = 
"2026-02-02T12:35:42.025Z" }, - { url = "https://files.pythonhosted.org/packages/92/c9/c66a7864982fd38a9773ec6e932e0398d1262677b8c60faecd02ffb67bf3/jiter-0.13.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47455245307e4debf2ce6c6e65a717550a0244231240dcf3b8f7d64e4c2f22f4", size = 487537, upload-time = "2026-02-02T12:35:43.459Z" }, - { url = "https://files.pythonhosted.org/packages/6c/86/84eb4352cd3668f16d1a88929b5888a3fe0418ea8c1dfc2ad4e7bf6e069a/jiter-0.13.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ee9da221dca6e0429c2704c1b3655fe7b025204a71d4d9b73390c759d776d165", size = 373717, upload-time = "2026-02-02T12:35:44.928Z" }, - { url = "https://files.pythonhosted.org/packages/6e/09/9fe4c159358176f82d4390407a03f506a8659ed13ca3ac93a843402acecf/jiter-0.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24ab43126d5e05f3d53a36a8e11eb2f23304c6c1117844aaaf9a0aa5e40b5018", size = 362683, upload-time = "2026-02-02T12:35:46.636Z" }, - { url = "https://files.pythonhosted.org/packages/c9/5e/85f3ab9caca0c1d0897937d378b4a515cae9e119730563572361ea0c48ae/jiter-0.13.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9da38b4fedde4fb528c740c2564628fbab737166a0e73d6d46cb4bb5463ff411", size = 392345, upload-time = "2026-02-02T12:35:48.088Z" }, - { url = "https://files.pythonhosted.org/packages/12/4c/05b8629ad546191939e6f0c2f17e29f542a398f4a52fb987bc70b6d1eb8b/jiter-0.13.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b34c519e17658ed88d5047999a93547f8889f3c1824120c26ad6be5f27b6cf5", size = 517775, upload-time = "2026-02-02T12:35:49.482Z" }, - { url = "https://files.pythonhosted.org/packages/4d/88/367ea2eb6bc582c7052e4baf5ddf57ebe5ab924a88e0e09830dfb585c02d/jiter-0.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2a6394e6af690d462310a86b53c47ad75ac8c21dc79f120714ea449979cb1d3", size = 551325, upload-time = "2026-02-02T12:35:51.104Z" }, - { url = 
"https://files.pythonhosted.org/packages/f3/12/fa377ffb94a2f28c41afaed093e0d70cfe512035d5ecb0cad0ae4792d35e/jiter-0.13.0-cp311-cp311-win32.whl", hash = "sha256:0f0c065695f616a27c920a56ad0d4fc46415ef8b806bf8fc1cacf25002bd24e1", size = 204709, upload-time = "2026-02-02T12:35:52.467Z" }, - { url = "https://files.pythonhosted.org/packages/cb/16/8e8203ce92f844dfcd3d9d6a5a7322c77077248dbb12da52d23193a839cd/jiter-0.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:0733312953b909688ae3c2d58d043aa040f9f1a6a75693defed7bc2cc4bf2654", size = 204560, upload-time = "2026-02-02T12:35:53.925Z" }, - { url = "https://files.pythonhosted.org/packages/44/26/97cc40663deb17b9e13c3a5cf29251788c271b18ee4d262c8f94798b8336/jiter-0.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:5d9b34ad56761b3bf0fbe8f7e55468704107608512350962d3317ffd7a4382d5", size = 189608, upload-time = "2026-02-02T12:35:55.304Z" }, - { url = "https://files.pythonhosted.org/packages/2e/30/7687e4f87086829955013ca12a9233523349767f69653ebc27036313def9/jiter-0.13.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0a2bd69fc1d902e89925fc34d1da51b2128019423d7b339a45d9e99c894e0663", size = 307958, upload-time = "2026-02-02T12:35:57.165Z" }, - { url = "https://files.pythonhosted.org/packages/c3/27/e57f9a783246ed95481e6749cc5002a8a767a73177a83c63ea71f0528b90/jiter-0.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f917a04240ef31898182f76a332f508f2cc4b57d2b4d7ad2dbfebbfe167eb505", size = 318597, upload-time = "2026-02-02T12:35:58.591Z" }, - { url = "https://files.pythonhosted.org/packages/cf/52/e5719a60ac5d4d7c5995461a94ad5ef962a37c8bf5b088390e6fad59b2ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1e2b199f446d3e82246b4fd9236d7cb502dc2222b18698ba0d986d2fecc6152", size = 348821, upload-time = "2026-02-02T12:36:00.093Z" }, - { url = 
"https://files.pythonhosted.org/packages/61/db/c1efc32b8ba4c740ab3fc2d037d8753f67685f475e26b9d6536a4322bcdd/jiter-0.13.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04670992b576fa65bd056dbac0c39fe8bd67681c380cb2b48efa885711d9d726", size = 364163, upload-time = "2026-02-02T12:36:01.937Z" }, - { url = "https://files.pythonhosted.org/packages/55/8a/fb75556236047c8806995671a18e4a0ad646ed255276f51a20f32dceaeec/jiter-0.13.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a1aff1fbdb803a376d4d22a8f63f8e7ccbce0b4890c26cc7af9e501ab339ef0", size = 483709, upload-time = "2026-02-02T12:36:03.41Z" }, - { url = "https://files.pythonhosted.org/packages/7e/16/43512e6ee863875693a8e6f6d532e19d650779d6ba9a81593ae40a9088ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b3fb8c2053acaef8580809ac1d1f7481a0a0bdc012fd7f5d8b18fb696a5a089", size = 370480, upload-time = "2026-02-02T12:36:04.791Z" }, - { url = "https://files.pythonhosted.org/packages/f8/4c/09b93e30e984a187bc8aaa3510e1ec8dcbdcd71ca05d2f56aac0492453aa/jiter-0.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdaba7d87e66f26a2c45d8cbadcbfc4bf7884182317907baf39cfe9775bb4d93", size = 360735, upload-time = "2026-02-02T12:36:06.994Z" }, - { url = "https://files.pythonhosted.org/packages/1a/1b/46c5e349019874ec5dfa508c14c37e29864ea108d376ae26d90bee238cd7/jiter-0.13.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b88d649135aca526da172e48083da915ec086b54e8e73a425ba50999468cc08", size = 391814, upload-time = "2026-02-02T12:36:08.368Z" }, - { url = "https://files.pythonhosted.org/packages/15/9e/26184760e85baee7162ad37b7912797d2077718476bf91517641c92b3639/jiter-0.13.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e404ea551d35438013c64b4f357b0474c7abf9f781c06d44fcaf7a14c69ff9e2", size = 513990, upload-time = "2026-02-02T12:36:09.993Z" }, - { url = 
"https://files.pythonhosted.org/packages/e9/34/2c9355247d6debad57a0a15e76ab1566ab799388042743656e566b3b7de1/jiter-0.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1f4748aad1b4a93c8bdd70f604d0f748cdc0e8744c5547798acfa52f10e79228", size = 548021, upload-time = "2026-02-02T12:36:11.376Z" }, - { url = "https://files.pythonhosted.org/packages/ac/4a/9f2c23255d04a834398b9c2e0e665382116911dc4d06b795710503cdad25/jiter-0.13.0-cp312-cp312-win32.whl", hash = "sha256:0bf670e3b1445fc4d31612199f1744f67f889ee1bbae703c4b54dc097e5dd394", size = 203024, upload-time = "2026-02-02T12:36:12.682Z" }, - { url = "https://files.pythonhosted.org/packages/09/ee/f0ae675a957ae5a8f160be3e87acea6b11dc7b89f6b7ab057e77b2d2b13a/jiter-0.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:15db60e121e11fe186c0b15236bd5d18381b9ddacdcf4e659feb96fc6c969c92", size = 205424, upload-time = "2026-02-02T12:36:13.93Z" }, - { url = "https://files.pythonhosted.org/packages/1b/02/ae611edf913d3cbf02c97cdb90374af2082c48d7190d74c1111dde08bcdd/jiter-0.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:41f92313d17989102f3cb5dd533a02787cdb99454d494344b0361355da52fcb9", size = 186818, upload-time = "2026-02-02T12:36:15.308Z" }, - { url = "https://files.pythonhosted.org/packages/91/9c/7ee5a6ff4b9991e1a45263bfc46731634c4a2bde27dfda6c8251df2d958c/jiter-0.13.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1f8a55b848cbabf97d861495cd65f1e5c590246fabca8b48e1747c4dfc8f85bf", size = 306897, upload-time = "2026-02-02T12:36:16.748Z" }, - { url = "https://files.pythonhosted.org/packages/7c/02/be5b870d1d2be5dd6a91bdfb90f248fbb7dcbd21338f092c6b89817c3dbf/jiter-0.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f556aa591c00f2c45eb1b89f68f52441a016034d18b65da60e2d2875bbbf344a", size = 317507, upload-time = "2026-02-02T12:36:18.351Z" }, - { url = 
"https://files.pythonhosted.org/packages/da/92/b25d2ec333615f5f284f3a4024f7ce68cfa0604c322c6808b2344c7f5d2b/jiter-0.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7e1d61da332ec412350463891923f960c3073cf1aae93b538f0bb4c8cd46efb", size = 350560, upload-time = "2026-02-02T12:36:19.746Z" }, - { url = "https://files.pythonhosted.org/packages/be/ec/74dcb99fef0aca9fbe56b303bf79f6bd839010cb18ad41000bf6cc71eec0/jiter-0.13.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3097d665a27bc96fd9bbf7f86178037db139f319f785e4757ce7ccbf390db6c2", size = 363232, upload-time = "2026-02-02T12:36:21.243Z" }, - { url = "https://files.pythonhosted.org/packages/1b/37/f17375e0bb2f6a812d4dd92d7616e41917f740f3e71343627da9db2824ce/jiter-0.13.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d01ecc3a8cbdb6f25a37bd500510550b64ddf9f7d64a107d92f3ccb25035d0f", size = 483727, upload-time = "2026-02-02T12:36:22.688Z" }, - { url = "https://files.pythonhosted.org/packages/77/d2/a71160a5ae1a1e66c1395b37ef77da67513b0adba73b993a27fbe47eb048/jiter-0.13.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed9bbc30f5d60a3bdf63ae76beb3f9db280d7f195dfcfa61af792d6ce912d159", size = 370799, upload-time = "2026-02-02T12:36:24.106Z" }, - { url = "https://files.pythonhosted.org/packages/01/99/ed5e478ff0eb4e8aa5fd998f9d69603c9fd3f32de3bd16c2b1194f68361c/jiter-0.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fbafb6e88256f4454de33c1f40203d09fc33ed19162a68b3b257b29ca7f663", size = 359120, upload-time = "2026-02-02T12:36:25.519Z" }, - { url = "https://files.pythonhosted.org/packages/16/be/7ffd08203277a813f732ba897352797fa9493faf8dc7995b31f3d9cb9488/jiter-0.13.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5467696f6b827f1116556cb0db620440380434591e93ecee7fd14d1a491b6daa", size = 390664, upload-time = "2026-02-02T12:36:26.866Z" }, - { url = 
"https://files.pythonhosted.org/packages/d1/84/e0787856196d6d346264d6dcccb01f741e5f0bd014c1d9a2ebe149caf4f3/jiter-0.13.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2d08c9475d48b92892583df9da592a0e2ac49bcd41fae1fec4f39ba6cf107820", size = 513543, upload-time = "2026-02-02T12:36:28.217Z" }, - { url = "https://files.pythonhosted.org/packages/65/50/ecbd258181c4313cf79bca6c88fb63207d04d5bf5e4f65174114d072aa55/jiter-0.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:aed40e099404721d7fcaf5b89bd3b4568a4666358bcac7b6b15c09fb6252ab68", size = 547262, upload-time = "2026-02-02T12:36:29.678Z" }, - { url = "https://files.pythonhosted.org/packages/27/da/68f38d12e7111d2016cd198161b36e1f042bd115c169255bcb7ec823a3bf/jiter-0.13.0-cp313-cp313-win32.whl", hash = "sha256:36ebfbcffafb146d0e6ffb3e74d51e03d9c35ce7c625c8066cdbfc7b953bdc72", size = 200630, upload-time = "2026-02-02T12:36:31.808Z" }, - { url = "https://files.pythonhosted.org/packages/25/65/3bd1a972c9a08ecd22eb3b08a95d1941ebe6938aea620c246cf426ae09c2/jiter-0.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:8d76029f077379374cf0dbc78dbe45b38dec4a2eb78b08b5194ce836b2517afc", size = 202602, upload-time = "2026-02-02T12:36:33.679Z" }, - { url = "https://files.pythonhosted.org/packages/15/fe/13bd3678a311aa67686bb303654792c48206a112068f8b0b21426eb6851e/jiter-0.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:bb7613e1a427cfcb6ea4544f9ac566b93d5bf67e0d48c787eca673ff9c9dff2b", size = 185939, upload-time = "2026-02-02T12:36:35.065Z" }, - { url = "https://files.pythonhosted.org/packages/49/19/a929ec002ad3228bc97ca01dbb14f7632fffdc84a95ec92ceaf4145688ae/jiter-0.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fa476ab5dd49f3bf3a168e05f89358c75a17608dbabb080ef65f96b27c19ab10", size = 316616, upload-time = "2026-02-02T12:36:36.579Z" }, - { url = 
"https://files.pythonhosted.org/packages/52/56/d19a9a194afa37c1728831e5fb81b7722c3de18a3109e8f282bfc23e587a/jiter-0.13.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade8cb6ff5632a62b7dbd4757d8c5573f7a2e9ae285d6b5b841707d8363205ef", size = 346850, upload-time = "2026-02-02T12:36:38.058Z" }, - { url = "https://files.pythonhosted.org/packages/36/4a/94e831c6bf287754a8a019cb966ed39ff8be6ab78cadecf08df3bb02d505/jiter-0.13.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9950290340acc1adaded363edd94baebcee7dabdfa8bee4790794cd5cfad2af6", size = 358551, upload-time = "2026-02-02T12:36:39.417Z" }, - { url = "https://files.pythonhosted.org/packages/a2/ec/a4c72c822695fa80e55d2b4142b73f0012035d9fcf90eccc56bc060db37c/jiter-0.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2b4972c6df33731aac0742b64fd0d18e0a69bc7d6e03108ce7d40c85fd9e3e6d", size = 201950, upload-time = "2026-02-02T12:36:40.791Z" }, - { url = "https://files.pythonhosted.org/packages/b6/00/393553ec27b824fbc29047e9c7cd4a3951d7fbe4a76743f17e44034fa4e4/jiter-0.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:701a1e77d1e593c1b435315ff625fd071f0998c5f02792038a5ca98899261b7d", size = 185852, upload-time = "2026-02-02T12:36:42.077Z" }, - { url = "https://files.pythonhosted.org/packages/6e/f5/f1997e987211f6f9bd71b8083047b316208b4aca0b529bb5f8c96c89ef3e/jiter-0.13.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:cc5223ab19fe25e2f0bf2643204ad7318896fe3729bf12fde41b77bfc4fafff0", size = 308804, upload-time = "2026-02-02T12:36:43.496Z" }, - { url = "https://files.pythonhosted.org/packages/cd/8f/5482a7677731fd44881f0204981ce2d7175db271f82cba2085dd2212e095/jiter-0.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9776ebe51713acf438fd9b4405fcd86893ae5d03487546dae7f34993217f8a91", size = 318787, upload-time = "2026-02-02T12:36:45.071Z" }, - { url = 
"https://files.pythonhosted.org/packages/f3/b9/7257ac59778f1cd025b26a23c5520a36a424f7f1b068f2442a5b499b7464/jiter-0.13.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879e768938e7b49b5e90b7e3fecc0dbec01b8cb89595861fb39a8967c5220d09", size = 353880, upload-time = "2026-02-02T12:36:47.365Z" }, - { url = "https://files.pythonhosted.org/packages/c3/87/719eec4a3f0841dad99e3d3604ee4cba36af4419a76f3cb0b8e2e691ad67/jiter-0.13.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:682161a67adea11e3aae9038c06c8b4a9a71023228767477d683f69903ebc607", size = 366702, upload-time = "2026-02-02T12:36:48.871Z" }, - { url = "https://files.pythonhosted.org/packages/d2/65/415f0a75cf6921e43365a1bc227c565cb949caca8b7532776e430cbaa530/jiter-0.13.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a13b68cd1cd8cc9de8f244ebae18ccb3e4067ad205220ef324c39181e23bbf66", size = 486319, upload-time = "2026-02-02T12:36:53.006Z" }, - { url = "https://files.pythonhosted.org/packages/54/a2/9e12b48e82c6bbc6081fd81abf915e1443add1b13d8fc586e1d90bb02bb8/jiter-0.13.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87ce0f14c6c08892b610686ae8be350bf368467b6acd5085a5b65441e2bf36d2", size = 372289, upload-time = "2026-02-02T12:36:54.593Z" }, - { url = "https://files.pythonhosted.org/packages/4e/c1/e4693f107a1789a239c759a432e9afc592366f04e901470c2af89cfd28e1/jiter-0.13.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c365005b05505a90d1c47856420980d0237adf82f70c4aff7aebd3c1cc143ad", size = 360165, upload-time = "2026-02-02T12:36:56.112Z" }, - { url = "https://files.pythonhosted.org/packages/17/08/91b9ea976c1c758240614bd88442681a87672eebc3d9a6dde476874e706b/jiter-0.13.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1317fdffd16f5873e46ce27d0e0f7f4f90f0cdf1d86bf6abeaea9f63ca2c401d", size = 389634, upload-time = "2026-02-02T12:36:57.495Z" }, - { url = 
"https://files.pythonhosted.org/packages/18/23/58325ef99390d6d40427ed6005bf1ad54f2577866594bcf13ce55675f87d/jiter-0.13.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c05b450d37ba0c9e21c77fef1f205f56bcee2330bddca68d344baebfc55ae0df", size = 514933, upload-time = "2026-02-02T12:36:58.909Z" }, - { url = "https://files.pythonhosted.org/packages/5b/25/69f1120c7c395fd276c3996bb8adefa9c6b84c12bb7111e5c6ccdcd8526d/jiter-0.13.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:775e10de3849d0631a97c603f996f518159272db00fdda0a780f81752255ee9d", size = 548842, upload-time = "2026-02-02T12:37:00.433Z" }, - { url = "https://files.pythonhosted.org/packages/18/05/981c9669d86850c5fbb0d9e62bba144787f9fba84546ba43d624ee27ef29/jiter-0.13.0-cp314-cp314-win32.whl", hash = "sha256:632bf7c1d28421c00dd8bbb8a3bac5663e1f57d5cd5ed962bce3c73bf62608e6", size = 202108, upload-time = "2026-02-02T12:37:01.718Z" }, - { url = "https://files.pythonhosted.org/packages/8d/96/cdcf54dd0b0341db7d25413229888a346c7130bd20820530905fdb65727b/jiter-0.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:f22ef501c3f87ede88f23f9b11e608581c14f04db59b6a801f354397ae13739f", size = 204027, upload-time = "2026-02-02T12:37:03.075Z" }, - { url = "https://files.pythonhosted.org/packages/fb/f9/724bcaaab7a3cd727031fe4f6995cb86c4bd344909177c186699c8dec51a/jiter-0.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:07b75fe09a4ee8e0c606200622e571e44943f47254f95e2436c8bdcaceb36d7d", size = 187199, upload-time = "2026-02-02T12:37:04.414Z" }, - { url = "https://files.pythonhosted.org/packages/62/92/1661d8b9fd6a3d7a2d89831db26fe3c1509a287d83ad7838831c7b7a5c7e/jiter-0.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:964538479359059a35fb400e769295d4b315ae61e4105396d355a12f7fef09f0", size = 318423, upload-time = "2026-02-02T12:37:05.806Z" }, - { url = 
"https://files.pythonhosted.org/packages/4f/3b/f77d342a54d4ebcd128e520fc58ec2f5b30a423b0fd26acdfc0c6fef8e26/jiter-0.13.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e104da1db1c0991b3eaed391ccd650ae8d947eab1480c733e5a3fb28d4313e40", size = 351438, upload-time = "2026-02-02T12:37:07.189Z" }, - { url = "https://files.pythonhosted.org/packages/76/b3/ba9a69f0e4209bd3331470c723c2f5509e6f0482e416b612431a5061ed71/jiter-0.13.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e3a5f0cde8ff433b8e88e41aa40131455420fb3649a3c7abdda6145f8cb7202", size = 364774, upload-time = "2026-02-02T12:37:08.579Z" }, - { url = "https://files.pythonhosted.org/packages/b3/16/6cdb31fa342932602458dbb631bfbd47f601e03d2e4950740e0b2100b570/jiter-0.13.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57aab48f40be1db920a582b30b116fe2435d184f77f0e4226f546794cedd9cf0", size = 487238, upload-time = "2026-02-02T12:37:10.066Z" }, - { url = "https://files.pythonhosted.org/packages/ed/b1/956cc7abaca8d95c13aa8d6c9b3f3797241c246cd6e792934cc4c8b250d2/jiter-0.13.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7772115877c53f62beeb8fd853cab692dbc04374ef623b30f997959a4c0e7e95", size = 372892, upload-time = "2026-02-02T12:37:11.656Z" }, - { url = "https://files.pythonhosted.org/packages/26/c4/97ecde8b1e74f67b8598c57c6fccf6df86ea7861ed29da84629cdbba76c4/jiter-0.13.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1211427574b17b633cfceba5040de8081e5abf114f7a7602f73d2e16f9fdaa59", size = 360309, upload-time = "2026-02-02T12:37:13.244Z" }, - { url = "https://files.pythonhosted.org/packages/4b/d7/eabe3cf46715854ccc80be2cd78dd4c36aedeb30751dbf85a1d08c14373c/jiter-0.13.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7beae3a3d3b5212d3a55d2961db3c292e02e302feb43fce6a3f7a31b90ea6dfe", size = 389607, upload-time = "2026-02-02T12:37:14.881Z" }, - { url = 
"https://files.pythonhosted.org/packages/df/2d/03963fc0804e6109b82decfb9974eb92df3797fe7222428cae12f8ccaa0c/jiter-0.13.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e5562a0f0e90a6223b704163ea28e831bd3a9faa3512a711f031611e6b06c939", size = 514986, upload-time = "2026-02-02T12:37:16.326Z" }, - { url = "https://files.pythonhosted.org/packages/f6/6c/8c83b45eb3eb1c1e18d841fe30b4b5bc5619d781267ca9bc03e005d8fd0a/jiter-0.13.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:6c26a424569a59140fb51160a56df13f438a2b0967365e987889186d5fc2f6f9", size = 548756, upload-time = "2026-02-02T12:37:17.736Z" }, - { url = "https://files.pythonhosted.org/packages/47/66/eea81dfff765ed66c68fd2ed8c96245109e13c896c2a5015c7839c92367e/jiter-0.13.0-cp314-cp314t-win32.whl", hash = "sha256:24dc96eca9f84da4131cdf87a95e6ce36765c3b156fc9ae33280873b1c32d5f6", size = 201196, upload-time = "2026-02-02T12:37:19.101Z" }, - { url = "https://files.pythonhosted.org/packages/ff/32/4ac9c7a76402f8f00d00842a7f6b83b284d0cf7c1e9d4227bc95aa6d17fa/jiter-0.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0a8d76c7524087272c8ae913f5d9d608bd839154b62c4322ef65723d2e5bb0b8", size = 204215, upload-time = "2026-02-02T12:37:20.495Z" }, - { url = "https://files.pythonhosted.org/packages/f9/8e/7def204fea9f9be8b3c21a6f2dd6c020cf56c7d5ff753e0e23ed7f9ea57e/jiter-0.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2c26cf47e2cad140fa23b6d58d435a7c0161f5c514284802f25e87fddfe11024", size = 187152, upload-time = "2026-02-02T12:37:22.124Z" }, - { url = "https://files.pythonhosted.org/packages/79/b3/3c29819a27178d0e461a8571fb63c6ae38be6dc36b78b3ec2876bbd6a910/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b1cbfa133241d0e6bdab48dcdc2604e8ba81512f6bbd68ec3e8e1357dd3c316c", size = 307016, upload-time = "2026-02-02T12:37:42.755Z" }, - { url = 
"https://files.pythonhosted.org/packages/eb/ae/60993e4b07b1ac5ebe46da7aa99fdbb802eb986c38d26e3883ac0125c4e0/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:db367d8be9fad6e8ebbac4a7578b7af562e506211036cba2c06c3b998603c3d2", size = 305024, upload-time = "2026-02-02T12:37:44.774Z" }, - { url = "https://files.pythonhosted.org/packages/77/fa/2227e590e9cf98803db2811f172b2d6460a21539ab73006f251c66f44b14/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45f6f8efb2f3b0603092401dc2df79fa89ccbc027aaba4174d2d4133ed661434", size = 339337, upload-time = "2026-02-02T12:37:46.668Z" }, - { url = "https://files.pythonhosted.org/packages/2d/92/015173281f7eb96c0ef580c997da8ef50870d4f7f4c9e03c845a1d62ae04/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:597245258e6ad085d064780abfb23a284d418d3e61c57362d9449c6c7317ee2d", size = 346395, upload-time = "2026-02-02T12:37:48.09Z" }, - { url = "https://files.pythonhosted.org/packages/80/60/e50fa45dd7e2eae049f0ce964663849e897300433921198aef94b6ffa23a/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:3d744a6061afba08dd7ae375dcde870cffb14429b7477e10f67e9e6d68772a0a", size = 305169, upload-time = "2026-02-02T12:37:50.376Z" }, - { url = "https://files.pythonhosted.org/packages/d2/73/a009f41c5eed71c49bec53036c4b33555afcdee70682a18c6f66e396c039/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:ff732bd0a0e778f43d5009840f20b935e79087b4dc65bd36f1cd0f9b04b8ff7f", size = 303808, upload-time = "2026-02-02T12:37:52.092Z" }, - { url = "https://files.pythonhosted.org/packages/c4/10/528b439290763bff3d939268085d03382471b442f212dca4ff5f12802d43/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab44b178f7981fcaea7e0a5df20e773c663d06ffda0198f1a524e91b2fde7e59", size = 337384, 
upload-time = "2026-02-02T12:37:53.582Z" }, - { url = "https://files.pythonhosted.org/packages/67/8a/a342b2f0251f3dac4ca17618265d93bf244a2a4d089126e81e4c1056ac50/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb00b6d26db67a05fe3e12c76edc75f32077fb51deed13822dc648fa373bc19", size = 343768, upload-time = "2026-02-02T12:37:55.055Z" }, -] - [[package]] name = "json-repair" version = "0.58.5" @@ -2381,31 +2088,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/82/3d/14ce75ef66813643812f3093ab17e46d3a206942ce7376d31ec2d36229e7/lark-1.3.1-py3-none-any.whl", hash = "sha256:c629b661023a014c37da873b4ff58a817398d12635d3bbb2c5a03be7fe5d1e12", size = 113151, upload-time = "2025-10-27T18:25:54.882Z" }, ] -[[package]] -name = "litellm" -version = "1.80.11" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "click" }, - { name = "fastuuid" }, - { name = "grpcio", version = "1.67.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" }, - { name = "grpcio", version = "1.78.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, - { name = "httpx" }, - { name = "importlib-metadata" }, - { name = "jinja2" }, - { name = "jsonschema" }, - { name = "openai" }, - { name = "pydantic" }, - { name = "python-dotenv" }, - { name = "tiktoken" }, - { name = "tokenizers" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/55/47/be6cd7b356418ca8bef3b843507940ce77b76ef2dfe515f2b4ba9b461ff0/litellm-1.80.11.tar.gz", hash = "sha256:c9fc63e7acb6360363238fe291bcff1488c59ff66020416d8376c0ee56414a19", size = 13189510, upload-time = "2025-12-22T12:47:29.181Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/97/0b/9e637344f24f3fe0e8039cd2337389fe05e0d31f518bc3e0a5cdbe45784a/litellm-1.80.11-py3-none-any.whl", hash = 
"sha256:406283d66ead77dc7ff0e0b2559c80e9e497d8e7c2257efb1cb9210a20d09d54", size = 11456346, upload-time = "2025-12-22T12:47:26.469Z" }, -] - [[package]] name = "lxml" version = "6.0.2" @@ -3317,25 +2999,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/57/a7/b35835e278c18b85206834b3aa3abe68e77a98769c59233d1f6300284781/numpy-2.4.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4b42639cdde6d24e732ff823a3fa5b701d8acad89c4142bc1d0bd6dc85200ba5", size = 12504685, upload-time = "2026-03-09T07:58:50.525Z" }, ] -[[package]] -name = "openai" -version = "2.26.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "distro" }, - { name = "httpx" }, - { name = "jiter" }, - { name = "pydantic" }, - { name = "sniffio" }, - { name = "tqdm" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/d7/91/2a06c4e9597c338cac1e5e5a8dd6f29e1836fc229c4c523529dca387fda8/openai-2.26.0.tar.gz", hash = "sha256:b41f37c140ae0034a6e92b0c509376d907f3a66109935fba2c1b471a7c05a8fb", size = 666702, upload-time = "2026-03-05T23:17:35.874Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c6/2e/3f73e8ca53718952222cacd0cf7eecc9db439d020f0c1fe7ae717e4e199a/openai-2.26.0-py3-none-any.whl", hash = "sha256:6151bf8f83802f036117f06cc8a57b3a4da60da9926826cc96747888b57f394f", size = 1136409, upload-time = "2026-03-05T23:17:34.072Z" }, -] - [[package]] name = "overrides" version = "7.7.0" @@ -4882,15 +4545,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] -[[package]] -name = "sniffio" -version = "1.3.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, -] - [[package]] name = "soupsieve" version = "2.8.3" @@ -5060,36 +4714,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610, upload-time = "2024-10-24T14:58:28.029Z" }, ] -[[package]] -name = "tokenizers" -version = "0.22.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "huggingface-hub" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" }, - { url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, 
upload-time = "2026-01-05T10:41:00.276Z" }, - { url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" }, - { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" }, - { url = "https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" }, - { url = "https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" }, - { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" }, - { url = "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" }, - { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" }, - { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" }, - { url = "https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" }, - { url = "https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" }, - { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" }, - { url = "https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" }, 
- { url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" }, - { url = "https://files.pythonhosted.org/packages/84/04/655b79dbcc9b3ac5f1479f18e931a344af67e5b7d3b251d2dcdcd7558592/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:753d47ebd4542742ef9261d9da92cd545b2cacbb48349a1225466745bb866ec4", size = 3282301, upload-time = "2026-01-05T10:40:34.858Z" }, - { url = "https://files.pythonhosted.org/packages/46/cd/e4851401f3d8f6f45d8480262ab6a5c8cb9c4302a790a35aa14eeed6d2fd/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e10bf9113d209be7cd046d40fbabbaf3278ff6d18eb4da4c500443185dc1896c", size = 3161308, upload-time = "2026-01-05T10:40:40.737Z" }, - { url = "https://files.pythonhosted.org/packages/6f/6e/55553992a89982cd12d4a66dddb5e02126c58677ea3931efcbe601d419db/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64d94e84f6660764e64e7e0b22baa72f6cd942279fdbb21d46abd70d179f0195", size = 3718964, upload-time = "2026-01-05T10:40:46.56Z" }, - { url = "https://files.pythonhosted.org/packages/59/8c/b1c87148aa15e099243ec9f0cf9d0e970cc2234c3257d558c25a2c5304e6/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f01a9c019878532f98927d2bacb79bbb404b43d3437455522a00a30718cdedb5", size = 3373542, upload-time = "2026-01-05T10:40:52.803Z" }, -] - [[package]] name = "tomli" version = "2.4.0" @@ -5618,12 +5242,3 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/51/47/3fa2286c3cb162c71cdb34c4224d5745a1ceceb391b2bd9b19b668a8d724/yarl-1.23.0-cp314-cp314t-win_arm64.whl", hash = 
"sha256:44bb7bef4ea409384e3f8bc36c063d77ea1b8d4a5b2706956c0d6695f07dcc25", size = 86041, upload-time = "2026-03-01T22:07:49.026Z" }, { url = "https://files.pythonhosted.org/packages/69/68/c8739671f5699c7dc470580a4f821ef37c32c4cb0b047ce223a7f115757f/yarl-1.23.0-py3-none-any.whl", hash = "sha256:a2df6afe50dea8ae15fa34c9f824a3ee958d785fd5d089063d960bae1daa0a3f", size = 48288, upload-time = "2026-03-01T22:07:51.388Z" }, ] - -[[package]] -name = "zipp" -version = "3.23.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, -]