From 989f02fc31fa9960f8b2b141297d345ea8843971 Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Sat, 1 Mar 2025 01:46:04 +0100
Subject: [PATCH 1/6] Add ToolSupportProvider

---
 docs/pydantic_ai.md                     | 67 +++++++++++++++++++++
 g4f/Provider/PollinationsAI.py          | 14 +++--
 g4f/Provider/hf/HuggingFaceAPI.py       |  4 +-
 g4f/Provider/hf/models.py               |  1 +
 g4f/Provider/template/OpenaiTemplate.py |  1 -
 g4f/client/__init__.py                  | 10 ++--
 g4f/debug.py                            |  8 +--
 g4f/gui/client/demo.html                | 10 +++-
 g4f/integration/__init__.py             |  0
 g4f/providers/tool_support.py           | 77 +++++++++++++++++++++++++
 g4f/tools/pydantic_ai.py                |  1 -
 11 files changed, 172 insertions(+), 21 deletions(-)
 create mode 100644 g4f/integration/__init__.py
 create mode 100644 g4f/providers/tool_support.py
 delete mode 100644 g4f/tools/pydantic_ai.py

diff --git a/docs/pydantic_ai.md b/docs/pydantic_ai.md
index 5349571613b..0b2ae6d6c87 100644
--- a/docs/pydantic_ai.md
+++ b/docs/pydantic_ai.md
@@ -129,6 +129,73 @@ This example demonstrates the use of a custom Pydantic model (`MyModel`) to capt
 
 ---
 
+### Support for Models/Providers without Tool Call Support
+
+For models and providers that do not fully support tool calls or lack a direct API for structured output, the `ToolSupportProvider` bridges the gap. It ensures that the agent receives a properly formatted response even when the model has no built-in support for structured output: it takes the tool list and derives a response format from it when exactly one tool is in use.
+
+### Example for Models/Providers without Tool Support (Single Tool Usage)
+
+```python
+from pydantic import BaseModel
+from pydantic_ai import Agent
+from pydantic_ai.models import ModelSettings
+from g4f.integration.pydantic_ai import AIModel
+from g4f.providers.tool_support import ToolSupportProvider
+
+from g4f import debug
+debug.logging = True
+
+# Define a custom model for structured output (e.g., city and country)
+class MyModel(BaseModel):
+    city: str
+    country: str
+
+# Create an agent for a model without native tool support (single tool usage)
+agent = Agent(AIModel(
+    "PollinationsAI:gpt-4o",  # Specify the provider and model
+    ToolSupportProvider       # Use ToolSupportProvider to handle tool-based response formatting
+), result_type=MyModel, model_settings=ModelSettings(temperature=0))
+
+if __name__ == '__main__':
+    # Run the agent with a query to extract information (e.g., city and country)
+    result = agent.run_sync('European city with the bear.')
+    print(result.data)     # Structured output of city and country
+    print(result.usage())  # Usage statistics
+```
+
+### Explanation:
+
+- **`ToolSupportProvider` as a Bridge:** The `ToolSupportProvider` sits between the agent and the model and formats the response into structured output even when the model exposes no API for that purpose (see the sketch after this list).
+
+  - For instance, if the model produces raw text or unstructured data, the `ToolSupportProvider` converts it into the expected format (such as `MyModel`), so the agent can process it as structured data.
+
+- **Model Initialization:** The agent is initialized with the `PollinationsAI:gpt-4o` model, which may not have a built-in API for returning structured outputs; it relies on the `ToolSupportProvider` to format the output.
+
+- **Custom Result Model:** A custom Pydantic model (`MyModel`) captures the expected output in a structured way (e.g., `city` and `country` fields), so that even when the model does not support structured data, the agent can interpret and format it.
+
+- **Debug Logging:** `g4f.debug.logging` is enabled to provide detailed logs for troubleshooting and monitoring the agent's execution.
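+
+The sketch below illustrates the idea behind this bridge: with a single tool, the tool's parameter schema can double as a JSON response format. It is a simplified, hypothetical sketch, not the actual `ToolSupportProvider` internals; the helper names `tool_to_response_format` and `parse_tool_response` are made up for illustration.
+
+```python
+import json
+
+def tool_to_response_format(tools: list) -> dict:
+    """With exactly one tool, its parameter schema can serve as a
+    JSON response format for models without native tool calling."""
+    if len(tools) != 1:
+        raise ValueError("This fallback only supports a single tool.")
+    return {"type": "json_object", "schema": tools[0]["function"]["parameters"]}
+
+def parse_tool_response(raw_text: str) -> dict:
+    # The model is prompted to answer in JSON, so the raw output
+    # should parse directly into the tool's argument structure.
+    return json.loads(raw_text)
+
+# Example: a single "final_result" tool with city/country parameters
+tools = [{"function": {"name": "final_result", "parameters": {
+    "type": "object",
+    "properties": {"city": {"type": "string"}, "country": {"type": "string"}},
+}}}]
+print(tool_to_response_format(tools))
+print(parse_tool_response('{"city": "Berlin", "country": "Germany"}'))
+```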
+
+### Example Output:
+
+```bash
+city='Berlin'
+country='Germany'
+usage={'prompt_tokens': 15, 'completion_tokens': 50}
+```
+
+### Key Points:
+
+- **`ToolSupportProvider` Role:** The provider turns the model's raw or unstructured response into the structured format the agent expects, even when the model itself lacks built-in support for structured data.
+
+- **Single Tool Usage:** The `ToolSupportProvider` applies when the model uses exactly one tool: that tool's schema defines the structured response, and no additional tools are involved.
+
+### Notes:
+
+- This approach is ideal for models that return unstructured text which needs to be transformed into a structured format such as a Pydantic model; see the validation sketch below this list.
+- The `ToolSupportProvider` closes the gap between the model's output and the expected structured format, enabling seamless integration into workflows that require structured responses.
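+
+As a quick check, the structured output can be validated against `MyModel` directly with Pydantic. This is an illustrative snippet (the raw JSON string is a stand-in for whatever the provider returns), not part of the g4f API:
+
+```python
+from pydantic import BaseModel, ValidationError
+
+class MyModel(BaseModel):
+    city: str
+    country: str
+
+raw = '{"city": "Berlin", "country": "Germany"}'  # stand-in for a provider response
+try:
+    print(MyModel.model_validate_json(raw))
+except ValidationError as exc:
+    print(f"Output did not match the schema: {exc}")
+```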
+
+---
+
 ## LangChain Integration Example
 
 For users working with LangChain, here is an example demonstrating how to integrate G4F models into a LangChain environment:
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index f71b227592a..c5bd6ccbada 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import json
 import random
 import requests
 from urllib.parse import quote_plus
@@ -15,6 +14,7 @@
 from ..requests.raise_for_status import raise_for_status
 from ..requests.aiohttp import get_connector
 from ..providers.response import ImageResponse, ImagePreview, FinishReason, Usage
+from .. import debug
 
 DEFAULT_HEADERS = {
     'Accept': '*/*',
@@ -74,9 +74,11 @@ def get_models(cls, **kwargs):
         try:
             # Update of image models
             image_response = requests.get("https://image.pollinations.ai/models")
-            image_response.raise_for_status()
-            new_image_models = image_response.json()
-
+            if image_response.ok:
+                new_image_models = image_response.json()
+            else:
+                new_image_models = []
+
             # Combine models without duplicates
             all_image_models = (
                 cls.image_models +  # Already contains the default
@@ -112,8 +114,8 @@ def get_models(cls, **kwargs):
                 cls.text_models = [cls.default_model]
             if not cls.image_models:
                 cls.image_models = [cls.default_image_model]
-            raise RuntimeError(f"Failed to fetch models: {e}") from e
-
+            debug.error(f"Failed to fetch models: {e}")
+
         return cls.text_models + cls.image_models
 
     @classmethod
diff --git a/g4f/Provider/hf/HuggingFaceAPI.py b/g4f/Provider/hf/HuggingFaceAPI.py
index e775a7ae976..665d2294589 100644
--- a/g4f/Provider/hf/HuggingFaceAPI.py
+++ b/g4f/Provider/hf/HuggingFaceAPI.py
@@ -61,10 +61,10 @@ async def create_async_generator(
         images: ImagesType = None,
         **kwargs
     ):
-        if model in cls.model_aliases:
-            model = cls.model_aliases[model]
         if model == llama_models["name"]:
             model = llama_models["text"] if images is None else llama_models["vision"]
+        if model in cls.model_aliases:
+            model = cls.model_aliases[model]
         api_base = f"https://api-inference.huggingface.co/models/{model}/v1"
         pipeline_tag = await cls.get_pipline_tag(model, api_key)
         if pipeline_tag not in ("text-generation", "image-text-to-text"):
diff --git a/g4f/Provider/hf/models.py b/g4f/Provider/hf/models.py
index 53c33a21d36..def7c05cfa4 100644
--- a/g4f/Provider/hf/models.py
+++ b/g4f/Provider/hf/models.py
@@ -20,6 +20,7 @@
 model_aliases = {
     ### Chat ###
     "qwen-2.5-72b": "Qwen/Qwen2.5-Coder-32B-Instruct",
+    "llama-3": "meta-llama/Llama-3.3-70B-Instruct",
     "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
     "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
     "deepseek-r1": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
diff --git a/g4f/Provider/template/OpenaiTemplate.py b/g4f/Provider/template/OpenaiTemplate.py
index d84277273d6..359fdfd7552 100644
--- a/g4f/Provider/template/OpenaiTemplate.py
+++ b/g4f/Provider/template/OpenaiTemplate.py
@@ -145,7 +145,6 @@ async def create_async_generator(
             elif content_type.startswith("text/event-stream"):
                 await raise_for_status(response)
                 first = True
-                is_thinking = 0
                 async for line in response.iter_lines():
                     if line.startswith(b"data: "):
                         chunk = line[6:]
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index 8bd6d0dc4d4..8aceaaf214e 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -275,6 +275,7 @@ def __init__(self, client: Client, provider: Optional[ProviderType] = None):
 
     def create(
         self,
+        *,
         messages: Messages,
         model: str,
         provider: Optional[ProviderType] = None,
@@ -306,8 +307,8 @@ def create(
 
         response = iter_run_tools(
             provider.get_create_function(),
-            model,
-            messages,
+            model=model,
+            messages=messages,
             stream=stream,
             **filter_none(
                 proxy=self.client.proxy if proxy is None else proxy,
@@ -561,6 +562,7 @@ def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None)
 
     def create(
         self,
+        *,
         messages: Messages,
         model: str,
         provider: Optional[ProviderType] = None,
@@ -592,8 +594,8 @@ def create(
 
         response = async_iter_run_tools(
             provider,
-            model,
-            messages,
+            model=model,
+            messages=messages,
             stream=stream,
             **filter_none(
                 proxy=self.client.proxy if proxy is None else proxy,
diff --git a/g4f/debug.py b/g4f/debug.py
index 10cd37f644b..36abb3a9f1a 100644
--- a/g4f/debug.py
+++ b/g4f/debug.py
@@ -1,10 +1,7 @@
 import sys
-from .providers.types import ProviderType
 
 logging: bool = False
 version_check: bool = True
-last_provider: ProviderType = None
-last_model: str = None
 version: str = None
 log_handler: callable = print
 logs: list = []
@@ -14,4 +11,7 @@ def log(text, file = None):
         log_handler(text, file=file)
 
 def error(error, name: str = None):
-    log(error if isinstance(error, str) else f"{type(error).__name__ if name is None else name}: {error}", file=sys.stderr)
\ No newline at end of file
+    log(
+        error if isinstance(error, str) else f"{type(error).__name__ if name is None else name}: {error}",
+        file=sys.stderr
+    )
\ No newline at end of file
diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html
index 94ff5c398dd..7464c783f59 100644
--- a/g4f/gui/client/demo.html
+++ b/g4f/gui/client/demo.html
@@ -201,7 +201,7 @@
-