Commit cc92ce1

Merge pull request #2743 from hlohaus/16Feb

16 feb

hlohaus authored Feb 22, 2025
2 parents ba2e6eb + 6ba21db

Showing 28 changed files with 583 additions and 408 deletions.
54 changes: 50 additions & 4 deletions docs/pydantic_ai.md
@@ -21,12 +21,12 @@ pip install g4f pydantic_ai

### 1. Patch PydanticAI to Use G4F Models

In order to use PydanticAI with G4F models, you need to apply the necessary patch to the client. This can be done by importing `apply_patch` from `g4f.tools.pydantic_ai`. The `api_key` parameter is optional, so if you have one, you can provide it. If not, the system will proceed without it.
In order to use PydanticAI with G4F models, you need to apply the necessary patch to the client. This can be done by importing `patch_infer_model` from `g4f.tools.pydantic_ai`. The `api_key` parameter is optional, so if you have one, you can provide it. If not, the system will proceed without it.

```python
from g4f.tools.pydantic_ai import apply_patch
from g4f.tools.pydantic_ai import patch_infer_model

apply_patch(api_key="your_api_key_here") # Optional
patch_infer_model(api_key="your_api_key_here") # Optional
```

If you don't have an API key, simply omit the `api_key` argument.
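
For example, a sketch of the no-key call (per the note above, the client proceeds without authentication):

```python
from g4f.tools.pydantic_ai import patch_infer_model

patch_infer_model()  # no api_key; the system proceeds without it
```
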
@@ -83,12 +83,58 @@ The phrase "hello world" is commonly used in programming tutorials to demonstrate

For example, you can process your query or interact with external systems before passing the data to the agent.
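
As an illustration, here is a minimal sketch of such pre-processing; the `preprocess_query` helper is hypothetical, not part of g4f or PydanticAI:

```python
from pydantic_ai import Agent
from g4f.tools.pydantic_ai import AIModel

agent = Agent(AIModel("gpt-4o"))

def preprocess_query(query: str) -> str:
    # Hypothetical helper: normalize whitespace before
    # handing the text to the agent.
    return " ".join(query.split())

result = agent.run_sync(preprocess_query("  What is   PydanticAI?  "))
print(result.data)
```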

---

### Simple Example without `patch_infer_model`

```python
from pydantic_ai import Agent
from g4f.tools.pydantic_ai import AIModel

agent = Agent(
    AIModel("gpt-4o"),
)

result = agent.run_sync('Are you gpt-4o?')
print(result.data)
```

This example shows how to initialize an agent with a specific model (`gpt-4o`) and run it synchronously.
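
In an async application, the same agent can be driven with `await agent.run(...)`, the asynchronous counterpart of `run_sync` (a sketch under that assumption):

```python
import asyncio

from pydantic_ai import Agent
from g4f.tools.pydantic_ai import AIModel

agent = Agent(AIModel("gpt-4o"))

async def main():
    # agent.run is the awaitable counterpart of agent.run_sync.
    result = await agent.run('Are you gpt-4o?')
    print(result.data)

asyncio.run(main())
```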

---

### Full Example with Tool Calls

```python
from pydantic import BaseModel
from pydantic_ai import Agent
from pydantic_ai.models import ModelSettings
from g4f.tools.pydantic_ai import patch_infer_model

patch_infer_model("your_api_key")

class MyModel(BaseModel):
    city: str
    country: str

agent = Agent('g4f:Groq:llama3-70b-8192', result_type=MyModel, model_settings=ModelSettings(temperature=0))

if __name__ == '__main__':
    result = agent.run_sync('The windy city in the US of A.')
    print(result.data)
    print(result.usage())
```

This example demonstrates how to use a custom Pydantic model (`MyModel`) to capture structured data (the city and country) from the response, and how to run the agent with specific model settings.

---

## Conclusion

By following these steps, you have integrated G4F models into PydanticAI, created an agent, and enabled debugging. This allows you to conduct conversations with the language model, pass system prompts, and retrieve responses synchronously.

### Notes:
- The `api_key` parameter when calling `apply_patch` is optional. If you don’t provide it, the system will still work without an API key.
- The `api_key` parameter when calling `patch_infer_model` is optional. If you don’t provide it, the system will still work without an API key.
- Modify the agent’s `system_prompt` to suit the nature of the conversation you wish to have.
- **Tool calls within AI requests are not fully supported** at the moment. Use the agent's basic functionality for generating responses and handle external calls separately, as in the sketch below.
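
For instance, a minimal sketch of that pattern: call the external system yourself, then pass its output to the agent. The `get_weather` helper is hypothetical and stands in for any external API.

```python
from pydantic_ai import Agent
from g4f.tools.pydantic_ai import AIModel

agent = Agent(AIModel("gpt-4o"))

def get_weather(city: str) -> str:
    # Hypothetical external call; replace with a real API lookup.
    return "22 C, clear skies"

# Call the external system first, then hand its output to the agent.
report = get_weather("Chicago")
result = agent.run_sync(f"Summarize this weather report in one sentence: {report}")
print(result.data)
```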

15 changes: 6 additions & 9 deletions g4f/Provider/Copilot.py
@@ -4,11 +4,11 @@
import json
import asyncio
import base64
from http.cookiejar import CookieJar
from urllib.parse import quote

try:
    from curl_cffi.requests import Session, CurlWsFlag
    from curl_cffi.requests import Session
    from curl_cffi import CurlWsFlag
    has_curl_cffi = True
except ImportError:
    has_curl_cffi = False
@@ -55,7 +55,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
    conversation_url = f"{url}/c/api/conversations"

    _access_token: str = None
    _cookies: CookieJar = None
    _cookies: dict = None

    @classmethod
    def create_completion(
@@ -86,9 +86,7 @@ def create_completion(
        except NoValidHarFileError as h:
            debug.log(f"Copilot: {h}")
            if has_nodriver:
                login_url = os.environ.get("G4F_LOGIN_URL")
                if login_url:
                    yield RequestLogin(cls.label, login_url)
                yield RequestLogin(cls.label, os.environ.get("G4F_LOGIN_URL", ""))
                get_running_loop(check_nested=True)
                cls._access_token, cls._cookies = asyncio.run(get_access_token_and_cookies(cls.url, proxy))
            else:
@@ -104,7 +102,7 @@ def create_completion(
            cookies=cls._cookies,
        ) as session:
            if cls._access_token is not None:
                cls._cookies = session.cookies.jar
                cls._cookies = session.cookies.jar if hasattr(session.cookies, "jar") else session.cookies
            # if cls._access_token is None:
            #     try:
            #         url = "https://copilot.microsoft.com/cl/eus-sc/collect"
@@ -203,8 +201,7 @@ def create_completion(
                if not is_started:
                    raise RuntimeError(f"Invalid response: {last_msg}")
            finally:
                yield Parameters(**{"conversation": conversation.get_dict(), "user": user, "prompt": prompt})
                yield Parameters(**{"cookies": {c.name: c.value for c in session.cookies.jar}})
                wss.close()

async def get_access_token_and_cookies(url: str, proxy: str = None, target: str = "ChatAI",):
    browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="copilot")
5 changes: 2 additions & 3 deletions g4f/Provider/PollinationsAI.py
@@ -39,7 +39,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "openai"
default_image_model = "flux"
default_vision_model = "gpt-4o"
extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3"]
image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
vision_models = [default_vision_model, "gpt-4o-mini"]
extra_text_models = ["claude", "claude-email", "deepseek-reasoner", "deepseek-r1"] + vision_models
model_aliases = {
@@ -67,7 +67,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"sdxl-turbo": "turbo",
}
text_models = []
image_models = []

    @classmethod
    def get_models(cls, **kwargs):
@@ -76,7 +75,7 @@ def get_models(cls, **kwargs):
            image_response = requests.get("https://image.pollinations.ai/models")
            image_response.raise_for_status()
            new_image_models = image_response.json()
            cls.image_models = list(dict.fromkeys([*cls.extra_image_models, *new_image_models]))
            cls.image_models = list(dict.fromkeys([*cls.image_models, *new_image_models]))

            text_response = requests.get("https://text.pollinations.ai/models")
            text_response.raise_for_status()
8 changes: 4 additions & 4 deletions g4f/Provider/PollinationsImage.py
@@ -7,16 +7,16 @@
from .PollinationsAI import PollinationsAI

class PollinationsImage(PollinationsAI):
    label = "Pollinations AI (Image)"
    default_model = "flux"
    default_vision_model = None
    default_image_model = default_model

    @classmethod
    def get_models(cls, **kwargs):
        if not cls.image_models:
            cls.image_models = list(dict.fromkeys([*cls.image_models, *cls.extra_image_models]))
        return cls.image_models
        if not cls.models:
            super().get_models(**kwargs)
            cls.models = cls.image_models
        return cls.models

    @classmethod
    async def create_async_generator(
3 changes: 1 addition & 2 deletions g4f/Provider/__init__.py
@@ -10,7 +10,7 @@
from .not_working import *
from .local import *
from .hf import HuggingFace, HuggingChat, HuggingFaceAPI, HuggingFaceInference
from .hf_space import HuggingSpace
from .hf_space import *
from .mini_max import HailuoAI, MiniMax
from .template import OpenaiTemplate, BackendApi

@@ -53,7 +53,6 @@
    if isinstance(provider, type)
    and issubclass(provider, BaseProvider)
]
__providers__ = __providers__ + HuggingSpace.providers
__all__: list[str] = [
    provider.__name__ for provider in __providers__
]
14 changes: 7 additions & 7 deletions g4f/Provider/hf/HuggingFaceInference.py
@@ -11,7 +11,7 @@
from ...requests import StreamSession, raise_for_status
from ...providers.response import FinishReason, ImageResponse
from ..helper import format_image_prompt, get_last_user_message
from .models import default_model, default_image_model, model_aliases, fallback_models, image_models
from .models import default_model, default_image_model, model_aliases, text_models, image_models, vision_models
from ... import debug

class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
@@ -29,18 +29,18 @@ class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
    @classmethod
    def get_models(cls) -> list[str]:
        if not cls.models:
            models = fallback_models.copy()
            models = text_models.copy()
            url = "https://huggingface.co/api/models?inference=warm&pipeline_tag=text-generation"
            response = requests.get(url)
            if response.ok:
                extra_models = [model["id"] for model in response.json()]
                extra_models.sort()
                models.extend([model for model in extra_models if model not in models])
                extra_models = [model["id"] for model in response.json() if model.get("trendingScore", 0) >= 10]
                models = extra_models + vision_models + [model for model in models if model not in extra_models]
            url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
            response = requests.get(url)
            cls.image_models = image_models.copy()
            if response.ok:
                cls.image_models = [model["id"] for model in response.json() if model.get("trendingScore", 0) >= 20]
                cls.image_models.sort()
                extra_models = [model["id"] for model in response.json() if model.get("trendingScore", 0) >= 20]
                cls.image_models.extend([model for model in extra_models if model not in cls.image_models])
            models.extend([model for model in cls.image_models if model not in models])
            cls.models = models
        return cls.models
5 changes: 3 additions & 2 deletions g4f/Provider/hf/models.py
@@ -4,7 +4,7 @@
    default_image_model,
    "black-forest-labs/FLUX.1-schnell",
]
fallback_models = [
text_models = [
    default_model,
    'meta-llama/Llama-3.3-70B-Instruct',
    'CohereForAI/c4ai-command-r-plus-08-2024',
@@ -15,7 +15,8 @@
    'meta-llama/Llama-3.2-11B-Vision-Instruct',
    'mistralai/Mistral-Nemo-Instruct-2407',
    'microsoft/Phi-3.5-mini-instruct',
] + image_models
]
fallback_models = text_models + image_models
model_aliases = {
    ### Chat ###
    "qwen-2.5-72b": "Qwen/Qwen2.5-Coder-32B-Instruct",
4 changes: 2 additions & 2 deletions g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
@@ -62,15 +62,15 @@ async def create_async_generator(
        seed: int = 0,
        randomize_seed: bool = True,
        cookies: dict = None,
        zerogpu_token: str = None,
        api_key: str = None,
        zerogpu_uuid: str = "[object Object]",
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        async with StreamSession(impersonate="chrome", proxy=proxy) as session:
            prompt = format_image_prompt(messages, prompt)
            data = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps]
            conversation = JsonConversation(zerogpu_token=zerogpu_token, zerogpu_uuid=zerogpu_uuid, session_hash=uuid.uuid4().hex)
            conversation = JsonConversation(zerogpu_token=api_key, zerogpu_uuid=zerogpu_uuid, session_hash=uuid.uuid4().hex)
            if conversation.zerogpu_token is None:
                conversation.zerogpu_uuid, conversation.zerogpu_token = await get_zerogpu_token(cls.space, session, conversation, cookies)
            async with cls.run(f"post", session, conversation, data) as response:
15 changes: 8 additions & 7 deletions g4f/Provider/hf_space/G4F.py
@@ -2,6 +2,7 @@

from aiohttp import ClientSession
import time
import random
import asyncio

from ...typing import AsyncResult, Messages
@@ -40,7 +41,7 @@ async def create_async_generator(
        height: int = 1024,
        seed: int = None,
        cookies: dict = None,
        zerogpu_token: str = None,
        api_key: str = None,
        zerogpu_uuid: str = "[object Object]",
        **kwargs
    ) -> AsyncResult:
@@ -53,7 +54,7 @@
                height=height,
                seed=seed,
                cookies=cookies,
                zerogpu_token=zerogpu_token,
                api_key=api_key,
                zerogpu_uuid=zerogpu_uuid,
                **kwargs
            ):
@@ -66,7 +67,7 @@
                prompt=prompt,
                seed=seed,
                cookies=cookies,
                zerogpu_token=zerogpu_token,
                api_key=api_key,
                zerogpu_uuid=zerogpu_uuid,
                **kwargs
            ):
@@ -79,7 +80,7 @@
        if prompt is None:
            prompt = format_image_prompt(messages)
        if seed is None:
            seed = int(time.time())
            seed = random.randint(9999, 2**32 - 1)

        payload = {
            "data": [
@@ -96,11 +97,11 @@
"trigger_id": 10
}
async with ClientSession() as session:
if zerogpu_token is None:
if api_key is None:
yield Reasoning(status="Acquiring GPU Token")
zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
zerogpu_uuid, api_key = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
headers = {
"x-zerogpu-token": zerogpu_token,
"x-zerogpu-token": api_key,
"x-zerogpu-uuid": zerogpu_uuid,
}
headers = {k: v for k, v in headers.items() if v is not None}
18 changes: 7 additions & 11 deletions g4f/Provider/hf_space/Janus_Pro_7B.py
@@ -71,17 +71,13 @@ async def create_async_generator(
        prompt: str = None,
        proxy: str = None,
        cookies: Cookies = None,
        zerogpu_token: str = None,
        api_key: str = None,
        zerogpu_uuid: str = "[object Object]",
        return_conversation: bool = False,
        conversation: JsonConversation = None,
        seed: int = None,
        **kwargs
    ) -> AsyncResult:
        def generate_session_hash():
            """Generate a unique session hash."""
            return str(uuid.uuid4()).replace('-', '')[:12]

        method = "post"
        if model == cls.default_image_model or prompt is not None:
            method = "image"
@@ -90,14 +86,14 @@ def generate_session_hash():
        if seed is None:
            seed = random.randint(1000, 999999)

        session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
        session_hash = uuid.uuid4().hex if conversation is None else getattr(conversation, "session_hash", uuid.uuid4().hex)
        async with StreamSession(proxy=proxy, impersonate="chrome") as session:
            session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
            if zerogpu_token is None:
                zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, conversation, cookies)
            if api_key is None:
                zerogpu_uuid, api_key = await get_zerogpu_token(cls.space, session, conversation, cookies)
            if conversation is None or not hasattr(conversation, "session_hash"):
                conversation = JsonConversation(session_hash=session_hash, zerogpu_token=zerogpu_token, zerogpu_uuid=zerogpu_uuid)
            conversation.zerogpu_token = zerogpu_token
                conversation = JsonConversation(session_hash=session_hash, zerogpu_token=api_key, zerogpu_uuid=zerogpu_uuid)
            else:
                conversation.zerogpu_token = api_key
            if return_conversation:
                yield conversation
