Merge pull request #2766 from xtekky/27Feb
Add langchain integration
hlohaus authored Feb 28, 2025
2 parents 65265f3 + 963e010 commit 33e9186
Showing 21 changed files with 302 additions and 185 deletions.
41 changes: 37 additions & 4 deletions docs/pydantic_ai.md
@@ -21,10 +21,10 @@ pip install g4f pydantic_ai

### 1. Patch PydanticAI to Use G4F Models

-In order to use PydanticAI with G4F models, you need to apply the necessary patch to the client. This can be done by importing `patch_infer_model` from `g4f.tools.pydantic_ai`. The `api_key` parameter is optional, so if you have one, you can provide it. If not, the system will proceed without it.
+In order to use PydanticAI with G4F models, you need to apply the necessary patch to the client. This can be done by importing `patch_infer_model` from `g4f.integration.pydantic_ai`. The `api_key` parameter is optional, so if you have one, you can provide it. If not, the system will proceed without it.

```python
-from g4f.tools.pydantic_ai import patch_infer_model
+from g4f.integration.pydantic_ai import patch_infer_model

patch_infer_model(api_key="your_api_key_here") # Optional
```
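Once the patch is applied, a model name can be passed straight to a PydanticAI `Agent`. A minimal sketch of that flow, assuming the patch registers G4F models under a `g4f:` model-name prefix:

```python
from pydantic_ai import Agent
from g4f.integration.pydantic_ai import patch_infer_model

patch_infer_model()  # api_key is optional

# Assumption: the patched infer_model resolves "g4f:"-prefixed names via G4F.
agent = Agent("g4f:gpt-4o", system_prompt="Answer in one sentence.")

result = agent.run_sync("What does this patch enable?")
print(result.data)
```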
@@ -89,7 +89,7 @@ For example, you can process your query or interact with external systems before passing them

```python
from pydantic_ai import Agent
-from g4f.tools.pydantic_ai import AIModel
+from g4f.integration.pydantic_ai import AIModel

agent = Agent(
    AIModel("gpt-4o"),
```

@@ -109,7 +109,7 @@ This example shows how to initialize an agent with a specific model (`gpt-4o`) a

```python
from pydantic import BaseModel
from pydantic_ai import Agent
from pydantic_ai.models import ModelSettings
-from g4f.tools.pydantic_ai import patch_infer_model
+from g4f.integration.pydantic_ai import patch_infer_model

patch_infer_model("your_api_key")

```

@@ -129,6 +129,39 @@ This example demonstrates the use of a custom Pydantic model (`MyModel`) to capt

---

## LangChain Integration Example

For users working with LangChain, here is an example demonstrating how to integrate G4F models into a LangChain environment:

```python
from g4f.integration.langchain import ChatAI
import g4f.debug

# Enable debugging logs
g4f.debug.logging = True

llm = ChatAI(
    model="llama3-70b-8192",
    provider="Groq",
    api_key=""  # Optionally add your API key here
)

messages = [
    {"role": "user", "content": "2 🦜 2"},
    {"role": "assistant", "content": "4 🦜"},
    {"role": "user", "content": "2 🦜 3"},
    {"role": "assistant", "content": "5 🦜"},
    {"role": "user", "content": "3 🦜 4"},
]

response = llm.invoke(messages)
assert response.content == "7 🦜"
```

This example shows how to use the `ChatAI` integration to run a G4F model inside LangChain. The message history sets up a few-shot 🦜 arithmetic pattern, which the model is expected to continue by answering `3 🦜 4` with `7 🦜`.
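Since `ChatAI` is used here like a standard LangChain chat model, it should also compose with prompt templates; a short sketch under that assumption:

```python
from langchain_core.prompts import ChatPromptTemplate
from g4f.integration.langchain import ChatAI

# Assumption: ChatAI implements LangChain's chat-model interface, so it can
# be piped with a prompt template like any other Runnable.
llm = ChatAI(model="llama3-70b-8192", provider="Groq")

prompt = ChatPromptTemplate.from_messages([
    ("system", "Reply with the sum of the two numbers followed by 🦜."),
    ("user", "{question}"),
])

chain = prompt | llm
print(chain.invoke({"question": "2 🦜 2"}).content)  # expected: "4 🦜"
```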

---

## Conclusion

By following these steps, you have integrated G4F models into PydanticAI and LangChain, created agents, and enabled debugging. This allows you to hold conversations with the language model, pass system prompts, and retrieve responses synchronously.
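Putting it together, a minimal synchronous session using the `AIModel` wrapper shown above; a sketch assuming `run_sync` and `result.data` from the PydanticAI API of this period:

```python
from pydantic_ai import Agent
from g4f.integration.pydantic_ai import AIModel

# Agent backed by a G4F model via the AIModel wrapper from this diff.
agent = Agent(
    AIModel("gpt-4o"),
    system_prompt="Be concise.",
)

result = agent.run_sync("Summarize what the G4F integration does.")
print(result.data)  # .data holds the agent's final text response
```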
17 changes: 7 additions & 10 deletions g4f/Provider/PollinationsAI.py
@@ -14,7 +14,7 @@
from ..errors import ModelNotFoundError
from ..requests.raise_for_status import raise_for_status
from ..requests.aiohttp import get_connector
-from ..providers.response import ImageResponse, ImagePreview, FinishReason, Usage, Reasoning
+from ..providers.response import ImageResponse, ImagePreview, FinishReason, Usage

DEFAULT_HEADERS = {
    'Accept': '*/*',
@@ -42,7 +42,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
    text_models = [default_model]
    image_models = [default_image_model]
    extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3"]
-    vision_models = [default_vision_model, "gpt-4o-mini"]
+    vision_models = [default_vision_model, "gpt-4o-mini", "o1-mini"]
    extra_text_models = ["claude", "claude-email", "deepseek-reasoner", "deepseek-r1"] + vision_models
    _models_loaded = False
    model_aliases = {
@@ -53,16 +53,14 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"qwen-2.5-coder-32b": "qwen-coder",
"llama-3.3-70b": "llama",
"mistral-nemo": "mistral",
"gpt-4o-mini": "rtist",
"gpt-4o": "searchgpt",
"gpt-4o-mini": "p1",
"deepseek-chat": "claude-hybridspace",
"llama-3.1-8b": "llamalight",
"gpt-4o-vision": "gpt-4o",
"gpt-4o-mini-vision": "gpt-4o-mini",
"gpt-4o-mini": "claude",
"deepseek-chat": "claude-email",
"deepseek-r1": "deepseek-reasoner",
"gemini-2.0": "gemini",
"gemini-2.0-flash": "gemini",
"gemini-2.0-flash-thinking": "gemini-thinking",

@@ -208,10 +206,8 @@ async def _generate_image(
"enhance": str(enhance).lower(),
"safe": str(safe).lower()
}
params = {k: v for k, v in params.items() if v is not None}
query = "&".join(f"{k}={quote_plus(v)}" for k, v in params.items())
prefix = f"{model}_{seed}" if seed is not None else model
url = f"{cls.image_api_endpoint}prompt/{prefix}_{quote_plus(prompt)}?{query}"
query = "&".join(f"{k}={quote_plus(v)}" for k, v in params.items() if v is not None)
url = f"{cls.image_api_endpoint}prompt/{quote_plus(prompt)}?{query}"
yield ImagePreview(url, prompt)

async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
@@ -266,7 +262,8 @@ async def _generate_text(
"seed": seed,
"cache": cache
})

if "gemini" in model:
data.pop("seed")
async with session.post(cls.text_api_endpoint, json=data) as response:
await raise_for_status(response)
result = await response.json()
4 changes: 3 additions & 1 deletion g4f/Provider/hf/HuggingChat.py
@@ -23,7 +23,7 @@
from ...requests.raise_for_status import raise_for_status
from ...providers.response import JsonConversation, ImageResponse, Sources, TitleGeneration, Reasoning, RequestLogin
from ...cookies import get_cookies
-from .models import default_model, fallback_models, image_models, model_aliases
+from .models import default_model, fallback_models, image_models, model_aliases, llama_models
from ... import debug

class Conversation(JsonConversation):
@@ -97,6 +97,8 @@ async def create_authed(
    ) -> AsyncResult:
        if not has_curl_cffi:
            raise MissingRequirementsError('Install "curl_cffi" package | pip install -U curl_cffi')
+        if model == llama_models["name"]:
+            model = llama_models["text"] if images is None else llama_models["vision"]
        model = cls.get_model(model)

        session = Session(**auth_result.get_dict())
4 changes: 3 additions & 1 deletion g4f/Provider/hf/HuggingFaceAPI.py
@@ -6,7 +6,7 @@
from ...errors import ModelNotSupportedError
from ...providers.helper import get_last_user_message
from ..template.OpenaiTemplate import OpenaiTemplate
-from .models import model_aliases, vision_models, default_vision_model
+from .models import model_aliases, vision_models, default_vision_model, llama_models
from .HuggingChat import HuggingChat
from ... import debug

@@ -63,6 +63,8 @@ async def create_async_generator(
    ):
        if model in cls.model_aliases:
            model = cls.model_aliases[model]
+        if model == llama_models["name"]:
+            model = llama_models["text"] if images is None else llama_models["vision"]
        api_base = f"https://api-inference.huggingface.co/models/{model}/v1"
        pipeline_tag = await cls.get_pipline_tag(model, api_key)
        if pipeline_tag not in ("text-generation", "image-text-to-text"):
2 changes: 1 addition & 1 deletion g4f/Provider/hf/__init__.py
@@ -36,7 +36,7 @@ async def create_async_generator(
        messages: Messages,
        **kwargs
    ) -> AsyncResult:
-        if "images" not in kwargs and "deepseek" in model or random.random() >= 0.5:
+        if "tools" not in kwargs and "images" not in kwargs and "deepseek" in model or random.random() >= 0.5:
            try:
                is_started = False
                async for chunk in HuggingFaceInference.create_async_generator(model, messages, **kwargs):
7 changes: 6 additions & 1 deletion g4f/Provider/hf/models.py
@@ -46,4 +46,9 @@
"NousResearch/Hermes-3-Llama-3.1-8B",
]
default_vision_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
vision_models = [default_vision_model, "Qwen/Qwen2-VL-7B-Instruct"]
vision_models = [default_vision_model, "Qwen/Qwen2-VL-7B-Instruct"]
llama_models = {
"name": "llama-3",
"text": "meta-llama/Llama-3.3-70B-Instruct",
"vision": "meta-llama/Llama-3.2-11B-Vision-Instruct",
}
