Commit 69ab91a
Merge pull request #2753 from hlohaus/16Feb
Fix model and provider in chat completion response
hlohaus authored Feb 24, 2025
2 parents 07a8dfd + ee9e0c3 commit 69ab91a
Showing 8 changed files with 163 additions and 94 deletions.
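
The net effect, as a hedged sketch against g4f's public client API (field names taken from the g4f/client/__init__.py hunks below; the rest is assumed usage, not part of this commit):

    # Sketch: after this commit, the completion object reports the provider
    # and model that actually served the request, not the retry wrapper.
    from g4f.client import Client

    client = Client()
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # requested model; the resolved one is echoed back
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.provider, response.model)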
15 changes: 13 additions & 2 deletions g4f/Provider/needs_auth/Gemini.py
@@ -203,7 +203,14 @@ def find_str(data, skip=0):
                             skip -= 1
                             continue
                         yield item
-                reasoning = "".join(find_str(response_part[4][0], 3))
+                reasoning = "\n\n".join(find_str(response_part[4][0], 3))
+                reasoning = re.sub(r"<b>|</b>", "**", reasoning)
+                def replace_image(match):
+                    return f"![](https:{match.group(0)})"
+                reasoning = re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning)
+                reasoning = re.sub(r"\nyoutube\n", "\n\n\n", reasoning)
+                reasoning = re.sub(r"\nYouTube\n", "\nYouTube ", reasoning)
+                reasoning = reasoning.replace('https://www.gstatic.com/images/branding/productlogos/youtube/v9/192px.svg', '<i class="fa-brands fa-youtube"></i>')
                 content = response_part[4][0][1][0]
                 if reasoning:
                     yield Reasoning(status="🤔")
@@ -215,8 +222,12 @@ def find_str(data, skip=0):
                 if match:
                     image_prompt = match.group(1)
                     content = content.replace(match.group(0), '')
-                pattern = r"http://googleusercontent.com/image_generation_content/\d+"
+                pattern = r"http://googleusercontent.com/(?:image_generation|youtube)_content/\d+"
                 content = re.sub(pattern, "", content)
+                content = content.replace("<!-- end list -->", "")
+                content = content.replace("https://www.google.com/search?q=http://", "https://")
+                content = content.replace("https://www.google.com/search?q=https://", "https://")
+                content = content.replace("https://www.google.com/url?sa=E&source=gmail&q=http://", "http://")
                 if last_content and content.startswith(last_content):
                     yield content[len(last_content):]
                 else:
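For illustration, the new reasoning clean-up reads as a standalone helper; this sketch copies the regexes from the hunk above and runs them on a fabricated sample (the real code operates inside Gemini's response parser, on real API payloads):

    import re

    def clean_reasoning(reasoning: str) -> str:
        reasoning = re.sub(r"<b>|</b>", "**", reasoning)  # <b> tags -> markdown bold
        def replace_image(match):
            # protocol-relative YouTube avatar URL -> markdown image
            return f"![](https:{match.group(0)})"
        reasoning = re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning)
        reasoning = re.sub(r"\nyoutube\n", "\n\n\n", reasoning)
        reasoning = re.sub(r"\nYouTube\n", "\nYouTube ", reasoning)
        return reasoning

    sample = "<b>Thinking</b>\n//yt3.ggpht.com/abc123=\nYouTube\nchannel avatar"
    print(clean_reasoning(sample))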
12 changes: 10 additions & 2 deletions g4f/api/__init__.py
@@ -170,7 +170,9 @@ async def authorization(request: Request, call_next):
            try:
                user_g4f_api_key = await self.get_g4f_api_key(request)
            except HTTPException:
-               user_g4f_api_key = None
+               user_g4f_api_key = await self.security(request)
+               if hasattr(user_g4f_api_key, "credentials"):
+                   user_g4f_api_key = user_g4f_api_key.credentials
            path = request.url.path
            if path.startswith("/v1") or path.startswith("/api/") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'):
                if user_g4f_api_key is None:
@@ -581,11 +583,17 @@ async def get_image(filename, request: Request):
                 pass
         if not os.path.isfile(target):
             source_url = get_source_url(str(request.query_params))
+            ssl = None
+            if source_url is None:
+                backend_url = os.environ.get("G4F_BACKEND_URL")
+                if backend_url:
+                    source_url = f"{backend_url}/images/{filename}"
+                    ssl = False
             if source_url is not None:
                 try:
                     await copy_images(
                         [source_url],
-                        target=target)
+                        target=target, ssl=ssl)
                     debug.log(f"Image copied from {source_url}")
                 except Exception as e:
                     debug.error(f"Download failed: {source_url}\n{type(e).__name__}: {e}")
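The authorization change falls back to a standard Bearer header when the g4f-specific key lookup raises. A minimal sketch, assuming `self.security` is FastAPI's `HTTPBearer(auto_error=False)` (the surrounding app object is omitted, so names here are illustrative):

    from fastapi import Request
    from fastapi.security import HTTPBearer

    security = HTTPBearer(auto_error=False)

    async def resolve_api_key(request: Request):
        credentials = await security(request)  # parses "Authorization: Bearer <token>"
        if credentials is not None and hasattr(credentials, "credentials"):
            return credentials.credentials  # the raw token string
        return None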
59 changes: 44 additions & 15 deletions g4f/client/__init__.py
@@ -12,7 +12,7 @@
 from ..image.copy_images import copy_images
 from ..typing import Messages, ImageType
 from ..providers.types import ProviderType, BaseRetryProvider
-from ..providers.response import ResponseType, ImageResponse, FinishReason, BaseConversation, SynthesizeData, ToolCalls, Usage
+from ..providers.response import *
 from ..errors import NoImageResponseError
 from ..providers.retry_provider import IterListProvider
 from ..providers.asyncio import to_sync_generator
@@ -49,6 +49,7 @@ def iter_response(
     finish_reason = None
     tool_calls = None
     usage = None
+    provider: ProviderInfo = None
     completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
     idx = 0
 
@@ -65,25 +66,29 @@ def iter_response(
         elif isinstance(chunk, Usage):
             usage = chunk
             continue
+        elif isinstance(chunk, ProviderInfo):
+            provider = chunk
+            continue
         elif isinstance(chunk, BaseConversation):
             yield chunk
             continue
-        elif isinstance(chunk, SynthesizeData) or not chunk:
+        elif isinstance(chunk, HiddenResponse):
             continue
         elif isinstance(chunk, Exception):
             continue
 
         if isinstance(chunk, list):
             chunk = "".join(map(str, chunk))
         else:
-
             temp = chunk.__str__()
             if not isinstance(temp, str):
                 if isinstance(temp, list):
                     temp = "".join(map(str, temp))
                 else:
                     temp = repr(chunk)
             chunk = temp
+        if not chunk:
+            continue
 
         content += chunk
 
@@ -96,7 +101,11 @@ def iter_response(
             finish_reason = "stop"
 
         if stream:
-            yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+            chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+            if provider is not None:
+                chunk.provider = provider.name
+                chunk.model = provider.model
+            yield chunk
 
         if finish_reason is not None:
             break
@@ -108,27 +117,32 @@
     finish_reason = "stop" if finish_reason is None else finish_reason
 
     if stream:
-        yield ChatCompletionChunk.model_construct(
+        chat_completion = ChatCompletionChunk.model_construct(
             None, finish_reason, completion_id, int(time.time()),
             usage=usage
         )
     else:
         if response_format is not None and "type" in response_format:
             if response_format["type"] == "json_object":
                 content = filter_json(content)
-        yield ChatCompletion.model_construct(
+        chat_completion = ChatCompletion.model_construct(
             content, finish_reason, completion_id, int(time.time()),
             usage=UsageModel.model_construct(**usage.get_dict()),
             **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
         )
+    if provider is not None:
+        chat_completion.provider = provider.name
+        chat_completion.model = provider.model
+    yield chat_completion
 
 # Synchronous iter_append_model_and_provider function
 def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType:
     if isinstance(last_provider, BaseRetryProvider):
-        last_provider = last_provider.last_provider
+        yield from response
+        return
     for chunk in response:
         if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-            if last_provider is not None:
+            if chunk.provider is None and last_provider is not None:
                 chunk.model = getattr(last_provider, "last_model", last_model)
                 chunk.provider = last_provider.__name__
         yield chunk
@@ -146,6 +160,7 @@ async def async_iter_response(
     idx = 0
     tool_calls = None
     usage = None
+    provider: ProviderInfo = None
 
     try:
         async for chunk in response:
@@ -161,12 +176,17 @@ async def async_iter_response(
             elif isinstance(chunk, Usage):
                 usage = chunk
                 continue
-            elif isinstance(chunk, SynthesizeData) or not chunk:
+            elif isinstance(chunk, ProviderInfo):
+                provider = chunk
+                continue
+            elif isinstance(chunk, HiddenResponse):
                 continue
             elif isinstance(chunk, Exception):
                 continue
 
             chunk = str(chunk)
+            if not chunk:
+                continue
             content += chunk
             idx += 1
 
@@ -179,7 +199,11 @@ async def async_iter_response(
                 finish_reason = "stop"
 
             if stream:
-                yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+                chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+                if provider is not None:
+                    chunk.provider = provider.name
+                    chunk.model = provider.model
+                yield chunk
 
             if finish_reason is not None:
                 break
@@ -190,19 +214,23 @@ async def async_iter_response(
             usage = Usage(completion_tokens=idx, total_tokens=idx)
 
         if stream:
-            yield ChatCompletionChunk.model_construct(
+            chat_completion = ChatCompletionChunk.model_construct(
                 None, finish_reason, completion_id, int(time.time()),
                 usage=usage.get_dict()
             )
         else:
             if response_format is not None and "type" in response_format:
                 if response_format["type"] == "json_object":
                     content = filter_json(content)
-            yield ChatCompletion.model_construct(
+            chat_completion = ChatCompletion.model_construct(
                 content, finish_reason, completion_id, int(time.time()),
                 usage=UsageModel.model_construct(**usage.get_dict()),
                 **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
             )
+        if provider is not None:
+            chat_completion.provider = provider.name
+            chat_completion.model = provider.model
+        yield chat_completion
     finally:
         await safe_aclose(response)
 
@@ -214,11 +242,12 @@ async def async_iter_append_model_and_provider(
     last_provider = None
     try:
         if isinstance(last_provider, BaseRetryProvider):
-            if last_provider is not None:
-                last_provider = last_provider.last_provider
+            async for chunk in response:
+                yield chunk
+            return
         async for chunk in response:
             if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-                if last_provider is not None:
+                if chunk.provider is None and last_provider is not None:
                     chunk.model = getattr(last_provider, "last_model", last_model)
                     chunk.provider = last_provider.__name__
             yield chunk
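The pattern behind these hunks: providers now emit a ProviderInfo chunk mid-stream, and the response loops stamp its name and model onto each completion object instead of trusting the retry wrapper. A self-contained sketch (the dataclass stands in for g4f's real ProviderInfo; the record dict is illustrative):

    from dataclasses import dataclass

    @dataclass
    class ProviderInfo:  # stand-in for g4f.providers.response.ProviderInfo
        name: str
        model: str

    def iter_chunks(stream):
        provider = None
        for chunk in stream:
            if isinstance(chunk, ProviderInfo):
                provider = chunk  # remember who actually answered
                continue
            yield {
                "content": chunk,
                "provider": provider.name if provider else None,
                "model": provider.model if provider else None,
            }

    events = [ProviderInfo("SomeProvider", "gpt-4o-mini"), "Hel", "lo"]
    for item in iter_chunks(events):
        print(item)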
49 changes: 32 additions & 17 deletions g4f/gui/client/demo.html
@@ -183,25 +183,10 @@
         const isIframe = window.self !== window.top;
         const backendUrl = "{{backend_url}}";
         let url = new URL(window.location.href)
-        let params = new URLSearchParams(url.search);
         if (isIframe && backendUrl) {
-            if (params.get("get_gpu_token")) {
-                window.addEventListener('DOMContentLoaded', async function() {
-                    const link = document.getElementById("new_window");
-                    link.href = `${backendUrl}${url.search}`;
-                    link.click();
-                });
-            } else {
-                window.location.replace(`${backendUrl}${url.search}`);
-            }
+            window.location.replace(`${backendUrl}${url.search}`);
             return;
         }
-        if (params.get("__sign")) {
-            localStorage.setItem("HuggingSpace-api_key", params.get("__sign"));
-            if (!isIframe) {
-                window.location.replace("/");
-            }
-        }
     })();
 </script>
 <script src="https://unpkg.com/[email protected]/dist/es-module-shims.js"></script>
@@ -240,10 +225,13 @@
             <p>
                 <a href="https://huggingface.co/settings/tokens" target="_blank">Get Access Token</a>
             </p>
+            <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/sign-in-with-huggingface-xl-dark.svg" alt="Sign in with Hugging Face" style="cursor: pointer; display: none;" id="signin">
+            <button id="signout" style="display: none">Sign out</button>
         </form>
         <script type="module">
             import * as hub from "@huggingface/hub";
             import { init } from "@huggingface/space-header";
+            import { oauthLoginUrl, oauthHandleRedirectIfPresent } from "@huggingface/hub";
 
             const isIframe = window.self !== window.top;
             const button = document.querySelector('form a.button');
@@ … @@
                     return;
                 }
                 localStorage.setItem("HuggingFace-api_key", accessToken);
-                localStorage.setItem("HuggingFace-user", JSON.stringify(user));
                 localStorage.setItem("user", user.name);
                 localStorage.setItem("report_error", "true")
                 location.href = "/chat/";
@@ … @@
                 event.preventDefault();
                 check_access_token();
             });
+
+            let oauthResult = localStorage.getItem("oauth");
+            if (oauthResult) {
+                try {
+                    oauthResult = JSON.parse(oauthResult);
+                } catch {
+                    oauthResult = null;
+                }
+            }
+            oauthResult ||= await oauthHandleRedirectIfPresent();
+            if (oauthResult) {
+                localStorage.setItem("oauth", JSON.stringify(oauthResult));
+                localStorage.setItem("HuggingFace-api_key", oauthResult.accessToken);
+                localStorage.setItem("user", oauthResult.userInfo.fullname);
+                document.getElementById("signout").style.removeProperty("display");
+                document.getElementById("signout").onclick = async function() {
+                    localStorage.removeItem("oauth");
+                    localStorage.removeItem("HuggingFace-api_key");
+                    window.location.href = window.location.href.replace(/\?.*$/, '');
+                    window.location.reload();
+                }
+            } else {
+                document.getElementById("signin").style.removeProperty("display");
+                document.getElementById("signin").onclick = async function() {
+                    // prompt=consent to re-trigger the consent screen instead of silently redirecting
+                    window.location.href = (await oauthLoginUrl({clientId: 'ed074164-4f8d-4fb2-8bec-44952707965e', scopes: ['inference-api']})) + "&prompt=consent";
+                }
+            }
 </script>
 
 <!-- Footer -->
[4 more changed files not shown]
