-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
112 lines (89 loc) · 3.67 KB
/
main.py
File metadata and controls
112 lines (89 loc) · 3.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import argparse
import os
import sys

from dotenv import load_dotenv
from google import genai
from google.genai import types

from functions.call_function import available_functions, call_function
from prompts import system_prompt
# Pull variables from .env into the process environment, then fail fast
# if the Gemini API key is missing -- nothing below works without it.
load_dotenv()

api_key = os.environ.get("GEMINI_API_KEY")
if api_key is None:
    raise RuntimeError(
        "GEMINI_API_KEY not found. Make sure it is set in your .env file."
    )

# Command-line interface: a required prompt plus an optional verbose flag.
arg_parser = argparse.ArgumentParser(description="Chatbot")
arg_parser.add_argument("user_prompt", type=str, help="User prompt")
arg_parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
args = arg_parser.parse_args()

# Gemini client used for every model call in the agent loop.
client = genai.Client(api_key=api_key)

# Seed the conversation history with the user's opening turn.
initial_turn = types.Content(
    role="user",
    parts=[types.Part(text=args.user_prompt)],
)
messages = [initial_turn]

if args.verbose:
    print(f"User prompt: {args.user_prompt}")
# Agent loop: call the model, execute any tool calls it requests, feed the
# results back, and repeat until the model returns a plain-text answer or
# the iteration budget is exhausted.
MAX_ITERATIONS = 20  # hard cap so a confused agent cannot loop forever
for iteration in range(MAX_ITERATIONS):
    # Generate the next model turn from the full conversation so far.
    response = client.models.generate_content(
        model="gemini-2.5-flash",
        contents=messages,
        config=types.GenerateContentConfig(
            tools=[available_functions], system_instruction=system_prompt
        ),
    )

    # Usage metadata should always accompany a successful response.
    if response.usage_metadata is None:
        raise RuntimeError(
            "No usage metadata returned from Gemini API. "
            "This may indicate a failed or incomplete request."
        )

    if args.verbose:
        print(f"Prompt tokens: {response.usage_metadata.prompt_token_count}")
        print(f"Response tokens: {response.usage_metadata.candidates_token_count}")
        print("Response:")

    # Record the model's turn(s) in the conversation history so the next
    # generate_content call sees them.
    if response.candidates:
        for candidate in response.candidates:
            if candidate.content:
                messages.append(candidate.content)

    # If the model requested tool calls, run each one and collect results.
    if response.function_calls:
        function_results = []
        for function_call in response.function_calls:
            function_call_result = call_function(function_call, verbose=args.verbose)

            # Validate the tool-call result structure before trusting it.
            if not function_call_result.parts:
                raise RuntimeError("Function call result has no parts")
            if function_call_result.parts[0].function_response is None:
                raise RuntimeError("Function call result has no function_response")
            if function_call_result.parts[0].function_response.response is None:
                raise RuntimeError("Function call result has no response")

            function_results.append(function_call_result.parts[0])
            if args.verbose:
                print(f"-> {function_call_result.parts[0].function_response.response}")

        # Feed tool output back to the model as a user turn.
        if function_results:
            messages.append(types.Content(role="user", parts=function_results))
    else:
        # No tool calls: the model produced its final answer -- done.
        print("Final response:")
        print(response.text)
        break
else:
    # for/else: the loop ran out of iterations without ever breaking.
    print(f"Error: Maximum iterations ({MAX_ITERATIONS}) reached without final response from the agent.")
    # sys.exit (not the site-module `exit`) is the correct way to terminate
    # a script with a non-zero status; `exit` may not exist under -S/frozen.
    sys.exit(1)