diff --git a/gemini-chat/.env.example b/gemini-chat/.env.example
new file mode 100644
index 00000000..48eecf40
--- /dev/null
+++ b/gemini-chat/.env.example
@@ -0,0 +1 @@
+GEMINI_API_KEY="YOUR_API_KEY"
\ No newline at end of file
diff --git a/gemini-chat/README.md b/gemini-chat/README.md
new file mode 100644
index 00000000..125fddd2
--- /dev/null
+++ b/gemini-chat/README.md
@@ -0,0 +1,49 @@
+# Gemini 2.0 Flash Chainlit Integration
+
+A simple web interface for interacting with Google's Gemini 2.0 Flash model using Chainlit.
+
+## Setup
+
+1. Clone this repository
+2. Create a virtual environment:
+   ```bash
+   python -m venv venv
+   source venv/bin/activate  # On Windows, use: venv\Scripts\activate
+   ```
+3. Install dependencies:
+   ```bash
+   pip install -r requirements.txt
+   ```
+4. Create a `.env` file with your Gemini API key (see `.env.example`):
+   ```
+   GEMINI_API_KEY=your_api_key_here
+   ```
+5. (Optional) Replace `public/avatars/gemini.jpg` with your own bot avatar image
+
+## Running the Application
+
+```bash
+chainlit run app.py
+```
+
+The application will be available at http://localhost:8000
+
+## Project Structure
+
+```
+gemini-chat/
+├── app.py              # Main application code
+├── chainlit.md         # Chainlit welcome screen content
+├── requirements.txt    # Python dependencies
+├── .env                # Environment variables (API key)
+└── public/
+    └── avatars/
+        └── gemini.jpg  # Custom avatar for Gemini responses
+```
+
+## Features
+
+- Interactive chat interface
+- Gemini 2.0 Flash model integration
+- Custom avatar for the AI assistant
+- Error handling
\ No newline at end of file
diff --git a/gemini-chat/app.py b/gemini-chat/app.py
new file mode 100644
index 00000000..5057bd9f
--- /dev/null
+++ b/gemini-chat/app.py
@@ -0,0 +1,40 @@
+import os
+import chainlit as cl
+import google.generativeai as genai
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
+
+if not GEMINI_API_KEY:
+    raise ValueError("Missing GEMINI_API_KEY. Please set it in a .env file.")
+
+# Configure Gemini API
+genai.configure(api_key=GEMINI_API_KEY)
+model = genai.GenerativeModel("gemini-2.0-flash")
+
+@cl.on_chat_start
+async def on_chat_start():
+    await cl.Message(content="Welcome to Gemini 2.0 Flash with Chainlit! Ask me anything.").send()
+
+@cl.on_message
+async def handle_message(message: cl.Message):
+    """Handles user messages and sends responses from Gemini 2.0 Flash."""
+    try:
+        response = await model.generate_content_async(message.content)
+        await cl.Message(
+            content=response.text,
+            author="Gemini 2.0 Flash",
+        ).send()
+    except Exception as e:
+        await cl.Message(content=f"Error: {str(e)}").send()
+
+
+@cl.on_stop
+async def on_stop():
+    print("User stopped the response.")
+
+@cl.on_chat_end
+async def on_chat_end():
+    print("Chat ended.")
diff --git a/gemini-chat/chainlit.md b/gemini-chat/chainlit.md
new file mode 100644
index 00000000..bc5eb624
--- /dev/null
+++ b/gemini-chat/chainlit.md
@@ -0,0 +1,10 @@
+# Gemini 2.0 Flash Chat Interface
+Welcome to the Gemini 2.0 Flash chat interface powered by Chainlit!
+## Features
+
+- Interact with Google's Gemini 2.0 Flash model
+- Simple and responsive UI
+- Error handling for API responses
+
+## Usage
+Ask any question to get started. The model will generate responses based on your queries.
\ No newline at end of file
diff --git a/gemini-chat/public/avatars/gemini.jpg b/gemini-chat/public/avatars/gemini.jpg
new file mode 100644
index 00000000..fe42e43c
Binary files /dev/null and b/gemini-chat/public/avatars/gemini.jpg differ
diff --git a/gemini-chat/requirements.txt b/gemini-chat/requirements.txt
new file mode 100644
index 00000000..fdc670ad
--- /dev/null
+++ b/gemini-chat/requirements.txt
@@ -0,0 +1,3 @@
+chainlit
+google-generativeai
+python-dotenv
\ No newline at end of file
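A minimal sketch (not part of the diff above) for verifying the API key and model with the same `google-generativeai` calls that `app.py` uses; the `check_gemini.py` filename is only an assumed name:

```python
# check_gemini.py (assumed helper name): verifies the Gemini setup used by app.py.
import os

import google.generativeai as genai
from dotenv import load_dotenv

# Reuse the same .env file as app.py
load_dotenv()
api_key = os.getenv("GEMINI_API_KEY")
if not api_key:
    raise ValueError("Missing GEMINI_API_KEY. Please set it in a .env file.")

genai.configure(api_key=api_key)
model = genai.GenerativeModel("gemini-2.0-flash")

# Synchronous call here; app.py uses generate_content_async inside Chainlit's event loop.
response = model.generate_content("Reply with a one-sentence greeting.")
print(response.text)
```

Running `python check_gemini.py` before `chainlit run app.py` should print a short greeting if the key and model name are valid.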