diff --git a/AGENT_SYSTEM_COMPLETE.md b/AGENT_SYSTEM_COMPLETE.md
deleted file mode 100644
index 91a0e42..0000000
--- a/AGENT_SYSTEM_COMPLETE.md
+++ /dev/null
@@ -1,157 +0,0 @@
-# 🤖 AI Agent System Complete!
-## ✅ **What We've Built**
-### **Autonomous AI Agents**
-- **Research Planning**: AI agents analyze research goals and generate detailed experimental plans
-- **OpenRouter Integration**: Uses Grok-4-fast and other models for intelligent planning
-- **GPU Dispatch**: Automatically dispatches experiments to Novita AI and other providers
-- **Real-time Monitoring**: Live progress tracking with WebSocket updates
-- **Intelligent Analysis**: AI analyzes results and suggests next steps
-### **Agent Capabilities**
-#### 🧠 **Research Planning Agent**
-```typescript
-// Generates structured research plans
-{
-  "objectives": ["objective1", "objective2"],
-  "experiments": [
-    {
-      "name": "Experiment Name",
-      "description": "What this experiment tests",
-      "model": "Model to use",
-      "hyperparameters": {"param": "value"},
-      "expectedDuration": "2h",
-      "gpuRequirements": "A100 x 2"
-    }
-  ],
-  "metrics": ["metric1", "metric2"],
-  "timeline": "Expected timeline",
-  "budget": "Estimated cost"
-}
-```
-#### ⚡ **Execution Agent**
-- Dispatches experiments to GPU providers
-- Monitors job progress in real-time
-- Handles failures and retries
-- Updates metrics and artifacts
-#### 📊 **Analysis Agent**
-- Analyzes experiment results
-- Provides insights and recommendations
-- Suggests next steps for improvement
-- Generates confidence scores
-### **Integration Points**
-#### **OpenRouter API**
-- **Models**: Grok-4-fast, Claude, GPT-4, etc.
-- **Use Cases**: Research planning, result analysis
-- **Fallback**: Mock responses when API unavailable
-#### **Novita AI GPU Provider**
-- **Job Dispatch**: Creates GPU jobs via REST API
-- **Progress Monitoring**: WebSocket updates
-- **Artifact Storage**: S3-compatible storage
-- **Fallback**: Mock job references for demo
-#### **Convex Real-time Database**
-- **Live Updates**: WebSocket connections
-- **Type Safety**: End-to-end TypeScript
-- **Scalability**: Serverless and auto-scaling
-## 🚀 **How to Use**
-### **1. Agent Dashboard**
-Visit: `http://localhost:3000/agents`
-- Select a project
-- Enter research goal
-- Optionally add codebase context
-- Click "Launch AI Agent"
-### **2. Project Integration**
-- Projects page now has "Start Agent" buttons
-- Agents create runs automatically
-- Real-time progress monitoring
-### **3. Environment Setup**
-Add to `.env.local`:
-```bash
-OPENROUTER_API_KEY=your-openrouter-api-key
-NOVITA_API_KEY=your-novita-api-key
-```
-## 🔄 **Agent Workflow**
-1. **Planning Phase**
-   - AI analyzes research goal
-   - Generates structured experiment plan
-   - Estimates resources and timeline
-2. **Execution Phase**
-   - Dispatches experiments to GPU providers
-   - Monitors progress in real-time
-   - Updates metrics and artifacts
-3. **Analysis Phase**
-   - AI analyzes results
-   - Provides insights and recommendations
-   - Suggests next steps
-## 📱 **UI Features**
-### **Agent Dashboard**
-- Project selection
-- Research goal input
-- Codebase context (optional)
-- Real-time run monitoring
-- Agent capabilities overview
-### **Project Integration**
-- "Start Agent" buttons on project cards
-- Real-time progress bars
-- Status indicators
-- Cost tracking
-### **Real-time Updates**
-- Live progress bars
-- Status changes
-- Metric updates
-- Cross-tab synchronization
-## 🎯 **Demo Mode**
-The system works in demo mode without API keys:
-- Mock AI responses for planning
-- Simulated GPU job dispatch
-- Fake progress updates
-- Sample metrics and artifacts
-## 🔧 **Next Steps**
-### **Immediate Enhancements**
-1. **Codebase Integration**: Connect to GitHub repositories
-2. **Advanced Models**: Support for more AI models
-3. **Custom Templates**: Pre-built experiment templates
-4. **Budget Controls**: Automatic cost limiting
-### **Production Features**
-1. **Authentication**: User management and project ownership
-2. **Webhook Handlers**: Receive updates from GPU providers
-3. **File Uploads**: Handle artifact uploads
-4. **Notifications**: Real-time alerts for completion
-### **Advanced Capabilities**
-1. **Multi-Agent Coordination**: Multiple agents working together
-2. **Hyperparameter Optimization**: Automated tuning
-3. **Model Comparison**: Side-by-side evaluation
-4. **Paper Generation**: Automated research paper drafts
----
-**Status**: ✅ **AI Agent System Complete** - Ready for Production Deployment!
-The system now provides fully autonomous AI research capabilities with real-time monitoring, intelligent planning, and seamless GPU integration. Perfect for researchers who want to scale their AI experiments! 🚀
diff --git a/AUTO_AI_RESEARCH_README.md b/AUTO_AI_RESEARCH_README.md
deleted file mode 100644
index ebc1415..0000000
--- a/AUTO_AI_RESEARCH_README.md
+++ /dev/null
@@ -1,119 +0,0 @@
-# Auto AI Research System - UI Implementation
-This is the frontend UI implementation for the **Auto AI Research System** - a fully autonomous, web-based AI research platform.
-## 🚀 Features Implemented
-### ✅ Core Pages
-- **Projects Dashboard** - Grid view of all research projects with filtering and search
-- **Project Detail** - Comprehensive project view with tabbed interface
-- **Run Detail** - Real-time monitoring of autonomous runs with live logs and metrics
-### ✅ UI Components
-- Modern, responsive design using shadcn/ui and Tailwind CSS
-- Consistent navigation with AppLayout wrapper
-- Interactive dialogs for project creation
-- Progress bars and status indicators
-- Real-time log viewing
-- Metric displays and chart placeholders
-### ✅ Navigation Structure
-```
-/projects                    - Projects dashboard
-/projects/[id]               - Project detail with tabs
-/projects/[id]/runs/[runId]  - Run detail with live monitoring
-```
-## 🎨 Design System
-Built with:
-- **Next.js 15** - React framework with App Router
-- **shadcn/ui** - Modern component library
-- **Tailwind CSS** - Utility-first styling
-- **Lucide React** - Clean, consistent icons
-- **TypeScript** - Type safety throughout
-## 📱 Responsive Features
-- Mobile-first responsive design
-- Collapsible navigation on mobile devices
-- Grid layouts that adapt to screen size
-- Touch-friendly interfaces
-## 🔧 Mock Data
-The UI includes comprehensive mock data to demonstrate:
-- Multiple project states (running, completed, paused, failed)
-- Live run monitoring with progress tracking
-- Real-time logs and metrics
-- Timeline view of agent execution steps
-- Budget tracking and cost monitoring
-## 🎯 Key User Flows
-### 1. Project Management
-- Create new projects with templates
-- View project grid with search and filters
-- Quick actions (pause, resume, stop)
-- Budget and cost tracking
-### 2. Run Monitoring
-- Real-time progress tracking
-- Live log streaming
-- Timeline of agent steps
-- Configuration viewing and editing
-- Metric dashboards
-### 3. Navigation
-- Consistent top navigation
-- Breadcrumb navigation
-- Mobile-responsive menu
-- User profile dropdown
-## 🚧 Ready for Backend Integration
-The UI is designed to easily connect to backend services:
-- All data is currently mocked but uses realistic data structures
-- API-ready component architecture
-- WebSocket placeholders for real-time updates
-- Proper state management patterns
-## 🔄 Next Steps
-To complete the system:
-1. Connect to FastAPI backend
-2. Implement real-time WebSocket connections
-3. Add actual chart rendering (recharts/chart.js)
-4. Integrate with GPU providers (Novita AI)
-5. Add authentication and user management
-6. Implement file upload and artifact management
-## 📁 File Structure
-```
-app/
-├── projects/
-│   ├── page.tsx              # Projects dashboard
-│   └── [id]/
-│       ├── page.tsx          # Project detail
-│       └── runs/
-│           └── [runId]/
-│               └── page.tsx  # Run detail
-components/
-├── layout/
-│   └── app-layout.tsx        # Main app layout
-└── ui/                       # shadcn/ui components
-```
-## 🎨 Color Scheme
-The system uses a neutral color scheme with:
-- **Running**: Green indicators
-- **Completed**: Blue indicators
-- **Paused**: Yellow indicators
-- **Failed**: Red indicators
-- **Dark mode ready** with CSS variables
----
-**Status**: ✅ Frontend UI Complete - Ready for Backend Integration
diff --git a/CONVEX_SETUP_COMPLETE.md b/CONVEX_SETUP_COMPLETE.md
deleted file mode 100644
index 696e360..0000000
--- a/CONVEX_SETUP_COMPLETE.md
+++ /dev/null
@@ -1,119 +0,0 @@
-# 🎉 Convex Integration Complete!
-## ✅ **What We've Built**
-### **Backend (Convex)**
-- **Database Schema**: Complete schema with projects, runs, steps, metrics, artifacts, and credentials
-- **API Functions**: Full CRUD operations for projects and runs
-- **Real-time Queries**: Automatic WebSocket connections for live updates
-- **Sample Data**: Seeded data for testing and demonstration
-### **Frontend Integration**
-- **Convex Provider**: Wrapped the entire app with the Convex client
-- **Real-time Data**: Projects page now uses live Convex data instead of mock data
-- **Interactive UI**: Create projects, load sample data, real-time updates
-- **Type Safety**: End-to-end TypeScript with generated types
-## 🚀 **How to Test**
-1. **Visit the Projects Page**: `http://localhost:3001/projects`
-   - See real-time data from Convex
-   - Create new projects
-   - Load sample data
-2. **Test Real-time Updates**: `http://localhost:3001/test`
-   - Create test projects
-   - Watch real-time updates
-   - Verify Convex connection
-3. **Open Multiple Tabs**:
-   - Create a project in one tab
-   - Watch it appear instantly in another tab (real-time!)
-## 📊 **Database Schema**
-```typescript
-// Projects
-projects: {
-  name, description, ownerId, status, budget, usedBudget,
-  settings, createdAt, updatedAt
-}
-// Runs
-runs: {
-  projectId, name, status, progress, config, cost,
-  gpuProvider, jobRef, startedAt, endedAt, eta
-}
-// Run Steps
-runSteps: {
-  runId, stepName, status, description,
-  startedAt, endedAt, duration, stepIndex
-}
-// Metrics
-metrics: {
-  runId, name, value, timestamp, stepIndex
-}
-// Artifacts
-artifacts: {
-  runId, name, type, size, url, checksum, createdAt
-}
-```
-## 🔄 **Real-time Features**
-- **Live Updates**: Changes in one browser tab instantly appear in others
-- **WebSocket Connection**: Automatic connection management
-- **Optimistic Updates**: UI updates immediately, syncs with backend
-- **Error Handling**: Graceful fallbacks and error states
-## 🎯 **Next Steps**
-### **Immediate (Ready to implement)**
-1. **Project Detail Page**: Update to use Convex data
-2. **Run Detail Page**: Connect to real-time run data
-3. **Run Management**: Start/stop/pause runs with Convex mutations
-### **GPU Integration**
-1. **Novita AI Actions**: Create Convex actions for GPU job management
-2. **Webhook Handlers**: Receive progress updates from GPU providers
-3. **Real-time Metrics**: Stream training metrics to the UI
-### **Advanced Features**
-1. **Authentication**: Add user management with Convex auth
-2. **File Uploads**: Handle artifact uploads to S3
-3. **Notifications**: Real-time notifications for run completion
-## 💡 **Key Benefits of Convex**
-✅ **5-minute setup** vs hours with traditional backend
-✅ **Real-time by default** - no WebSocket management needed
-✅ **Type-safe** - shared types between frontend and backend
-✅ **Serverless** - scales automatically
-✅ **Local development** - works offline, syncs when online
-✅ **Production ready** - `npx convex deploy` and you're live
-## 🔧 **Development Commands**
-```bash
-# Start Convex dev server
-npx convex dev
-# Start Next.js dev server
-npm run dev
-# Deploy to production
-npx convex deploy
-```
-## 📱 **Test URLs**
-- **Projects**: http://localhost:3001/projects
-- **Test Page**: http://localhost:3001/test
-- **Convex Dashboard**: https://dashboard.convex.dev
----
-**Status**: ✅ **Convex Integration Complete** - Ready for GPU Provider Integration!
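To make the schema summary above concrete, here is a minimal sketch of how the `projects` table could be declared with Convex's standard `defineSchema`/`defineTable` helpers. The validator choices are assumptions based on the field list above; the repo's actual `convex/schema.ts` may differ.

```typescript
// convex/schema.ts - minimal sketch, fields follow the summary above
import { defineSchema, defineTable } from "convex/server";
import { v } from "convex/values";

export default defineSchema({
  projects: defineTable({
    name: v.string(),
    description: v.string(),
    ownerId: v.string(),
    status: v.string(),          // e.g. "running" | "completed" | "paused" | "failed"
    budget: v.number(),
    usedBudget: v.number(),
    settings: v.optional(v.any()),
    createdAt: v.number(),
    updatedAt: v.number(),
  }),
});
```

The list endpoint described above would then be a plain `query` that returns `ctx.db.query("projects").collect()`, and the frontend subscribes to it via `useQuery(api.projects.list, {})` (as the dashboard code later in this diff does), which is what keeps multiple tabs in sync over WebSocket without any manual subscription code.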
diff --git a/ENVIRONMENT_SETUP.md b/ENVIRONMENT_SETUP.md
deleted file mode 100644
index 4f40c5b..0000000
--- a/ENVIRONMENT_SETUP.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# 🔧 Environment Configuration Guide
-## Required Environment Variables
-Add these to your `.env.local` file:
-```bash
-# AI Provider - OpenRouter (for research planning and analysis)
-OPENROUTER_API_KEY=your-openrouter-api-key
-# GPU Providers
-NOVITA_API_KEY=your-novita-api-key
-# Storage (for artifacts)
-AWS_ACCESS_KEY_ID=your-aws-access-key
-AWS_SECRET_ACCESS_KEY=your-aws-secret-key
-AWS_S3_BUCKET=your-s3-bucket
-```
-## Getting API Keys
-### 1. OpenRouter API Key
-- Visit: https://openrouter.ai/
-- Sign up and get your API key
-- Supports multiple AI models including Grok-4, Claude, GPT-4, etc.
-### 2. Novita AI API Key
-- Visit: https://novita.ai/
-- Sign up for GPU access
-- Get your API key for job dispatch
-### 3. AWS S3 (Optional)
-- For artifact storage
-- Create S3 bucket for storing model checkpoints, logs, etc.
-## Setup Instructions
-1. Copy `.env.example` to `.env.local`
-2. Fill in your API keys
-3. Restart the development servers:
-   ```bash
-   npx convex dev
-   npm run dev
-   ```
-## Demo Mode
-If you don't have API keys yet, the system will work in demo mode with:
-- Mock AI responses for research planning
-- Mock GPU job dispatch
-- Simulated progress updates
diff --git a/REAL_AI_INTEGRATION.md b/REAL_AI_INTEGRATION.md
deleted file mode 100644
index d3799e4..0000000
--- a/REAL_AI_INTEGRATION.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Real AI Integration Setup
-The chatbot now uses the real Grok API via OpenRouter instead of mock responses!
-## Setup Instructions
-1. **Get an OpenRouter API Key:**
-   - Go to [OpenRouter.ai](https://openrouter.ai/)
-   - Sign up and get your API key
-   - The Grok-4-Fast model is free to use
-2. **Set Convex Environment Variables:**
-   The API key is now stored securely in Convex! Run these commands:
-   ```bash
-   npx convex env set OPENROUTER_API_KEY your_openrouter_api_key_here
-   npx convex env set SITE_URL http://localhost:3000
-   npx convex env set SITE_NAME "Open Superintelligence Lab"
-   ```
-3. **Test the Integration:**
-   - Start the development server: `npm run dev`
-   - Go to any project page (e.g., `/projects/test`)
-   - Click on the "AI Assistant" tab
-   - Start chatting with the real Grok AI!
-## What's Real vs Mock
-✅ **Real AI:** The chatbot responses are now powered by Grok-4-Fast via OpenRouter
-✅ **Real API:** Actual HTTP calls to OpenRouter's API through Convex
-✅ **Real Context:** The AI knows about your project and can have natural conversations
-✅ **Secure:** API key is stored securely in Convex environment variables
-🔄 **Still Mocked:** The tools (run_experiment, analyze_data, etc.) are still simulated for demo purposes
-## Features
-- **Natural Conversations:** Ask the AI anything about your research project
-- **Context Awareness:** The AI knows which project you're working on
-- **Tool Suggestions:** The AI can suggest using mock tools based on your requests
-- **Error Handling:** Graceful fallback if the API is unavailable
-- **Free Usage:** Grok-4-Fast is free on OpenRouter
-## Example Prompts to Try
-- "Help me design an experiment for machine learning"
-- "What should I analyze in my dataset?"
-- "How can I improve my model's performance?"
-- "Explain the latest trends in AI research"
-- "What tools do you have available?"
-The AI will respond naturally and may suggest using the available tools when appropriate!
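As a rough illustration of the call path described in REAL_AI_INTEGRATION.md, here is a minimal sketch of a Convex action hitting OpenRouter's chat-completions endpoint with the environment variables set above. The action name, model slug, and prompt wiring are assumptions for illustration; the repo's actual chat implementation may differ.

```typescript
// convex/chat.ts - hypothetical action name; reads the key set via `npx convex env set`
import { action } from "./_generated/server";
import { v } from "convex/values";

export const sendMessage = action({
  args: { message: v.string(), projectName: v.optional(v.string()) },
  handler: async (_ctx, args) => {
    const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
      method: "POST",
      headers: {
        Authorization: `Bearer ${process.env.OPENROUTER_API_KEY}`,
        "HTTP-Referer": process.env.SITE_URL ?? "",  // optional attribution headers
        "X-Title": process.env.SITE_NAME ?? "",
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        model: "x-ai/grok-4-fast:free",  // assumed model slug for the free Grok-4-Fast tier
        messages: [
          {
            role: "system",
            content: `You are a research assistant for project ${args.projectName ?? "unknown"}.`,
          },
          { role: "user", content: args.message },
        ],
      }),
    });

    if (!response.ok) {
      // Graceful fallback when the API is unavailable, as noted above
      return "The AI service is currently unavailable. Please try again later.";
    }

    const data = await response.json();
    return data.choices?.[0]?.message?.content ?? "";
  },
});
```

The `HTTP-Referer` and `X-Title` headers are OpenRouter's optional attribution headers, which is why `SITE_URL` and `SITE_NAME` are stored alongside the API key in the Convex environment.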
diff --git a/app/about/page.tsx b/app/about/page.tsx
index 569d1b4..85747d1 100644
--- a/app/about/page.tsx
+++ b/app/about/page.tsx
@@ -1,198 +1,248 @@
[Full rewrite of the About page. Removed: a static English page with its own header and footer, an "About Us" hero ("Building the future of open superintelligence through research, collaboration, and innovation."), "Our Mission" (making superintelligence development accessible and democratizing AI development), "Our Vision" (run the repository on anything from 1 GPU to 1 million GPUs, with 🚀 Auto-Scaling, ⚡ Auto-Tuning, and 🔧 Zero-Config cards), "Research Focus" (Blueberry LLM 🫐, the flagship Mixture of Experts language model, plus Open Research in AI safety, alignment, and governance), "Get Involved", and an "Our Values" grid (Open Source, Speed, Accessibility, Innovation).
Added: a 'use client' component that drops the next/link import, reads the current language from useLanguage() in @/components/providers/language-provider, and renders bilingual English/Chinese content: an "About Our Lab" hero with animated background effects ("Advancing AI research through open collaboration and innovation"), "Our Mission" (open research on the best open source projects and LLMs, built on open principles for the benefit of the whole AI community), "Research Focus" cards for Open Source Projects and Large Language Models, "Current Research Areas" cards for DeepSeek-V3.2-Exp Research (DeepSeek Sparse Attention and long-context efficiency) and GPT-OSS Research (OpenAI's open-source Mixture of Experts models with advanced reasoning and safety features), "Our Values" (🔓 Openness, 🚀 Innovation, 🤝 Collaboration), and a "Join Our Research" call to action linking to the GitHub repository.]
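The rewritten About page relies on a `useLanguage()` hook from `@/components/providers/language-provider`, which is not included in this diff. A plausible minimal shape for that provider, assuming a simple React context with an `'en'`/`'zh'` toggle, is sketched below; the real provider may expose more (for example, persistence or a translation helper).

```tsx
// components/providers/language-provider.tsx - assumed shape of the hook used above
'use client';

import { createContext, useContext, useState, ReactNode } from 'react';

type Language = 'en' | 'zh';

const LanguageContext = createContext<{
  language: Language;
  setLanguage: (lang: Language) => void;
}>({ language: 'en', setLanguage: () => {} });

export function LanguageProvider({ children }: { children: ReactNode }) {
  const [language, setLanguage] = useState<Language>('en');
  return (
    <LanguageContext.Provider value={{ language, setLanguage }}>
      {children}
    </LanguageContext.Provider>
  );
}

export function useLanguage() {
  return useContext(LanguageContext);
}
```

Pages then branch on `language === 'en'` for each string, exactly as the About page summary above describes.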
diff --git a/app/agents/page.tsx b/app/agents/page.tsx
deleted file mode 100644
index 84c16be..0000000
--- a/app/agents/page.tsx
+++ /dev/null
@@ -1,359 +0,0 @@
-'use client';
-
-import { useState } from 'react';
-import { useQuery, useMutation } from 'convex/react';
-import { api } from '../../convex/_generated/api';
-import { Button } from '@/components/ui/button';
-import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card';
-import { Badge } from '@/components/ui/badge';
-import { Input } from '@/components/ui/input';
-import { Label } from '@/components/ui/label';
-import { Textarea } from '@/components/ui/textarea';
-import { Progress } from '@/components/ui/progress';
-import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs';
-import {
-  Play,
-  Pause,
-  Square,
-  Brain,
-  Zap,
-  Target,
-  TrendingUp,
-  Clock,
-  Cpu,
-  DollarSign,
-  Trash2
-} from 'lucide-react';
-import { AppLayout } from '@/components/layout/app-layout';
-
-export default function AgentDashboard() {
-  const [selectedProject, setSelectedProject] = useState('');
-  const [researchGoal, setResearchGoal] = useState('');
-  const [codebase, setCodebase] = useState('');
-  const [isCreating, setIsCreating] = useState(false);
-
-  // Convex queries and mutations
-  const projects = useQuery(api.projects.list, {});
-  const runs = useQuery(
-    api.runs.listByProject,
-    selectedProject ? { projectId: selectedProject as any } : "skip"
-  );
-  const createAgentPlan = useMutation(api.agents.createAgentPlan);
-  const updateRunStatus = useMutation(api.runs.updateStatus);
-  const deleteRun = useMutation(api.runs.remove);
-
-  const handleStartAgent = async () => {
-    if (!selectedProject || !researchGoal.trim()) return;
-
-    setIsCreating(true);
-    try {
-      // Create a run first, then generate the plan
-      const runId = await createAgentPlan({
-        projectId: selectedProject as any,
-        researchGoal,
-        codebase: codebase || undefined,
-      });
-
-      console.log("Agent started with run ID:", runId);
-
-      // Clear the form
-      setResearchGoal('');
-      setCodebase('');
-
-      // Show success message
-      alert(`AI Agent launched successfully! Run ID: ${runId}`);
-    } catch (error) {
-      console.error("Error starting agent:", error);
-      alert("Failed to start agent. Check console for details.");
-    } finally {
-      setIsCreating(false);
-    }
-  };
-
-  const handleStopRun = async (runId: string) => {
-    try {
-      await updateRunStatus({
-        id: runId as any,
-        status: "paused"
-      });
-      console.log("Run paused successfully");
-    } catch (error) {
-      console.error("Error pausing run:", error);
-      alert("Failed to pause run. Check console for details.");
-    }
-  };
-
-  const handleDeleteRun = async (runId: string) => {
-    if (!confirm("Are you sure you want to delete this run? This action cannot be undone.")) {
-      return;
-    }
-
-    try {
-      await deleteRun({
-        id: runId as any
-      });
-      console.log("Run deleted successfully");
-    } catch (error) {
-      console.error("Error deleting run:", error);
-      alert("Failed to delete run. Check console for details.");
-    }
-  };
-
-  const selectedProjectData = projects?.find(p => p._id === selectedProject);
-
-  return (
[JSX body of the deleted AgentDashboard page: an AppLayout wrapper with an "AI Agent Dashboard" heading ("Deploy autonomous AI agents to conduct research experiments"), followed by an "Agent Control" card ("Configure and launch AI research agents") holding the project selector and the research goal and codebase context inputs.]
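For orientation, the deleted dashboard above calls `api.agents.createAgentPlan` with a project id, a research goal, and optional codebase context, and receives a run id back. A hypothetical sketch of such a mutation is shown below, assuming the `runs` table described in CONVEX_SETUP_COMPLETE.md and an internal planning action named `internal.agents.generatePlan`; both are assumptions, since `convex/agents.ts` is not part of this diff.

```typescript
// convex/agents.ts (hypothetical) - records a run, then hands off to a planning step
import { mutation } from "./_generated/server";
import { internal } from "./_generated/api";
import { v } from "convex/values";

export const createAgentPlan = mutation({
  args: {
    projectId: v.id("projects"),
    researchGoal: v.string(),
    codebase: v.optional(v.string()),
  },
  handler: async (ctx, args) => {
    // Insert the run immediately so the dashboard can subscribe to it in real time
    const runId = await ctx.db.insert("runs", {
      projectId: args.projectId,
      name: args.researchGoal.slice(0, 60),
      status: "planning",
      progress: 0,
      config: { researchGoal: args.researchGoal, codebase: args.codebase },
      cost: 0,
      startedAt: Date.now(),
    });

    // Kick off the (assumed) OpenRouter-backed planning action asynchronously
    await ctx.scheduler.runAfter(0, internal.agents.generatePlan, {
      runId,
      researchGoal: args.researchGoal,
      codebase: args.codebase,
    });

    return runId;
  },
});
```

This split (write the run record in a mutation, do the slow AI call in a scheduled action) matches the planning-then-execution workflow described in AGENT_SYSTEM_COMPLETE.md, and keeps the UI responsive while the plan is generated.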