Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .env.template
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# Rename this file to .env and add your API keys
ANTHROPIC_API_KEY=your_anthropic_api_key_here
OPENAI_API_KEY=your_openai_api_key_here
GOOGLE_API_KEY=your_google_api_key_here
GOOGLE_API_KEY=your_google_api_key_here
LAMBDA_API_KEY=your_lambda_api_key_here
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ A Python terminal application that orchestrates structured, truth-seeking discus

## Features
- Structured 4-round Socratic discussions
- Three AI panelists (GPT-5, Claude 4.1, Gemini 2.5 Pro) with a Claude moderator
- Four AI panelists (GPT-5, Claude 4.1, Gemini 2.5 Pro, DeepSeek-Llama3.3-70B) with a Claude moderator
- Automatic session saving and replay functionality
- Clean terminal UI with rich formatting
- Graceful error handling and interruption support
Expand Down Expand Up @@ -55,7 +55,7 @@ python main.py
## Requirements

- Python 3.8+
- API keys for Anthropic, OpenAI, and Google AI
- API keys for Anthropic, OpenAI, Google AI, and Lambda AI
- Terminal with Unicode support for best display

## Troubleshooting
Expand Down
3 changes: 2 additions & 1 deletion config.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@
# Provider API keys, read from the environment (populated from .env via
# python-dotenv elsewhere in this module). Missing keys resolve to None;
# each client validates its own key at construction time.
API_KEYS = {
    "anthropic": os.getenv("ANTHROPIC_API_KEY"),
    "openai": os.getenv("OPENAI_API_KEY"),
    "google": os.getenv("GOOGLE_API_KEY"),
    "lambda": os.getenv("LAMBDA_API_KEY")
}

# Print diagnostic info
Expand Down
88 changes: 88 additions & 0 deletions llm/lambda_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
try:
import requests
REQUESTS_AVAILABLE = True
except ImportError:
print("Requests library not installed. Install with: pip install requests")
REQUESTS_AVAILABLE = False

from typing import List, Dict
from llm.base import LLMClient, retry_with_backoff
import asyncio
import logging
import json

class LambdaClient(LLMClient):
    """LLM client for the Lambda AI inference API.

    Talks to Lambda's OpenAI-compatible ``/chat/completions`` endpoint with
    the blocking ``requests`` library, offloaded to a worker thread so the
    caller's event loop is never blocked.
    """

    def __init__(self, api_key: str):
        """Validate the API key and configure endpoint/model defaults.

        Raises:
            ImportError: if the optional ``requests`` dependency is missing.
            ValueError: if the key is empty or still the .env placeholder.
        """
        if not REQUESTS_AVAILABLE:
            raise ImportError("Requests library not installed. Run: pip install requests")

        if not api_key or api_key == "your_lambda_api_key_here":
            raise ValueError("Invalid Lambda API key. Please check your .env file")

        self.api_key = api_key
        self.base_url = "https://api.lambda.ai/v1"
        self.model = "deepseek-llama3.3-70b"

        logging.info("Lambda client initialized successfully")

    async def generate_response(
        self,
        system_prompt: str,
        messages: List[Dict],
        temperature: float = 0.7,
        max_tokens: int = 2048
    ) -> str:
        """Generate a chat completion, retrying transient failures.

        Args:
            system_prompt: Instructions prepended as the ``system`` message.
            messages: Conversation history in OpenAI chat format.
            temperature: Sampling temperature forwarded to the API.
            max_tokens: Completion length cap forwarded to the API.

        Returns:
            The assistant message content from the first choice.

        Raises:
            RuntimeError: on a non-200 HTTP status from the API.
            ValueError: when the response body lacks the expected shape.
            requests.exceptions.RequestException: on transport failures.
        """
        async def _generate():
            # Lambda speaks the OpenAI chat format: system message first.
            formatted = [{"role": "system", "content": system_prompt}] + messages

            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }
            payload = {
                "model": self.model,
                "messages": formatted,
                "temperature": temperature,
                "max_tokens": max_tokens
            }

            # requests is blocking; run it in a thread to keep the loop free.
            try:
                response = await asyncio.to_thread(
                    requests.post,
                    f"{self.base_url}/chat/completions",
                    headers=headers,
                    json=payload,
                    timeout=60
                )
            except requests.exceptions.RequestException as e:
                logging.error(f"Lambda API request error: {e}")
                raise

            # Fail loudly on HTTP-level errors, logging exactly once.
            # (Previously a bare Exception was raised and then re-caught by a
            # trailing `except Exception`, logging the same failure twice.)
            if response.status_code != 200:
                error_msg = f"Lambda API error: {response.status_code} - {response.text}"
                logging.error(error_msg)
                raise RuntimeError(error_msg)

            try:
                response_data = response.json()
            except json.JSONDecodeError as e:
                logging.error(f"Lambda API JSON decode error: {e}")
                raise

            # EAFP extraction of choices[0].message.content; any missing or
            # wrongly-typed piece means the payload is malformed.
            try:
                return response_data["choices"][0]["message"]["content"]
            except (KeyError, IndexError, TypeError):
                raise ValueError("Empty or invalid response from Lambda API")

        return await retry_with_backoff(_generate)
7 changes: 5 additions & 2 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from llm.anthropic_client import ClaudeClient
from llm.openai_client import GPTClient
from llm.google_client import GeminiClient
from llm.lambda_client import LambdaClient
from moderator.turn_manager import TurnManager
from ui.terminal import TerminalUI
from storage.session_logger import SessionLogger
Expand All @@ -29,7 +30,8 @@ def __init__(self):
"claude_moderator": ClaudeClient(API_KEYS["anthropic"]),
"claude": ClaudeClient(API_KEYS["anthropic"]),
"gpt5": GPTClient(API_KEYS["openai"]),
"gemini": GeminiClient(API_KEYS["google"])
"gemini": GeminiClient(API_KEYS["google"]),
"deepseek": LambdaClient(API_KEYS["lambda"])
}
except Exception as e:
self.ui.console.print(f"[red]Error initializing LLM clients: {e}[/red]")
Expand All @@ -40,7 +42,8 @@ def __init__(self):
"claude_moderator": "Claude 4.1 Opus",
"claude": "Claude 4.1 Opus",
"gpt5": "GPT-5 Thinking",
"gemini": "Gemini 2.5 Pro"
"gemini": "Gemini 2.5 Pro",
"deepseek": "DeepSeek-Llama3.3-70B"
}

self.current_session_file = None
Expand Down
2 changes: 1 addition & 1 deletion moderator/turn_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

class TurnManager:
def __init__(self):
    """Set up the fixed panelist roster and the moderator identifier.

    The list order is the speaking order used each round; "deepseek" is
    the fourth panelist added alongside the original three.
    """
    self.panelist_ids = ["gpt5", "claude", "gemini", "deepseek"]
    self.moderator_id = "claude_moderator"

def determine_next_speaker(self, state: DiscussionState) -> str:
Expand Down
3 changes: 2 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,5 @@ anthropic>=0.7.0
openai>=1.0.0
google-generativeai>=0.3.0
rich>=13.0.0
python-dotenv>=1.0.0
python-dotenv>=1.0.0
requests>=2.25.0
127 changes: 125 additions & 2 deletions tests/test_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,7 @@ def test_turn_manager_initialization():
assert "gpt5" in manager.panelist_ids
assert "claude" in manager.panelist_ids
assert "gemini" in manager.panelist_ids
assert "deepseek" in manager.panelist_ids

def test_turn_manager_agenda_speaker():
"""Test that moderator speaks first in agenda round"""
Expand Down Expand Up @@ -218,6 +219,7 @@ def test_llm_client_initialization_mocked(mock_gemini_model, mock_gemini_config,
from llm.anthropic_client import ClaudeClient
from llm.openai_client import GPTClient
from llm.google_client import GeminiClient
from llm.lambda_client import LambdaClient

# These should not raise errors with valid keys
claude = ClaudeClient("sk-ant-api03-valid-key-for-testing")
Expand All @@ -229,6 +231,11 @@ def test_llm_client_initialization_mocked(mock_gemini_model, mock_gemini_config,
gemini = GeminiClient("AIza-valid-key-for-testing")
assert gemini.model is not None

# Test Lambda client initialization
lambda_client = LambdaClient("lambda-valid-key-for-testing")
assert lambda_client.api_key == "lambda-valid-key-for-testing"
assert lambda_client.model == "deepseek-llama3.3-70b"

def test_config_loading():
"""Test configuration loading"""
import os
Expand All @@ -238,7 +245,8 @@ def test_config_loading():
with patch.dict(os.environ, {
'ANTHROPIC_API_KEY': 'test_anthropic',
'OPENAI_API_KEY': 'test_openai',
'GOOGLE_API_KEY': 'test_google'
'GOOGLE_API_KEY': 'test_google',
'LAMBDA_API_KEY': 'test_lambda'
}):
# Reimport config to get mocked values
import importlib
Expand All @@ -247,4 +255,119 @@ def test_config_loading():

assert config.API_KEYS['anthropic'] == 'test_anthropic'
assert config.API_KEYS['openai'] == 'test_openai'
assert config.API_KEYS['google'] == 'test_google'
assert config.API_KEYS['google'] == 'test_google'
assert config.API_KEYS['lambda'] == 'test_lambda'

@pytest.mark.mock_api
@patch('requests.post')
def test_lambda_client_generate_response(mock_post):
    """Verify LambdaClient parses a mocked HTTP reply and sends a well-formed request."""
    from llm.lambda_client import LambdaClient
    import asyncio

    # Canned 200 response shaped like an OpenAI-compatible completion.
    canned = Mock()
    canned.status_code = 200
    canned.json.return_value = {
        "choices": [
            {
                "message": {
                    "content": "This is a test response from DeepSeek"
                }
            }
        ]
    }
    mock_post.return_value = canned

    client = LambdaClient("test-lambda-key")

    # Drive the coroutine to completion directly — no wrapper needed.
    reply = asyncio.run(
        client.generate_response(
            system_prompt="You are a helpful assistant",
            messages=[{"role": "user", "content": "Hello"}],
            temperature=0.7,
            max_tokens=100
        )
    )
    assert reply == "This is a test response from DeepSeek"

    # The HTTP call must hit the right endpoint with auth header and model.
    mock_post.assert_called_once()
    positional, keyword = mock_post.call_args
    assert positional[0] == "https://api.lambda.ai/v1/chat/completions"
    assert keyword["headers"]["Authorization"] == "Bearer test-lambda-key"
    assert keyword["json"]["model"] == "deepseek-llama3.3-70b"

def test_deepseek_integration_in_discussion():
    """Verify the DeepSeek panelist takes its turn and completes the round."""
    from moderator.turn_manager import TurnManager
    from models.discussion import DiscussionState, Round, Message, Role

    manager = TurnManager()
    state = DiscussionState(
        id="test",
        topic="AI Ethics",
        current_round=Round.EVIDENCE,
        current_speaker=None,
        turn_order=[],
        transcript=[],
        round_metadata={},
        status="in_progress",
        started_at=datetime.now(),
        completed_at=None
    )

    # The first three panelists have already presented their evidence.
    prior_turns = [
        ("gpt5", "GPT-5", "GPT-5 evidence"),
        ("claude", "Claude", "Claude evidence"),
        ("gemini", "Gemini", "Gemini evidence")
    ]
    for turn_number, (participant, model, text) in enumerate(prior_turns):
        state.transcript.append(
            Message(
                participant_id=participant,
                participant_model=model,
                role=Role.PANELIST,
                round=Round.EVIDENCE,
                content=text,
                timestamp=datetime.now(),
                turn_number=turn_number
            )
        )

    # With three of the four panelists done, DeepSeek is up next.
    assert manager.determine_next_speaker(state) == "deepseek"

    # Record DeepSeek's contribution as the fourth turn.
    state.transcript.append(
        Message(
            participant_id="deepseek",
            participant_model="DeepSeek-Llama3.3-70B",
            role=Role.PANELIST,
            round=Round.EVIDENCE,
            content="DeepSeek evidence",
            timestamp=datetime.now(),
            turn_number=3
        )
    )

    # Every panelist has now spoken, so the round should advance.
    assert manager.should_advance_round(state)
Loading