8 changes: 4 additions & 4 deletions agents/unified_transport_layer.py
@@ -5,7 +5,7 @@
from typing import Dict, Any, Optional, List
import time
import mmap
import pickle
import json
from dataclasses import dataclass

# Import our existing components
@@ -165,8 +165,8 @@ async def _shared_memory_transfer(
self, pipe: MojoMessagePipe, payload: Dict
) -> Dict:
"""Shared memory transfer for large payloads"""
# Serialize to shared memory
serialized = pickle.dumps(payload)
# Serialize to shared memory using JSON (secure)
serialized = json.dumps(payload, default=str).encode('utf-8')

if pipe.shared_memory:
# Write to shared memory
@@ -197,7 +197,7 @@ async def _pipe_transfer(self, pipe: MojoMessagePipe, payload: Dict) -> Dict:
return {
"status": "delivered",
"method": "pipe",
"serialized_size": len(pickle.dumps(payload)),
"serialized_size": len(json.dumps(payload, default=str).encode('utf-8')),
}

async def _handle_passing_transfer(
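The transport-layer change above replaces pickle with JSON for both the shared-memory and pipe paths, trading pickle's arbitrary-object support for a format that cannot execute code on load. A minimal sketch of the round trip, assuming payloads are plain dicts and that coercing non-JSON-native values to strings via `default=str` is acceptable:

```python
import json
from datetime import datetime, timezone

def serialize_payload(payload: dict) -> bytes:
    # default=str coerces values JSON can't represent (datetimes, Paths, ...)
    # into strings, so the bytes are safe to drop into shared memory or a pipe.
    return json.dumps(payload, default=str).encode("utf-8")

def deserialize_payload(raw: bytes) -> dict:
    # json.loads never executes code, unlike pickle.loads on untrusted bytes.
    return json.loads(raw.decode("utf-8"))

payload = {"task": "transfer", "created_at": datetime.now(timezone.utc)}
raw = serialize_payload(payload)
print(len(raw))                  # the value the "serialized_size" field reports
print(deserialize_payload(raw))  # datetimes come back as plain strings
```

The trade-off is fidelity: values pickle would round-trip exactly (datetimes, sets, custom classes) come back as strings, so receivers that need typed values must parse them explicitly.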
12 changes: 9 additions & 3 deletions connectors/mcp_debug_tool.py
@@ -4,6 +4,11 @@
"""

import asyncio
<<<<<<< copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
import traceback
import os
=======
>>>>>>> master
from datetime import datetime, timezone
from typing import Dict, Any, Optional, List
from dataclasses import dataclass, asdict
@@ -606,7 +611,7 @@ def _estimate_quantum_efficiency(self, code: str) -> str:
"tools": [
{
"name": "DebugTool",
"endpoint": "https://your-gcp-api/v1/reason",
"endpoint": "https://api.example.com/v1/reason",
"type": "debug",
"schema": {
"code": {
@@ -661,7 +666,7 @@ def _estimate_quantum_efficiency(self, code: str) -> str:
"version": "1.0.0",
"authentication": {
"type": "oauth2",
"token_url": "https://your-gcp-api/oauth2/token",
"token_url": "https://api.example.com/oauth2/token",
"scopes": ["https://www.googleapis.com/auth/cloud-platform"],
},
"timeout": 30000,
@@ -679,7 +684,8 @@ def _estimate_quantum_efficiency(self, code: str) -> str:
async def example_usage():
"""Example usage of the MCP Debug Tool"""
async with MCPDebugTool(
gcp_endpoint="https://your-gcp-api", auth_token="your-oauth-token"
gcp_endpoint=os.getenv("GCP_API_ENDPOINT", "https://api.example.com"),
auth_token=os.getenv("GCP_AUTH_TOKEN", "development-token")
) as debug_tool:

# Debug quantum code
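Beyond neutralizing the placeholder `your-gcp-api` URLs, the usage example now pulls its endpoint and token from the environment. A short sketch of that pattern in isolation, assuming the `GCP_API_ENDPOINT` and `GCP_AUTH_TOKEN` variable names used in the diff:

```python
import os

# Fall back to non-secret development defaults so the example still runs
# locally; real deployments are expected to set both variables.
gcp_endpoint = os.getenv("GCP_API_ENDPOINT", "https://api.example.com")
auth_token = os.getenv("GCP_AUTH_TOKEN", "development-token")

if auth_token == "development-token":
    # Make it obvious when the placeholder credential is in use rather than
    # silently calling a real API with a dummy token.
    print("Warning: GCP_AUTH_TOKEN not set; using the development placeholder.")
```

Keeping credentials out of source also means the example no longer has to change between environments.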
34 changes: 31 additions & 3 deletions llm/continuous_learning_system.py
@@ -24,7 +24,7 @@
from datetime import datetime
import numpy as np
import hashlib
import pickle
import json
from pathlib import Path

# Import existing components
@@ -290,6 +290,12 @@ async def rollback_model(self, version_id: str) -> Dict[str, Any]:
version_id: Version ID to rollback to
"""
try:
<<<<<<< copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
# Find version in history
version_path = self.model_dir / f"{version_id}.json"

if not version_path.exists():

# Find version in history (try JSON first, then pickle for backward compatibility)
json_path = self.model_dir / f"{version_id}.json"
pkl_path = self.model_dir / f"{version_id}.pkl"
@@ -303,11 +309,19 @@ async def rollback_model(self, version_id: str) -> Dict[str, Any]:
with open(pkl_path, "rb") as f:
model_data = pickle.load(f)
else:
>>>>>>> master
return {
"success": False,
"error": f"Model version {version_id} not found",
}

<<<<<<< copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
# Load the version
with open(version_path, "r") as f:
model_data = json.load(f)

=======
>>>>>>> master
# Set as current model
self.current_model_version = model_data["version_info"]

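The two rollback hunks above still carry unresolved conflict markers: the copilot branch looks up only a `.json` version file, while master tries JSON first and falls back to legacy `.pkl` files. A sketch of one way the resolved lookup could read, assuming `model_dir` is a `pathlib.Path`; this is a hypothetical resolution, not what the PR currently contains:

```python
import json
import pickle  # kept only for reading legacy .pkl versions written before the migration
from pathlib import Path
from typing import Any, Dict, Optional

def load_model_version(model_dir: Path, version_id: str) -> Optional[Dict[str, Any]]:
    """Return the stored model data for version_id, or None if no file exists."""
    json_path = model_dir / f"{version_id}.json"
    pkl_path = model_dir / f"{version_id}.pkl"

    if json_path.exists():
        # Preferred path: versions written by the new JSON-based code.
        with open(json_path, "r", encoding="utf-8") as f:
            return json.load(f)
    if pkl_path.exists():
        # Backward compatibility; acceptable only because these files were
        # produced by this application, not by untrusted callers.
        with open(pkl_path, "rb") as f:
            return pickle.load(f)
    return None
```

The caller can then return the `{"success": False, ...}` error dict whenever this helper yields `None`.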
@@ -583,7 +597,11 @@ async def _create_model_version(
training_data_size=self.training_stats["total_samples_processed"],
quantum_optimized=self.quantum_connector.connected,
file_path=str(self.model_dir / f"{version_id}.json"),
<<<<<<< copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
checksum=hashlib.sha256(version_id.encode()).hexdigest(),
=======
checksum=hashlib.md5(version_id.encode()).hexdigest(),
>>>>>>> master
)

# Save model version using custom JSON encoder
@@ -593,8 +611,13 @@ async def _load_or_create_model(self):
"model_state": "simulated_model_state",
}

<<<<<<< copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
with open(version.file_path, "w") as f:
json.dump(model_data, f, indent=2, default=str)

with open(version.file_path, "w", encoding="utf-8") as f:
json.dump(model_data, f, cls=ModelVersionJSONEncoder, indent=2)
>>>>>>> master

# Update current version
self.current_model_version = version
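This hunk is also conflicted: one side saves with `json.dump(..., default=str)` and a SHA-256 checksum, the other with a `ModelVersionJSONEncoder` and MD5. Only the encoder's name appears in the diff, so the following is an assumed sketch of what such an encoder might look like for a dataclass-based version record, using the SHA-256 variant:

```python
import hashlib
import json
from dataclasses import asdict, dataclass, is_dataclass
from datetime import datetime, timezone

class ModelVersionJSONEncoder(json.JSONEncoder):
    """Handle dataclasses and datetimes that the stock encoder rejects."""

    def default(self, obj):
        if is_dataclass(obj) and not isinstance(obj, type):
            return asdict(obj)
        if isinstance(obj, datetime):
            return obj.isoformat()
        return super().default(obj)

@dataclass
class ModelVersion:  # illustrative subset of the real record's fields
    version_id: str
    created_at: datetime
    checksum: str

version_id = "v42"
version = ModelVersion(
    version_id=version_id,
    created_at=datetime.now(timezone.utc),
    # SHA-256 instead of MD5: same role as a stable digest, but it no longer
    # trips security linters that flag MD5 by default.
    checksum=hashlib.sha256(version_id.encode()).hexdigest(),
)
print(json.dumps({"version_info": version}, cls=ModelVersionJSONEncoder, indent=2))
```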
@@ -635,13 +658,18 @@ async def _training_loop(self):
async def _load_or_create_model(self):
"""Load existing model or create new one"""
try:
<<<<<<< copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
# Look for existing model versions
model_files = list(self.model_dir.glob("*.json"))

# Look for existing model versions (first try JSON, then fallback to PKL for backward compatibility)
json_files = list(self.model_dir.glob("*.json"))
pkl_files = list(self.model_dir.glob("*.pkl"))

if json_files:
# Load latest JSON version
latest_file = max(json_files, key=lambda f: f.stat().st_mtime)
>>>>>>> master

with open(latest_file, "r", encoding="utf-8") as f:
model_data = json.load(f, cls=ModelVersionJSONDecoder)
@@ -654,8 +682,8 @@ async def _load_or_create_model(self):
# Fallback to pickle files for backward compatibility
latest_file = max(pkl_files, key=lambda f: f.stat().st_mtime)

with open(latest_file, "rb") as f:
model_data = pickle.load(f)
with open(latest_file, "r") as f:
model_data = json.load(f)

self.current_model_version = model_data["version_info"]
logger.info(
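`_load_or_create_model` picks the newest version file by modification time, preferring JSON and keeping `.pkl` globbing only for backward compatibility; note that the final hunk opens that legacy file in text mode and parses it with `json.load`, which would fail on a genuine pickle file. A small sketch of the selection step on its own, assuming version files sit directly in `model_dir`:

```python
from pathlib import Path
from typing import Optional

def latest_version_file(model_dir: Path) -> Optional[Path]:
    # Prefer JSON versions; fall back to legacy .pkl files only when no JSON
    # version has been written yet.
    candidates = list(model_dir.glob("*.json")) or list(model_dir.glob("*.pkl"))
    if not candidates:
        return None
    # Most recently modified wins; sufficient while versions are written
    # sequentially by a single process. A monotonic version id encoded in the
    # filename would be more robust against clock changes.
    return max(candidates, key=lambda f: f.stat().st_mtime)
```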
45 changes: 31 additions & 14 deletions protocols/multimodal_llm_analyzer.py
@@ -6,7 +6,6 @@
from datetime import datetime
from typing import Dict, List, Any
import numpy as np
import random


def task():
@@ -75,31 +74,49 @@ def _analyze_massive_user_collection() -> Dict[str, Any]:
folder_name = os.path.basename(base_path)
analysis["folders_scanned"].append(folder_name)

# Get total file count for this directory
# Get total file count for this directory using secure subprocess
try:
import subprocess

result = subprocess.run(
["find", base_path, "-type", "f"],
capture_output=True,
text=True,
)
all_files = (
result.stdout.strip().split("\n") if result.stdout.strip() else []
)
folder_file_count = len(all_files)
import shutil

# Use absolute path for find command for security
find_path = shutil.which("find")
if not find_path:
# Fallback to Python implementation if find is not available
all_files = []
for root, dirs, files in os.walk(base_path):
for file in files:
all_files.append(os.path.join(root, file))
folder_file_count = len(all_files)
else:
# Validate and sanitize the base_path to prevent command injection
if not os.path.exists(base_path) or not os.path.isdir(base_path):
raise ValueError(f"Invalid directory path: {base_path}")

result = subprocess.run(
[find_path, os.path.abspath(base_path), "-type", "f"],
capture_output=True,
text=True,
timeout=30, # Add timeout for security
)
all_files = (
result.stdout.strip().split("\n") if result.stdout.strip() else []
)
folder_file_count = len(all_files)

analysis["directory_stats"][folder_name] = {
"total_files": folder_file_count,
"sample_analyzed": 0,
}
analysis["total_files"] += folder_file_count

# Use statistical sampling for massive datasets
# Use systematic sampling for massive datasets (deterministic)
if folder_file_count > 1000:
# Sample 5% or max 2000 files, whichever is smaller
sample_size = min(int(folder_file_count * 0.05), 2000)
sampled_files = random.sample(all_files, sample_size)
# Systematic sampling - take every nth file for reproducible results
step = max(1, len(all_files) // sample_size)
sampled_files = all_files[::step][:sample_size]
analysis["directory_stats"][folder_name][
"sample_analyzed"
] = sample_size
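The analyzer drops `random.sample` in favour of deterministic systematic sampling (every nth file) and guards the `find` call with `shutil.which`, an absolute path, and a timeout, falling back to `os.walk` when `find` is unavailable. A sketch of just the sampling step, assuming the 5% rate and 2000-file cap from the diff:

```python
from typing import List

def systematic_sample(all_files: List[str], fraction: float = 0.05, cap: int = 2000) -> List[str]:
    """Take every nth file so repeated runs analyze the same sample."""
    if not all_files:
        return []
    sample_size = max(1, min(int(len(all_files) * fraction), cap))
    step = max(1, len(all_files) // sample_size)
    # A strided slice is deterministic, unlike random.sample, so the reported
    # statistics are reproducible across runs over the same directory tree.
    return all_files[::step][:sample_size]

files = [f"file_{i:05d}.txt" for i in range(50_000)]
print(len(systematic_sample(files)))  # 2000: 5% of 50,000, capped at 2,000
```

One caveat: if the listing is ordered (for example grouped by subdirectory), a fixed stride can under-sample some groups; that is the price paid for reproducibility over random sampling.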