diff --git a/llm/continuous_learning_system.py b/llm/continuous_learning_system.py
index 46afc9f..6cb75a8 100644
--- a/llm/continuous_learning_system.py
+++ b/llm/continuous_learning_system.py
@@ -17,6 +17,7 @@
 import asyncio
 import json
 import logging
+import pickle
 import time
 import os
 from typing import Dict, List, Any, Optional
@@ -24,7 +25,6 @@
 from datetime import datetime
 import numpy as np
 import hashlib
-import json
 from pathlib import Path
 
 # Import existing components
@@ -290,12 +290,6 @@ async def rollback_model(self, version_id: str) -> Dict[str, Any]:
             version_id: Version ID to rollback to
         """
         try:
-            copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
-            # Find version in history
-            version_path = self.model_dir / f"{version_id}.json"
-
-            if not version_path.exists():
-
             # Find version in history (try JSON first, then pickle for backward compatibility)
             json_path = self.model_dir / f"{version_id}.json"
             pkl_path = self.model_dir / f"{version_id}.pkl"
@@ -309,19 +303,11 @@ async def rollback_model(self, version_id: str) -> Dict[str, Any]:
                 with open(pkl_path, "rb") as f:
                     model_data = pickle.load(f)
             else:
-                master
                 return {
                     "success": False,
                     "error": f"Model version {version_id} not found",
                 }
 
-            copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
-            # Load the version
-            with open(version_path, "r") as f:
-                model_data = json.load(f)
-
-=======
-            master
 
             # Set as current model
             self.current_model_version = model_data["version_info"]
@@ -597,15 +583,7 @@ async def _create_model_version(
             training_data_size=self.training_stats["total_samples_processed"],
             quantum_optimized=self.quantum_connector.connected,
             file_path=str(self.model_dir / f"{version_id}.json"),
-            copilot/fix-213aa9e3-0b23-4bd9-9b0c-2eb2bc585c94
             checksum=hashlib.sha256(version_id.encode()).hexdigest(),
-
-            copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
-            checksum=hashlib.sha256(version_id.encode()).hexdigest(),
-
-            checksum=hashlib.md5(version_id.encode()).hexdigest(),
-            master
-            master
         )
 
         # Save model version using custom JSON encoder
@@ -615,18 +593,8 @@ async def _create_model_version(
             "model_state": "simulated_model_state",
         }
 
-        copilot/fix-213aa9e3-0b23-4bd9-9b0c-2eb2bc585c94
-        with open(version.file_path, "w") as f:
-            json.dump(model_data, f, indent=2, default=str)
-
-        copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
-        with open(version.file_path, "w") as f:
-            json.dump(model_data, f, indent=2, default=str)
-
         with open(version.file_path, "w", encoding="utf-8") as f:
             json.dump(model_data, f, cls=ModelVersionJSONEncoder, indent=2)
-        master
-        master
 
         # Update current version
        self.current_model_version = version
@@ -667,26 +635,16 @@ async def _training_loop(self):
 
     async def _load_or_create_model(self):
         """Load existing model or create new one"""
         try:
-            copilot/fix-94a3a2ef-451e-4b72-9782-aff6506fa546
-            # Look for existing model versions
-            model_files = list(self.model_dir.glob("*.json"))
-
             # Look for existing model versions (first try JSON, then fallback to PKL for backward compatibility)
             json_files = list(self.model_dir.glob("*.json"))
             pkl_files = list(self.model_dir.glob("*.pkl"))
 
-            copilot/fix-213aa9e3-0b23-4bd9-9b0c-2eb2bc585c94
-            with open(latest_file, "r") as f:
-                model_data = json.load(f)
-
             if json_files:
                 # Load latest JSON version
                 latest_file = max(json_files, key=lambda f: f.stat().st_mtime)
-                master
                 with open(latest_file, "r", encoding="utf-8") as f:
                     model_data = json.load(f, cls=ModelVersionJSONDecoder)
-                master
 
                 self.current_model_version = model_data["version_info"]
                 logger.info(
diff --git a/protocols/data_processor.py b/protocols/data_processor.py
index 971eaba..f8fbed1 100644
--- a/protocols/data_processor.py
+++ b/protocols/data_processor.py
@@ -14,7 +14,8 @@ def process(self):
         """Process data files and extract insights"""
         return task()
 
-def task():
+
+def task(data_path=None):
     """Process data files and extract insights"""
     # Use provided data path or try multiple possible data directories
     if data_path and os.path.exists(data_path) and os.path.isdir(data_path):
@@ -79,7 +80,8 @@ def task():
                     insights.append(
                         f"{filename}: {type(data).__name__} with {len(data) if isinstance(data, (list, dict)) else 1} items"
                     )
-                except:
+                except (json.JSONDecodeError, OSError):
+                    # Skip files that can't be read or parsed
                     pass
 
             elif filename.endswith(".csv"):
@@ -90,7 +92,8 @@ def task():
                     total_records += row_count
                     processed_count += 1
                     insights.append(f"{filename}: CSV with {row_count} rows")
-                except BaseException:
+                except (csv.Error, OSError):
+                    # Skip CSV files that can't be read or parsed
                     pass
 
     # Always return success if we got this far
diff --git a/protocols/user_data_processor.py b/protocols/user_data_processor.py
index b077e0b..8b5dd16 100644
--- a/protocols/user_data_processor.py
+++ b/protocols/user_data_processor.py
@@ -61,7 +61,8 @@ def task():
                         "size": size,
                     }
                 )
-            except:
+            except OSError:
+                # Ignore files we cannot access or whose size cannot be determined
                 pass
 
     # Generate insights
diff --git a/tests/test_mcp_compliance.py b/tests/test_mcp_compliance.py
index 3368a0b..d094c68 100644
--- a/tests/test_mcp_compliance.py
+++ b/tests/test_mcp_compliance.py
@@ -267,7 +267,8 @@ def run_compliance_check():
             content = py_file.read_text()
             if 'TODO:' in content or 'FIXME:' in content:
                 placeholder_count += 1
-        except:
+        except (OSError, UnicodeDecodeError):
+            # Intentionally ignore files that can't be read/decoded; skip them in this quick placeholder scan
             pass
 
     if placeholder_count > 0: