diff --git a/codegen-on-oss/codegen_on_oss/analyzers/README_codebase_ai.md b/codegen-on-oss/codegen_on_oss/analyzers/README_codebase_ai.md new file mode 100644 index 000000000..441b09840 --- /dev/null +++ b/codegen-on-oss/codegen_on_oss/analyzers/README_codebase_ai.md @@ -0,0 +1,94 @@ +# Codebase AI Module + +The `codebase_ai.py` module provides AI-powered code analysis and generation capabilities for the Codegen analyzer system. It enables the generation of system prompts, context, and tools for AI models to analyze and generate code. + +## Features + +- **System Prompt Generation**: Create tailored prompts for AI models to analyze and modify code +- **Context Generation**: Format code and additional information for AI models +- **Tool Definitions**: Define tools for AI models to interact with the codebase +- **Flagging System**: Determine whether code elements should be flagged for modification + +## Usage + +### Basic Usage + +```python +from codegen_on_oss.analyzers.codebase_ai import CodebaseAI, generate_system_prompt, generate_context + +# Create a CodebaseAI instance +codebase_ai = CodebaseAI() + +# Generate a system prompt +system_prompt = codebase_ai.generate_system_prompt() + +# Generate a system prompt with a target +system_prompt = codebase_ai.generate_system_prompt(target=editable_object) + +# Generate a system prompt with context +system_prompt = codebase_ai.generate_system_prompt(context=context_object) + +# Generate a system prompt with both target and context +system_prompt = codebase_ai.generate_system_prompt(target=editable_object, context=context_object) + +# Generate tools for AI models +tools = codebase_ai.generate_tools() +``` + +### Integration with Analyzers + +The `codebase_ai.py` module can be integrated with other analyzers to provide AI-powered analysis capabilities: + +```python +from codegen_on_oss.analyzers.codebase_ai import CodebaseAI +from codegen_on_oss.analyzers.codebase_analyzer import CodebaseAnalyzer + + +class 
AICodebaseAnalyzer(CodebaseAnalyzer): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.codebase_ai = CodebaseAI() + + def analyze_with_ai(self, file): + # Generate a system prompt for the file + system_prompt = self.codebase_ai.generate_system_prompt(target=file) + + # Use the system prompt with an AI model + # ... +``` + +## API Reference + +### Functions + +- `generate_system_prompt(target=None, context=None)`: Generate a system prompt for AI-powered code analysis and generation +- `generate_flag_system_prompt(target, context=None)`: Generate a system prompt for determining whether to flag a code element +- `generate_context(context=None)`: Generate a context string for AI models +- `generate_tools()`: Generate a list of tools for AI models +- `generate_flag_tools()`: Generate a list of tools for flagging code elements + +### Classes + +#### CodebaseAI + +The `CodebaseAI` class provides methods for generating system prompts, context, and tools for AI models to analyze and generate code. + +##### Methods + +- `generate_system_prompt(target=None, context=None)`: Generate a system prompt for AI-powered code analysis and generation +- `generate_flag_system_prompt(target, context=None)`: Generate a system prompt for determining whether to flag a code element +- `generate_context(context=None)`: Generate a context string for AI models +- `generate_tools()`: Generate a list of tools for AI models +- `generate_flag_tools()`: Generate a list of tools for flagging code elements + +## Examples + +See the `examples/codebase_ai_example.py` file for a complete example of how to use the `codebase_ai.py` module. + +## Testing + +The `codebase_ai.py` module includes comprehensive tests in the `tests/analyzers/test_codebase_ai.py` file. 
Run the tests using pytest: + +```bash +pytest tests/analyzers/test_codebase_ai.py +``` diff --git a/codegen-on-oss/codegen_on_oss/analyzers/__init__.py b/codegen-on-oss/codegen_on_oss/analyzers/__init__.py index 09e92d85a..0362e9a95 100644 --- a/codegen-on-oss/codegen_on_oss/analyzers/__init__.py +++ b/codegen-on-oss/codegen_on_oss/analyzers/__init__.py @@ -14,6 +14,7 @@ CodeQualityPlugin, DependencyPlugin, ) + # Main API interface from codegen_on_oss.analyzers.api import ( CodegenAnalyzerAPI, @@ -23,10 +24,20 @@ api_get_visualization, create_api, ) + # Legacy analyzer interfaces (for backward compatibility) from codegen_on_oss.analyzers.base_analyzer import BaseCodeAnalyzer + # Core analysis modules from codegen_on_oss.analyzers.code_quality import CodeQualityAnalyzer +from codegen_on_oss.analyzers.codebase_ai import ( + CodebaseAI, + generate_context, + generate_flag_system_prompt, + generate_flag_tools, + generate_system_prompt, + generate_tools, +) from codegen_on_oss.analyzers.codebase_analysis import ( get_class_summary, get_codebase_summary, @@ -39,9 +50,11 @@ ) from codegen_on_oss.analyzers.codebase_analyzer import CodebaseAnalyzer from codegen_on_oss.analyzers.dependencies import DependencyAnalyzer + # Diff tracking from codegen_on_oss.analyzers.diff_lite import ChangeType, DiffLite from codegen_on_oss.analyzers.error_analyzer import CodebaseAnalyzer as ErrorAnalyzer + # Issue tracking system from codegen_on_oss.analyzers.issues import ( AnalysisType, @@ -51,6 +64,7 @@ IssueCollection, IssueSeverity, ) + # Analysis result models from codegen_on_oss.analyzers.models.analysis_result import ( AnalysisResult, @@ -60,49 +74,55 @@ ) __all__ = [ - # Main API - "CodegenAnalyzerAPI", - "create_api", - "api_analyze_codebase", - "api_analyze_pr", - "api_get_visualization", - "api_get_static_errors", + # Analysis results + "AnalysisResult", + "AnalysisType", # Modern architecture "AnalyzerManager", "AnalyzerPlugin", "AnalyzerRegistry", + # Legacy interfaces (for 
backward compatibility) + "BaseCodeAnalyzer", + # Diff tracking + "ChangeType", + "CodeLocation", + # Core analyzers + "CodeQualityAnalyzer", "CodeQualityPlugin", + "CodeQualityResult", + # AI-powered code analysis + "CodebaseAI", + "CodebaseAnalyzer", + # Main API + "CodegenAnalyzerAPI", + "DependencyAnalyzer", "DependencyPlugin", + "DependencyResult", + "DiffLite", + "ErrorAnalyzer", # Issue tracking "Issue", + "IssueCategory", "IssueCollection", "IssueSeverity", - "AnalysisType", - "IssueCategory", - "CodeLocation", - # Analysis results - "AnalysisResult", - "CodeQualityResult", - "DependencyResult", "PrAnalysisResult", - # Core analyzers - "CodeQualityAnalyzer", - "DependencyAnalyzer", + "api_analyze_codebase", + "api_analyze_pr", + "api_get_static_errors", + "api_get_visualization", + "create_api", + "generate_context", + "generate_flag_system_prompt", + "generate_flag_tools", + "generate_system_prompt", + "generate_tools", + "get_class_summary", # Codebase analysis utilities "get_codebase_summary", + "get_dependency_graph", + "get_file_complexity_metrics", "get_file_summary", - "get_class_summary", "get_function_summary", - "get_symbol_summary", - "get_dependency_graph", "get_symbol_references", - "get_file_complexity_metrics", - # Diff tracking - "ChangeType", - "DiffLite", - # Legacy interfaces (for backward compatibility) - "BaseCodeAnalyzer", - "CodebaseAnalyzer", - "ErrorAnalyzer", + "get_symbol_summary", ] - diff --git a/codegen-on-oss/codegen_on_oss/analyzers/codebase_ai.py b/codegen-on-oss/codegen_on_oss/analyzers/codebase_ai.py new file mode 100644 index 000000000..f8fd827af --- /dev/null +++ b/codegen-on-oss/codegen_on_oss/analyzers/codebase_ai.py @@ -0,0 +1,364 @@ +#!/usr/bin/env python3 +""" +Codebase AI Module + +This module provides AI-powered code analysis and generation capabilities, +including system prompt generation, context generation for AI models, +and guidelines for generating and modifying code. 
+""" + +import logging +from typing import Any, TypeVar + +try: + from codegen.sdk.core.file import File + from codegen.sdk.core.interfaces.editable import Editable +except ImportError: + # Define fallback classes for when SDK is not available + class EditableFallback: + @property + def extended_source(self) -> str: + return "" + + class FileFallback: + @property + def source(self) -> str: + return "" + + # Use fallback classes + Editable = EditableFallback + File = FileFallback + + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + handlers=[logging.StreamHandler()], +) +logger = logging.getLogger(__name__) + +# Type variable for context +T = TypeVar("T", bound=str | Editable | File | list[Any] | dict[str, Any]) + + +def generate_system_prompt( + target: Editable | None = None, context: T | None = None +) -> str: + """ + Generate a system prompt for AI-powered code analysis and generation. + + Args: + target: The target code to analyze or modify + context: Additional context for the analysis + + Returns: + A system prompt string for AI models + """ + prompt = """Hey CodegenBot! +You are an incredibly precise and thoughtful AI who helps developers accomplish complex transformations on their codebase. +You always provide clear, concise, and accurate responses. +When dealing with code, you maintain the original structure and style unless explicitly asked to change it. +""" + if target: + prompt += f""" +The user has just requested a response on the following code snippet: + +[[[CODE SNIPPET BEGIN]]] +{target.extended_source} +[[[CODE SNIPPET END]]] + +Your job is to follow the instructions of the user, given the context provided. +""" + else: + prompt += """ +Your job is to follow the instructions of the user. +""" + + if context: + prompt += """ +The user has provided some additional context that you can use to assist with your response. 
+You may use this context to inform your answer, but you're not required to directly include it in your response. + +Here is the additional context: +""" + prompt += generate_context(context) + + prompt += """ +Please ensure your response is accurate and relevant to the user's request. You may think out loud in the response. + + +Generally, when responding with an answer, try to follow these general "ground rules": +Remember, these are just rules you should follow by default. If the user explicitly asks for something else, you should follow their instructions instead. + +> When generating new code or new classes, such as "create me a new function that does XYZ" or "generate a helper function that does XYZ", try to: + +- Do not include extra indentation that is not necessary, unless the user explicitly asks for something else. +- Include as much information as possible. Do not write things like "# the rest of the class" or "# the rest of the method", unless the user explicitly asks for something else. +- Do try to include comments and well-documented code, unless the user explicitly asks for something else. +- Only return the NEW code without re-iterating any existing code that the user has provided to you, unless the user explicitly asks for something else. +- Do not include any code that the user has explicitly asked you to remove, unless the user explicitly asks for something else. + + +> When changing existing code, such as "change this method to do XYZ" or "update this function to do XYZ" or "remove all instances of XYZ from this class", try to: + +- Do not include extra indentation that is not necessary, unless the user explicitly asks for something else. +- Include the entire context of the code that the user has provided to you, unless the user explicitly asks for something else. +- Include as much information as possible. Do not write things like "# the rest of the class" or "# the rest of the method", unless the user explicitly asks for something else. 
+- Do try to include comments and well-documented code, unless the user explicitly asks for something else. +- Avoid editing existing code that does not need editing, unless the user explicitly asks for something else. +- When asked to modify a very small or trivial part of the code, try to only modify the part that the user has asked you to modify, unless the user explicitly asks for something else. +- If asked to make improvements, try not to change existing function signatures, decorators, or returns, unless the user explicitly asks for something else. + + +> When dealing with anything related to docstrings, for example "Generate a google style docstring for this method." or "Convert these existing docs to google style docstrings.", try to: + +- Do not include extra indentation that is not necessary, unless the user explicitly asks for something else. +- Use the google style docstring format first, unless the user explicitly asks for something else. +- If doing google style docstrings, do not include the "self" or "cls" argument in the list of arguments, unless the user explicitly asks for something else. +- Try to have at least one line of the docstring to be a summary line, unless the user explicitly asks for something else. +- Try to keep each line of the docstring to be less than 80 characters, unless the user explicitly asks for something else. +- Try to keep any existing before and after examples in the docstring, unless the user explicitly asks for something else. +- Only respond with the content of the docstring, without any additional context like the function signature, return type, or parameter types, unless the user explicitly asks for something else. +- Do not include formatting like triple quotes in your response, unless the user explicitly asks for something else. +- Do not include any markdown formatting, unless the user explicitly asks for something else. 
+ +If you need a refresher on what google-style docstrings are: +- The first line is a summary line. +- The second line is a description of the method. +- The third line is a list of arguments. +- The fourth line is a list of returns. +Google docstrings may also include other information like exceptions and examples. +When generating NEW code or NEW classes, also try to generate docstrings alongside the code with the google style docstring format, +unless the user explicitly asks for something else. + + +> When dealing with anything related to comments, such as "write me a comment for this method" or "change this existing comment to be more descriptive", try to: + +- Do not include extra indentation that is not necessary, unless the user explicitly asks for something else. +- Do not include any comment delimiters like "#" or "//" unless the user explicitly asks for something else. +- Do not include any markdown formatting, unless the user explicitly asks for something else. +- Try to keep each line of the comment to be less than 80 characters, unless the user explicitly asks for something else. +- If you are only requested to edit or create a comment, do not include any code or other context that the user has provided to you, unless the user explicitly asks for something else. + + +> When dealing with single-word or single-phrase answers, like "what is a better name for this function" or "what is a better name for this class", try to: + +- Only respond with the content of the new name, without any additional context like the function signature, return type, or parameter types, unless the user explicitly asks for something else. +- Do not include formatting like triple quotes in your response, unless the user explicitly asks for something else. +- Do not include any markdown formatting, unless the user explicitly asks for something else. 
+- Do not include any code or other context that the user has provided to you, unless the user explicitly asks for something else. + +REMEMBER: When giving the final answer, you must use the set_answer tool to provide the final answer that will be used in subsequent operations such as writing to a file, renaming, or editing. + """ + + return prompt + + +def generate_flag_system_prompt(target: Editable, context: T | None = None) -> str: + """ + Generate a system prompt for determining whether to flag a code element. + + Args: + target: The target code to analyze + context: Additional context for the analysis + + Returns: + A system prompt string for AI models + """ + prompt = f"""Hey CodegenBot! +You are an incredibly precise and thoughtful AI who helps developers accomplish complex transformations on their codebase. + +You are now tasked with determining whether to flag the symbol, file, attribute, or message using AI. +Flagging a symbol means to mark it as a chunk of code that should be modified in a later step. +You will be given the user prompt, and the code snippet that the user is requesting a response on. +Use the should_flag tool to return either a true or false answer to the question of whether to flag the symbol, file, attribute, or message. + +Here is the code snippet that the user is requesting a response on: + +[[[CODE SNIPPET BEGIN]]] +{target.extended_source} +[[[CODE SNIPPET END]]] +""" + + if context: + prompt += """ +The user has provided some additional context that you can use to assist with your response. +You may use this context to inform your answer, but you're not required to directly include it in your response. + +Here is the additional context: +""" + prompt += generate_context(context) + + prompt += """ +Please intelligently determine whether the user's request on the given code snippet should be flagged. 
+Remember, use the should_flag tool to return either a true or false answer to the question of whether to flag the symbol, file, attribute, or message +as a chunk of code that should be modified, edited, or changed in a later step. + """ + + return prompt + + +def generate_context(context: T | None = None) -> str: + """ + Generate a context string for AI models. + + Args: + context: The context to generate a string for + + Returns: + A formatted context string + """ + output = "" + if not context: + return output + else: + if isinstance(context, str): + output += f"====== Context ======\n{context}\n====================\n\n" + elif isinstance(context, Editable): + # Get class name + output += f"====== {context.__class__.__name__} ======\n" + output += f"{context.extended_source}\n" + output += "====================\n\n" + elif isinstance(context, File): + output += f"====== {context.__class__.__name__}======\n" + output += f"{context.source}\n" + output += "====================\n\n" + elif isinstance(context, list): + for item in context: + output += generate_context(item) + elif isinstance(context, dict): + for key, value in context.items(): + output += f"[[[ {key} ]]]\n" + output += generate_context(value) + output += "\n\n" + return output + + +def generate_tools() -> list[dict[str, Any]]: + """ + Generate a list of tools for AI models. + + Returns: + A list of tool definitions + """ + return [ + { + "type": "function", + "function": { + "name": "set_answer", + "description": "Use this function to set the final answer to the given prompt. This answer will be used in subsequent operations such as writing to a file, renaming, or editing.", + "parameters": { + "type": "object", + "properties": { + "answer": { + "type": "string", + "description": "The final answer to the given prompt. 
Do not include any uneccesary context or commentary in your response.", + }, + }, + "required": ["answer"], + }, + }, + } + ] + + +def generate_flag_tools() -> list[dict[str, Any]]: + """ + Generate a list of tools for flagging code elements. + + Returns: + A list of tool definitions + """ + return [ + { + "type": "function", + "function": { + "name": "should_flag", + "description": "Use this function to determine whether to flag the symbol, file, attribute, or message using AI.", + "parameters": { + "type": "object", + "properties": { + "flag": { + "type": "boolean", + "description": "Whether to flag the symbol, file, attribute, or message.", + }, + }, + "required": ["flag"], + }, + }, + } + ] + + +class CodebaseAI: + """ + AI-powered code analysis and generation capabilities. + + This class provides methods for generating system prompts, context, + and tools for AI models to analyze and generate code. + """ + + def __init__(self): + """Initialize the CodebaseAI instance.""" + self.logger = logging.getLogger(__name__) + + def generate_system_prompt( + self, target: Editable | None = None, context: T | None = None + ) -> str: + """ + Generate a system prompt for AI-powered code analysis and generation. + + Args: + target: The target code to analyze or modify + context: Additional context for the analysis + + Returns: + A system prompt string for AI models + """ + return generate_system_prompt(target, context) + + def generate_flag_system_prompt( + self, target: Editable, context: T | None = None + ) -> str: + """ + Generate a system prompt for determining whether to flag a code element. + + Args: + target: The target code to analyze + context: Additional context for the analysis + + Returns: + A system prompt string for AI models + """ + return generate_flag_system_prompt(target, context) + + def generate_context(self, context: T | None = None) -> str: + """ + Generate a context string for AI models. 
+ + Args: + context: The context to generate a string for + + Returns: + A formatted context string + """ + return generate_context(context) + + def generate_tools(self) -> list[dict[str, Any]]: + """ + Generate a list of tools for AI models. + + Returns: + A list of tool definitions + """ + return generate_tools() + + def generate_flag_tools(self) -> list[dict[str, Any]]: + """ + Generate a list of tools for flagging code elements. + + Returns: + A list of tool definitions + """ + return generate_flag_tools() diff --git a/codegen-on-oss/examples/codebase_ai_example.py b/codegen-on-oss/examples/codebase_ai_example.py new file mode 100644 index 000000000..aedbc9132 --- /dev/null +++ b/codegen-on-oss/examples/codebase_ai_example.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +""" +Example usage of the codebase_ai module. + +This example demonstrates how to use the codebase_ai module to generate +system prompts, context, and tools for AI-powered code analysis and generation. +""" + +import logging +import os +import sys + +# Add the parent directory to the path so we can import the module +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + +from codegen_on_oss.analyzers.codebase_ai import CodebaseAI + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + handlers=[logging.StreamHandler()], +) +logger = logging.getLogger(__name__) + + +class SimpleEditable: + """A simple implementation of the Editable interface for demonstration.""" + + def __init__(self, source_code): + """Initialize with source code.""" + self._source = source_code + + @property + def extended_source(self): + """Get the extended source code.""" + return self._source + + +def main(): + """Run the example.""" + logger.info("Running codebase_ai example") + + # Create a CodebaseAI instance + codebase_ai = CodebaseAI() + + # Create a simple editable with some Python code + code = """ +def 
calculate_sum(a, b): + return a + b + +def calculate_product(a, b): + return a * b +""" + editable = SimpleEditable(code) + + # Generate a system prompt with the editable as the target + system_prompt = codebase_ai.generate_system_prompt(editable) + logger.info("Generated system prompt:") + print(f"\n{'-' * 80}\n{system_prompt}\n{'-' * 80}\n") + + # Generate context with additional information + additional_context = { + "requirements": "The code should handle edge cases and validate inputs.", + "examples": [ + "calculate_sum(1, 2) should return 3", + "calculate_product(3, 4) should return 12", + ], + } + context_string = codebase_ai.generate_context(additional_context) + logger.info("Generated context:") + print(f"\n{'-' * 80}\n{context_string}\n{'-' * 80}\n") + + # Generate a system prompt with both target and context + full_prompt = codebase_ai.generate_system_prompt(editable, additional_context) + logger.info("Generated full prompt:") + print(f"\n{'-' * 80}\n{full_prompt}\n{'-' * 80}\n") + + # Generate tools + tools = codebase_ai.generate_tools() + logger.info(f"Generated {len(tools)} tools") + for tool in tools: + print(f"Tool: {tool['function']['name']}") + print(f"Description: {tool['function']['description']}") + print() + + logger.info("Example completed successfully") + + +if __name__ == "__main__": + main() diff --git a/codegen-on-oss/tests/analyzers/test_codebase_ai.py b/codegen-on-oss/tests/analyzers/test_codebase_ai.py new file mode 100644 index 000000000..86c54085b --- /dev/null +++ b/codegen-on-oss/tests/analyzers/test_codebase_ai.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 +""" +Tests for the codebase_ai module. 
+""" + +import os +import sys +import unittest + +# Add the parent directory to the path so we can import the module +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))) + +from codegen_on_oss.analyzers.codebase_ai import ( + CodebaseAI, + generate_context, + generate_flag_system_prompt, + generate_flag_tools, + generate_system_prompt, + generate_tools, +) + + +class MockEditable: + """Mock Editable class for testing.""" + + @property + def extended_source(self): + return "def test_function():\n return 'test'" + + +class MockFile: + """Mock File class for testing.""" + + @property + def source(self): + return "def test_function():\n return 'test'" + + +class TestCodebaseAI(unittest.TestCase): + """Test cases for the codebase_ai module.""" + + def setUp(self): + """Set up test fixtures.""" + self.editable = MockEditable() + self.file = MockFile() + self.codebase_ai = CodebaseAI() + + def test_generate_system_prompt_no_target_no_context(self): + """Test generating a system prompt with no target and no context.""" + prompt = generate_system_prompt() + self.assertIn("Hey CodegenBot!", prompt) + self.assertIn("Your job is to follow the instructions of the user.", prompt) + self.assertNotIn("The user has provided some additional context", prompt) + + def test_generate_system_prompt_with_target(self): + """Test generating a system prompt with a target.""" + prompt = generate_system_prompt(self.editable) + self.assertIn("Hey CodegenBot!", prompt) + self.assertIn( + "The user has just requested a response on the following code snippet:", + prompt, + ) + self.assertIn("def test_function():", prompt) + self.assertIn("return 'test'", prompt) + + def test_generate_system_prompt_with_context(self): + """Test generating a system prompt with context.""" + prompt = generate_system_prompt(context="Test context") + self.assertIn("Hey CodegenBot!", prompt) + self.assertIn("The user has provided some additional context", prompt) + self.assertIn("Test 
context", prompt) + + def test_generate_flag_system_prompt(self): + """Test generating a flag system prompt.""" + prompt = generate_flag_system_prompt(self.editable) + self.assertIn( + "You are now tasked with determining whether to flag the symbol", prompt + ) + self.assertIn("def test_function():", prompt) + self.assertIn("return 'test'", prompt) + + def test_generate_context_string(self): + """Test generating context from a string.""" + context = generate_context("Test context") + self.assertIn("====== Context ======", context) + self.assertIn("Test context", context) + self.assertIn("====================", context) + + def test_generate_context_editable(self): + """Test generating context from an Editable.""" + context = generate_context(self.editable) + self.assertIn("====== MockEditable ======", context) + self.assertIn("def test_function():", context) + self.assertIn("return 'test'", context) + + def test_generate_context_file(self): + """Test generating context from a File.""" + context = generate_context(self.file) + self.assertIn("====== MockFile======", context) + self.assertIn("def test_function():", context) + self.assertIn("return 'test'", context) + + def test_generate_context_list(self): + """Test generating context from a list.""" + context = generate_context(["Test context 1", "Test context 2"]) + self.assertIn("Test context 1", context) + self.assertIn("Test context 2", context) + + def test_generate_context_dict(self): + """Test generating context from a dict.""" + context = generate_context({"key1": "value1", "key2": "value2"}) + self.assertIn("[[[ key1 ]]]", context) + self.assertIn("value1", context) + self.assertIn("[[[ key2 ]]]", context) + self.assertIn("value2", context) + + def test_generate_tools(self): + """Test generating tools.""" + tools = generate_tools() + self.assertEqual(len(tools), 1) + self.assertEqual(tools[0]["function"]["name"], "set_answer") + + def test_generate_flag_tools(self): + """Test generating flag tools.""" + 
tools = generate_flag_tools() + self.assertEqual(len(tools), 1) + self.assertEqual(tools[0]["function"]["name"], "should_flag") + + def test_codebase_ai_class_methods(self): + """Test CodebaseAI class methods.""" + # Test generate_system_prompt + prompt = self.codebase_ai.generate_system_prompt(self.editable) + self.assertIn("Hey CodegenBot!", prompt) + self.assertIn("def test_function():", prompt) + + # Test generate_flag_system_prompt + flag_prompt = self.codebase_ai.generate_flag_system_prompt(self.editable) + self.assertIn( + "You are now tasked with determining whether to flag the symbol", + flag_prompt, + ) + + # Test generate_context + context = self.codebase_ai.generate_context("Test context") + self.assertIn("Test context", context) + + # Test generate_tools + tools = self.codebase_ai.generate_tools() + self.assertEqual(len(tools), 1) + self.assertEqual(tools[0]["function"]["name"], "set_answer") + + # Test generate_flag_tools + flag_tools = self.codebase_ai.generate_flag_tools() + self.assertEqual(len(flag_tools), 1) + self.assertEqual(flag_tools[0]["function"]["name"], "should_flag") + + +if __name__ == "__main__": + unittest.main()